diff --git a/Makefile b/Makefile
index d0de6cab..27f7310b 100644
--- a/Makefile
+++ b/Makefile
@@ -115,6 +115,20 @@ docker-push:
fi
# Testing
+
+# This target updates the Spark image for all SparkApplication manifests in tests
+.PHONY: update-spark-image-in-test-manifests
+update-spark-image-in-test-manifests:
+ if ! command -v yq &> /dev/null; then
+ echo 'Error: yq is not installed.' >&2
+ exit 1
+ fi
+ for yaml in tests/**/**/*.yaml; do
+ if [[ `yq r $$yaml 'kind'` == 'SparkApplication' ]]; then
+ yq w -i $$yaml 'spec.image' $(SPARK_IMAGE_FULL_NAME)
+ fi
+ done
+
.PHONY: test
test: docker-builder
test: docker-push
@@ -122,18 +136,18 @@ test:
docker run -i --rm \
-v $(ROOT_DIR):/kudo-spark-operator \
-v $(KUBECONFIG):/root/.kube/config \
- -e TEST_DIR=/kudo-spark-operator/tests \
+ -w /kudo-spark-operator \
+ -e ROOT_DIR=/kudo-spark-operator \
-e KUBECONFIG=/root/.kube/config \
- -e SPARK_IMAGE=$(SPARK_IMAGE_FULL_NAME) \
- -e OPERATOR_IMAGE=$(OPERATOR_IMAGE_FULL_NAME) \
- -e TEAMCITY_VERSION="$(TEAMCITY_VERSION)" \
+ -e OPERATOR_DOCKER_REPO=$(OPERATOR_DOCKER_REPO) \
+ -e OPERATOR_VERSION=$(OPERATOR_VERSION) \
-e AWS_ACCESS_KEY_ID="$(AWS_ACCESS_KEY_ID)" \
-e AWS_SECRET_ACCESS_KEY="$(AWS_SECRET_ACCESS_KEY)" \
-e AWS_SESSION_TOKEN="$(AWS_SESSION_TOKEN)" \
- -e AWS_BUCKET_NAME="$(AWS_BUCKET_NAME)" \
- -e AWS_BUCKET_PATH="$(AWS_BUCKET_PATH)" \
+ -e AWS_BUCKET_NAME=$(AWS_BUCKET_NAME) \
+ -e AWS_BUCKET_PATH=$(AWS_BUCKET_PATH) \
$(shell cat $(ROOT_DIR)/docker-builder) \
- /kudo-spark-operator/tests/run.sh
+ kubectl kuttl test --report xml $(KUTTL_FLAGS)
.PHONY: install
install:
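Note on the new `update-spark-image-in-test-manifests` target above: it assumes a bash-capable recipe shell and yq v3's read/write CLI (`yq r` / `yq w -i`); yq v4 would instead use expression syntax such as `yq e '.kind'` and `yq e -i '.spec.image = ...'`. A minimal standalone sketch of the same update step, run from the repository root with SPARK_IMAGE_FULL_NAME exported:

    #!/usr/bin/env bash
    # Hedged sketch: point every SparkApplication manifest under tests/ at the given Spark image.
    # Without `shopt -s globstar`, each `**` segment below matches exactly one directory level.
    set -euo pipefail
    for yaml in tests/**/**/*.yaml; do
      if [[ "$(yq r "$yaml" 'kind')" == 'SparkApplication' ]]; then
        yq w -i "$yaml" 'spec.image' "$SPARK_IMAGE_FULL_NAME"
      fi
    done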
diff --git a/images/builder/Dockerfile b/images/builder/Dockerfile
index 77b29452..9b1fc532 100644
--- a/images/builder/Dockerfile
+++ b/images/builder/Dockerfile
@@ -1,15 +1,20 @@
-FROM golang:1.13.0@sha256:de697ce5ae02f3d9a57b0603fbb648efadfa212727e702ad3a807b43eba7f6d6
+FROM python:3.8.2-slim-buster
-ARG KUDO_DOWNLOAD_URL=https://github.com/kudobuilder/kudo/releases/download/v0.15.0/kubectl-kudo_0.15.0_linux_x86_64
-ARG KUBECTL_DOWNLOAD_URL=https://storage.googleapis.com/kubernetes-release/release/v1.17.8/bin/linux/amd64/kubectl
+ARG KUDO_VERSION=0.15.0
+ARG KUBECTL_VERSION=1.17.8
+ARG KUTTL_VERSION=0.6.1
+ARG KUDO_DOWNLOAD_URL=https://github.com/kudobuilder/kudo/releases/download/v${KUDO_VERSION}/kubectl-kudo_${KUDO_VERSION}_linux_x86_64
+ARG KUBECTL_DOWNLOAD_URL=https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl
+ARG KUTTL_DOWNLOAD_URL=https://github.com/kudobuilder/kuttl/releases/download/v${KUTTL_VERSION}/kubectl-kuttl_${KUTTL_VERSION}_linux_x86_64
ARG JAVA_URL=https://downloads.mesosphere.com/java/openjdk-8u212b03-hotspot-linux-x64.tar.gz
ENV JAVA_HOME=/usr/local/java
ENV PATH=$PATH:${JAVA_HOME}/bin
-RUN curl -LO ${KUBECTL_DOWNLOAD_URL} && \
- chmod +x ./kubectl && \
- mv ./kubectl /usr/local/bin/kubectl && \
- curl -L ${KUDO_DOWNLOAD_URL} -o /go/bin/kubectl-kudo && \
- chmod +x /go/bin/kubectl-kudo && \
+RUN apt-get update && apt-get install -y curl jq && \
+ pip install awscli && \
+ curl -L ${KUBECTL_DOWNLOAD_URL} -o /usr/local/bin/kubectl && \
+ curl -L ${KUDO_DOWNLOAD_URL} -o /usr/local/bin/kubectl-kudo && \
+ curl -L ${KUTTL_DOWNLOAD_URL} -o /usr/local/bin/kubectl-kuttl && \
+ for tool in kubectl*; do chmod +x /usr/local/bin/${tool}; done && \
mkdir -p /usr/local/java && \
curl -L -O ${JAVA_URL} && tar -xf openjdk-8u212b03-hotspot-linux-x64.tar.gz -C /usr/local/java --strip-components=1
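To try the refreshed builder image locally (the tag below is an arbitrary example, not one used by the Makefile):

    # Build the test-builder image from the repository root and sanity-check the bundled tooling.
    docker build -t spark-operator-test-builder:local images/builder
    docker run --rm spark-operator-test-builder:local sh -c \
      'kubectl version --client && kubectl-kudo version && kubectl-kuttl version && aws --version && java -version'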
diff --git a/kuttl-test.yaml b/kuttl-test.yaml
new file mode 100644
index 00000000..afddd374
--- /dev/null
+++ b/kuttl-test.yaml
@@ -0,0 +1,5 @@
+apiVersion: kudo.dev/v1alpha1
+kind: TestSuite
+testDirs:
+- tests
+timeout: 120
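For reference: `kubectl kuttl test` picks up this kuttl-test.yaml from the working directory (set via `-w /kudo-spark-operator` in the updated `test` recipe), discovers test cases under `tests/`, and uses 120 seconds as the default per-step timeout. A sketch of running the suite locally, assuming kubectl-kuttl is installed and KUBECONFIG points at a test cluster:

    # Run the whole KUTTL suite and emit a JUnit-style XML report, mirroring the Makefile invocation.
    export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"
    kubectl kuttl test --report xml
    # A single test case can be selected via KUTTL_FLAGS in the Makefile or directly, e.g.:
    # kubectl kuttl test --report xml --test <test-case-name>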
diff --git a/operators b/operators
index 4cfe6827..006fe389 160000
--- a/operators
+++ b/operators
@@ -1 +1 @@
-Subproject commit 4cfe6827baf1ee823ffff9977b80f1736daa9fba
+Subproject commit 006fe3898caebb198923ea2694af5872d71e0040
diff --git a/tests/affinity_and_toleration_test.go b/tests/affinity_and_toleration_test.go
deleted file mode 100644
index 500c10fe..00000000
--- a/tests/affinity_and_toleration_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package tests
-
-import (
- "errors"
- "fmt"
- "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- "gotest.tools/assert"
- v12 "k8s.io/api/core/v1"
- "testing"
-)
-
-const testLabelName = "non_existing_label"
-
-func TestPodAffinityAndToleration(t *testing.T) {
-
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- assert.NilError(t, err)
-
- jobName := "mock-task-runner"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "60"},
- "DriverAffinity": true,
- "DriverTolerations": true,
- "ExecutorAffinity": true,
- "ExecutorTolerations": true,
- "Label": testLabelName,
- },
- }
-
- err = spark.SubmitJob(&job)
- assert.NilError(t, err)
-
- err = spark.WaitForJobState(job, v1beta2.RunningState)
- assert.NilError(t, err)
-
- var executors []v12.Pod
-
- log.Infof("Checking executor pods...")
- err = utils.Retry(func() error {
- pods, e := spark.ExecutorPods(job)
- if e != nil {
- return err
- } else if len(pods) == 0 {
- return errors.New("No executors found")
- } else {
- log.Infof("Found %d executor(s).", len(pods))
- executors = pods
- }
- return nil
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- t.Run("TestDriverPod", func(t *testing.T) {
- driver, err := spark.DriverPod(job)
- assert.NilError(t, err)
- verifyPodSpec(t, driver)
- })
-
- t.Run("TestExecutorPod", func(t *testing.T) {
- verifyPodSpec(t, executors[0])
- })
-
-}
-
-func verifyPodSpec(t *testing.T, pod v12.Pod) {
- testAffinityRulesWithKeyPresent(t, pod, testLabelName)
- testTolerationWithKeyPresent(t, pod, testLabelName)
-}
-
-func testAffinityRulesWithKeyPresent(t *testing.T, pod v12.Pod, label string) {
- assert.Assert(t, pod.Spec.Affinity != nil, "Pod affinity is nil")
- var nodeAffinityRulePresent bool
- var podAffinityRulePresent bool
- for _, rule := range pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
- NodeSelectorTerms {
- for _, term := range rule.MatchExpressions {
- if term.Key == label {
- nodeAffinityRulePresent = true
- break
- }
- }
- }
- for _, rule := range pod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
- for _, expr := range rule.LabelSelector.MatchExpressions {
- if expr.Key == label {
- podAffinityRulePresent = true
- break
- }
- }
- }
- assert.Assert(t, nodeAffinityRulePresent, fmt.Sprintf("Node affinity rule is missing, pod spec: \n%s", pod.Spec.String()))
- assert.Assert(t, podAffinityRulePresent, fmt.Sprintf("Pod affinity rule is missing, pod spec: \n%s", pod.Spec.String()))
-}
-
-func testTolerationWithKeyPresent(t *testing.T, pod v12.Pod, label string) {
- var tolerationPresent bool
- for _, toleration := range pod.Spec.Tolerations {
- if toleration.Key == label {
- tolerationPresent = true
- break
- }
- }
- assert.Assert(t, tolerationPresent, fmt.Sprintf("Toleration with key \"%s\" not found, pod spec: \n%s",
- label, pod.Spec.String()))
-}
diff --git a/tests/basic_test.go b/tests/basic_test.go
deleted file mode 100644
index f2dac57b..00000000
--- a/tests/basic_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package tests
-
-import (
- "fmt"
- "github.com/google/uuid"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "strings"
- "testing"
-)
-
-func TestMain(m *testing.M) {
- utils.InstallKudo()
- defer utils.UninstallKudo()
-
- m.Run()
-}
-
-func TestSparkOperatorInstallation(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err.Error())
- }
-
- k8sNamespace, err := spark.K8sClients.CoreV1().Namespaces().Get(spark.Namespace, v1.GetOptions{})
- if err != nil {
- t.Fatal(err.Error())
- }
-
- log.Infof("Spark operator is installed in namespace %s", k8sNamespace.Name)
-}
-
-func TestSparkOperatorInstallationWithCustomNamespace(t *testing.T) {
- customNamespace := "custom-test-namespace"
- spark := utils.SparkOperatorInstallation{
- Namespace: customNamespace,
- }
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err.Error())
- }
-
- k8sNamespace, err := spark.K8sClients.CoreV1().Namespaces().Get(spark.Namespace, v1.GetOptions{})
- if err != nil {
- t.Fatal(err.Error())
- }
-
- if k8sNamespace.Name != customNamespace {
- t.Errorf("Actual namespace is %s, while %s was expected", k8sNamespace.Name, customNamespace)
- }
-}
-
-func TestJobSubmission(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err)
- }
-
- job := utils.SparkJob{
- Name: "spark-pi",
- Template: "spark-pi.yaml",
- }
-
- err = spark.SubmitJob(&job)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- err = spark.WaitUntilSucceeded(job)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestSparkHistoryServerInstallation(t *testing.T) {
- awsBucketName, err := utils.GetS3BucketName()
- if err != nil {
- t.Fatal(err)
- }
- awsFolderPath, err := utils.GetS3BucketPath()
- if err != nil {
- t.Fatal(err)
- }
-
- awsFolderPath = fmt.Sprintf("%s/%s/%s", awsFolderPath, "spark-history-server", uuid.New().String())
- // Make sure folder is created
- if err := utils.AwsS3CreateFolder(awsBucketName, awsFolderPath); err != nil {
- t.Fatal(err.Error())
- }
- defer utils.AwsS3DeleteFolder(awsBucketName, awsFolderPath)
-
- awsBucketPath := "s3a://" + awsBucketName + "/" + awsFolderPath
-
- awsCredentials, err := utils.GetAwsCredentials()
- if err != nil {
- t.Fatal(err)
- }
-
- clientSet, err := utils.GetK8sClientSet()
- if err != nil {
- t.Fatal(err)
- }
-
- if _, err := utils.CreateNamespace(clientSet, utils.DefaultNamespace); err != nil {
- t.Fatal(err)
- }
-
- // create a Secret with Spark configuration holding AWS credentials
- // which will be used by Spark History Server to authenticate with S3
- var sparkConf = strings.Join(
- []string{
- fmt.Sprintf("spark.hadoop.fs.s3a.access.key %s", awsCredentials[utils.AwsAccessKeyId]),
- fmt.Sprintf("spark.hadoop.fs.s3a.secret.key %s", awsCredentials[utils.AwsSecretAccessKey]),
- fmt.Sprintf("spark.hadoop.fs.s3a.session.token %s", awsCredentials[utils.AwsSessionToken]),
- },
- "\n",
- )
-
- sparkConfSecretName := "spark-conf"
- sparkConfSecretKey := "spark-defaults.conf"
- sparkConfSecretData := map[string][]byte{
- sparkConfSecretKey: []byte(sparkConf),
- }
-
- if err := utils.CreateSecretEncoded(clientSet, sparkConfSecretName, utils.DefaultNamespace, sparkConfSecretData); err != nil {
- t.Fatal("Error while creating a Secret", err)
- }
-
- // configure Spark Operator parameters
- operatorParams := map[string]string{
- "enableHistoryServer": "true",
- "historyServerFsLogDirectory": awsBucketPath,
- "historyServerOpts": "-Dspark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem",
- "historyServerSparkConfSecret": sparkConfSecretName,
- }
-
- // in case we are using temporary security credentials
- if len(string(awsCredentials[utils.AwsSessionToken])) > 0 {
- operatorParams["historyServerOpts"] =
- strings.Join(
- []string{
- operatorParams["historyServerOpts"],
- "-Dspark.hadoop.fs.s3a.aws.credentials.provider=org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider",
- },
- " ",
- )
-
- }
-
- spark := utils.SparkOperatorInstallation{
- SkipNamespaceCleanUp: true,
- Params: operatorParams,
- }
-
- if err := spark.InstallSparkOperator(); err != nil {
- t.Fatal(err.Error())
- }
- defer spark.CleanUp()
-
- // create a Secret for SparkApplication
- if err := utils.CreateSecretEncoded(clientSet, utils.DefaultAwsSecretName, utils.DefaultNamespace, awsCredentials); err != nil {
- t.Fatal("Error while creating a Secret", err)
- }
-
- sparkAppParams := map[string]interface{}{
- "AwsBucketPath": awsBucketPath,
- "AwsSecretName": utils.DefaultAwsSecretName,
- }
-
- if _, isPresent := awsCredentials[utils.AwsSessionToken]; isPresent {
- sparkAppParams["AwsSessionToken"] = "true"
- }
-
- job := utils.SparkJob{
- Name: "spark-pi-history-server",
- Params: sparkAppParams,
- Template: "spark-pi-history-server-job.yaml",
- }
-
- // Submit a SparkApplication
- if err := spark.SubmitJob(&job); err != nil {
- t.Fatal(err.Error())
- }
-
- if err := spark.WaitUntilSucceeded(job); err != nil {
- t.Error(err.Error())
- }
-
- // Find out History Server POD name
- instanceName := fmt.Sprint(utils.OperatorName, "-history-server")
- historyServerPodName, err := utils.Kubectl("get", "pods",
- fmt.Sprintf("--namespace=%s", spark.Namespace),
- "--field-selector=status.phase=Running",
- fmt.Sprintf("--selector=app.kubernetes.io/name=%s", instanceName),
- "--output=jsonpath={.items[*].metadata.name}")
- if err != nil {
- t.Error(err.Error())
- }
-
- // Find out the Job ID for the submitted SparkApplication
- jobID, err := utils.Kubectl(
- "get",
- "pods",
- "--namespace="+spark.Namespace,
- "--output=jsonpath={.items[*].metadata.labels.spark-app-selector}",
- )
- if err != nil {
- t.Error(err.Error())
- }
-
- // Get an application detail from History Server
- err = utils.RetryWithTimeout(utils.DefaultRetryTimeout, utils.DefaultRetryInterval, func() error {
- historyServerResponse, err := utils.Kubectl(
- "exec",
- historyServerPodName,
- "--namespace="+spark.Namespace,
- "--",
- "/usr/bin/curl",
- "http://localhost:18080/api/v1/applications/"+jobID+"/jobs",
- )
- if err != nil {
- return err
- }
-
- if len(historyServerResponse) > 0 &&
- !strings.Contains(historyServerResponse, "no such app") {
- log.Infof("Job Id '%s' is successfully recorded in History Server", jobID)
- return nil
- }
- return fmt.Errorf("Expecting Job Id '%s' to be recorded in History Server", jobID)
- })
-
- if err != nil {
- t.Errorf("The Job Id '%s' haven't appeared in History Server", jobID)
- log.Infof("Spark History Server logs:")
- utils.Kubectl("logs", "-n", spark.Namespace, historyServerPodName)
- log.Info("Driver logs:")
- utils.Kubectl("logs", "-n", spark.Namespace, utils.DriverPodName(job.Name))
- }
-}
-
-func TestVolumeMounts(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err)
- }
-
- jobName := "mock-task-runner"
- volumeName := "test-volume"
- mountPath := "/opt/spark/work-dir"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "60"},
- "VolumeName": volumeName,
- "MountPath": mountPath,
- },
- }
-
- err = spark.SubmitJob(&job)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- err = utils.RetryWithTimeout(utils.DefaultRetryTimeout, utils.DefaultRetryInterval, func() error {
- lsCmdResponse, err := utils.Kubectl(
- "exec",
- utils.DriverPodName(jobName),
- "--namespace="+spark.Namespace,
- "--",
- "ls",
- "-ltr",
- mountPath+"/tmp",
- )
- if err != nil {
- return err
- }
-
- if len(lsCmdResponse) > 0 &&
- strings.Contains(lsCmdResponse, "spark") {
- log.Infof("Successfully mounted '%s' and volume is writable", volumeName)
- return nil
- }
- return fmt.Errorf("Expecting '%s' to be mounted", volumeName)
- })
-
- if err != nil {
- t.Errorf("Unable to mount volume '%s'", volumeName)
- }
-
- err = spark.WaitUntilSucceeded(job)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestPythonSupport(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- if err := spark.InstallSparkOperator(); err != nil {
- t.Fatal(err)
- }
- defer spark.CleanUp()
-
- jobName := "spark-pi-python"
- job := utils.SparkJob{
- Name: jobName,
- Template: fmt.Sprintf("%s.yaml", jobName),
- }
-
- if err := spark.SubmitJob(&job); err != nil {
- t.Fatal(err)
- }
-
- if err := spark.WaitForOutput(job, "Pi is roughly 3.1"); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestRSupport(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- if err := spark.InstallSparkOperator(); err != nil {
- t.Fatal(err)
- }
- defer spark.CleanUp()
-
- jobName := "spark-r-als"
- job := utils.SparkJob{
- Name: jobName,
- Template: fmt.Sprintf("%s.yaml", jobName),
- ExecutorsCount: 3,
- }
-
- if err := spark.SubmitJob(&job); err != nil {
- t.Fatal(err)
- }
-
- if err := spark.WaitForOutput(job, "userId movieId rating prediction"); err != nil {
- t.Fatal(err)
- }
-
- if err := spark.WaitUntilSucceeded(job); err != nil {
- t.Fatal(err)
- }
-}
diff --git a/tests/config_test.go b/tests/config_test.go
deleted file mode 100644
index 8c675277..00000000
--- a/tests/config_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package tests
-
-import (
- "errors"
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- "io/ioutil"
- v1 "k8s.io/api/core/v1"
- "path"
- "strings"
- "testing"
-)
-
-func TestMountSparkConfigMap(t *testing.T) {
- err := testMountedConfigMap(
- "sparkConfigMap",
- "resources/test-mount-config-map/spark-defaults.conf",
- "spark-test-configmap",
- "/etc/spark/conf",
- "SPARK_CONF_DIR")
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestMountHadoopConfigMap(t *testing.T) {
- err := testMountedConfigMap(
- "hadoopConfigMap",
- "resources/test-mount-config-map/core-site.xml",
- "hadoop-test-configmap",
- "/etc/hadoop/conf",
- "HADOOP_CONF_DIR")
- if err != nil {
- t.Error(err)
- }
-}
-
-func testMountedConfigMap(sparkAppConfigParam string, confFilepath string, configMapName string, remoteConfDir string, confDirEnvVarName string) error {
-
- _, confFilename := path.Split(confFilepath)
-
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- return err
- }
-
- // Create a configmap for spark-defaults.com
- err = utils.CreateConfigMap(spark.K8sClients, configMapName, spark.Namespace)
- if err != nil {
- return err
- }
- defer utils.DeleteConfigName(spark.K8sClients, configMapName, spark.Namespace)
-
- err = utils.AddFileToConfigMap(spark.K8sClients, configMapName, spark.Namespace, confFilename, confFilepath)
- if err != nil {
- return err
- }
-
- job := utils.SparkJob{
- Name: "mount-spark-configmap-test",
- Template: "spark-mock-task-runner-job-mount-config.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "600"},
- sparkAppConfigParam: configMapName,
- },
- }
-
- err = spark.SubmitAndWaitForExecutors(&job)
- if err != nil {
- return err
- }
-
- // Making sure driver and executor pods have correct volume mounted
- executors, _ := spark.ExecutorPods(job)
- driver, _ := spark.DriverPod(job)
-
- for _, pod := range append(executors, driver) {
- if !hasConfigMap(pod, configMapName) {
- return errors.New(fmt.Sprintf("Couldn't find volume %s mounted on pod %s", configMapName, pod.Name))
- }
-
- // Check that *_CONF_DIR is set correctly
- if !utils.IsEnvVarPresentInPod(v1.EnvVar{Name: confDirEnvVarName, Value: remoteConfDir}, pod) {
- return errors.New(fmt.Sprintf("%s is not set to %s on pod %s", confDirEnvVarName, remoteConfDir, pod.Name))
- }
-
- // Check that the config file is available in the container
- sameContent, err := hasSimilarContents(pod, path.Join(remoteConfDir, confFilename), confFilepath)
- if err != nil {
- return errors.New(fmt.Sprintf("Couldn't compare spark configuration file: %v", err))
- }
- if !sameContent {
- return errors.New(fmt.Sprintf("The content of %s differs locally and in pod %s/%s", confFilename, pod.Namespace, pod.Name))
- } else {
- log.Infof("%s was mounted correctly!", confFilename)
- }
- }
-
- return nil
-}
-
-func hasConfigMap(pod v1.Pod, configMapName string) bool {
- for _, v := range pod.Spec.Volumes {
- if v.ConfigMap != nil && v.ConfigMap.Name == configMapName {
- log.Infof("Found volume %s: %s in pod %s/%s", v.Name, v.ConfigMap.Name, pod.Namespace, pod.Name)
- return true
- }
- }
- return false
-}
-
-func hasSimilarContents(pod v1.Pod, remotePath string, localPath string) (bool, error) {
- localContent, err := ioutil.ReadFile(localPath)
- if err != nil {
- return false, err
- }
-
- var remoteContent string
-
- err = utils.Retry(func() error {
- remoteContent, err = utils.Kubectl("exec", "-n", pod.Namespace, pod.Name, "--", "cat", remotePath)
- return err
- })
-
- return strings.Compare(strings.TrimSpace(string(localContent)), strings.TrimSpace(remoteContent)) == 0, nil
-}
diff --git a/tests/encryption_test.go b/tests/encryption_test.go
deleted file mode 100644
index f537a481..00000000
--- a/tests/encryption_test.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package tests
-
-import (
- "fmt"
- "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- "github.com/stretchr/testify/suite"
- "io/ioutil"
- "os/exec"
- "strings"
- "testing"
- "time"
-)
-
-var counter int
-
-type SparkEncryptionSuite struct {
- operator utils.SparkOperatorInstallation
- // name of Secret object with sensitive data
- sparkSecrets string
- // password for private key
- keyPassword string
- // password for keystore
- keyStorePassword string
- // password for truststore
- trustStorePassword string
- keyStorePath string
- trustStorePath string
- // Spark config properties
- sparkConf map[string]string
- // name of a Secret with key-stores
- sslSecrets string
- // folder used for storing the key-stores
- sslMountDir string
- suite.Suite
-}
-
-func TestSparkEncryptionSuite(t *testing.T) {
- sslMountDir := "/tmp/spark/ssl"
- testSuite := SparkEncryptionSuite{
- sparkSecrets: "secrets",
- keyPassword: "changeit",
- keyStorePassword: "changeit",
- trustStorePassword: "changeit",
- sslSecrets: "ssl-secrets",
- sslMountDir: sslMountDir,
- keyStorePath: fmt.Sprintf("%s/keystore.jks", sslMountDir),
- trustStorePath: fmt.Sprintf("%s/truststore.jks", sslMountDir),
- }
- suite.Run(t, &testSuite)
-}
-
-func (suite *SparkEncryptionSuite) SetupSuite() {
- suite.operator = utils.SparkOperatorInstallation{}
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow(err.Error())
- }
- suite.prepareKeyStores()
- suite.createSecrets()
-}
-
-func (suite *SparkEncryptionSuite) createSecrets() {
- sparkSecrets := map[string][]byte{
- "key-password": []byte(suite.keyPassword),
- "keystore-password": []byte(suite.keyStorePassword),
- "truststore-password": []byte(suite.trustStorePassword),
- }
-
- if err := utils.CreateSecretEncoded(
- suite.operator.K8sClients,
- suite.sparkSecrets,
- suite.operator.Namespace,
- sparkSecrets); err != nil {
- suite.FailNowf("Error while creating secret \"%s\"", suite.sparkSecrets, err)
- }
-
- keystore, _ := ioutil.ReadFile(suite.keyStorePath)
- truststore, _ := ioutil.ReadFile(suite.trustStorePath)
-
- if err := utils.CreateSecretEncoded(suite.operator.K8sClients,
- suite.sslSecrets,
- suite.operator.Namespace,
- map[string][]byte{
- "keystore.jks": keystore,
- "truststore.jks": truststore,
- }); err != nil {
- suite.FailNowf("Error while creating secret \"%s\"", suite.sslSecrets, err)
- }
-}
-
-func (suite *SparkEncryptionSuite) TearDownSuite() {
- suite.operator.CleanUp()
-}
-
-func (suite *SparkEncryptionSuite) TestRpc() {
- sparkConf := map[string]string{
- "spark.authenticate": "true",
- }
- suite.Run("TestAuth", func() {
- assertSparkApp(suite, sparkConf, []string{"1", "1"})
- })
-
- sparkConf["spark.network.crypto.enabled"] = "true"
- suite.Run("TestNetworkEncryption", func() {
- assertSparkApp(suite, sparkConf, []string{"1", "1"})
- })
-
- sparkConf["spark.authenticate.enableSaslEncryption"] = "true"
- suite.Run("TestSaslEncryption", func() {
- assertSparkApp(suite, sparkConf, []string{"1", "1"})
- })
-}
-
-func (suite *SparkEncryptionSuite) TestSSL() {
- sparkConf := map[string]string{
- "spark.ssl.enabled": "true",
- "spark.ssl.keyStore": suite.keyStorePath,
- "spark.ssl.protocol": "TLSv1.2",
- "spark.ssl.trustStore": suite.trustStorePath,
- "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_KEYPASSWORD": fmt.Sprintf("%s:key-password", suite.sparkSecrets),
- "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_KEYSTOREPASSWORD": fmt.Sprintf("%s:keystore-password", suite.sparkSecrets),
- "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_TRUSTSTOREPASSWORD": fmt.Sprintf("%s:truststore-password", suite.sparkSecrets),
- fmt.Sprintf("spark.kubernetes.driver.secrets.%s", suite.sslSecrets): suite.sslMountDir,
- }
- assertSparkApp(suite, sparkConf, []string{"1", "20"})
-}
-
-// method creates required key stores for Spark SSL configuration
-func (suite *SparkEncryptionSuite) prepareKeyStores() {
- const commandName = "keytool"
- const alias = "selfsigned"
- certPath := fmt.Sprint(suite.sslMountDir, "/", "test.cert")
-
- if err := exec.Command("mkdir", "-p", suite.sslMountDir).Run(); err != nil {
- suite.FailNowf("Can't create a temp dir \"%s\"", suite.sslMountDir, err)
- }
- // generate a public-private key pair
- genKeyPairCmd := []string{
- "-genkeypair",
- "-keystore", suite.keyStorePath,
- "-keyalg", "RSA",
- "-alias", alias,
- "-dname", "CN=sparkcert OU=KUDO O=D2IQ L=SF S=CA C=US",
- "-storepass", suite.keyStorePassword,
- "-keypass", suite.keyPassword,
- }
- // export the generated certificate
- exportCertCmd := []string{
- "-exportcert",
- "-keystore", suite.keyStorePath,
- "-alias", alias,
- "-storepass", suite.keyStorePassword,
- "-file", certPath,
- }
- // import the certificate into a truststore
- importCertCmd := []string{
- "-importcert",
- "-keystore", suite.trustStorePath,
- "-alias", alias,
- "-storepass", suite.trustStorePath,
- "-file", certPath,
- "-noprompt",
- }
-
- prepKeyStoresCommandChain := [][]string{genKeyPairCmd, exportCertCmd, importCertCmd}
-
- for _, commandArgs := range prepKeyStoresCommandChain {
- _, err := utils.RunAndLogCommandOutput(exec.Command(commandName, commandArgs...))
- if err != nil {
- suite.FailNow("Error while preparing the key-stores", err)
- }
- }
-
- suite.Assert().FileExists(suite.keyStorePath)
- suite.Assert().FileExists(suite.trustStorePath)
- suite.Assert().FileExists(certPath)
-}
-
-// launches a Spark application based on `sparkConf` and asserts its successful completion
-func assertSparkApp(suite *SparkEncryptionSuite, sparkConf map[string]string, args []string) {
- counter++
- appName := fmt.Sprintf("spark-mock-task-runner-%d", counter)
-
- _, authEnabled := sparkConf["spark.authenticate"]
- _, sslEnabled := sparkConf["spark.ssl.enabled"]
-
- sparkApp := utils.SparkJob{
- Name: appName,
- Template: "spark-mock-task-runner.yaml",
- Params: map[string]interface{}{
- "Args": args,
- "SparkConf": sparkConf,
- "SparkSecrets": suite.sparkSecrets,
- "SslSecrets": suite.sslSecrets,
- "AuthEnabled": authEnabled,
- "SslEnabled": sslEnabled,
- },
- }
-
- suite.Assert().NoError(suite.operator.SubmitJob(&sparkApp))
- defer suite.operator.DeleteJob(sparkApp)
-
- // when ssl is configured, check Spark UI is accessible via https on 4440 port
- if sslEnabled {
- checkSparkUI(appName, sparkApp, suite)
- }
-
- suite.Assert().NoError(suite.operator.WaitUntilSucceeded(sparkApp))
-}
-
-func checkSparkUI(appName string, sparkApp utils.SparkJob, suite *SparkEncryptionSuite) {
- if err := suite.operator.WaitForJobState(sparkApp, v1beta2.RunningState); err != nil {
- suite.Fail("SparkApplication \"%s\" is not running", appName, err)
- }
- if err := utils.RetryWithTimeout(1*time.Minute, 5*time.Second, func() error {
- response, err := utils.Kubectl("exec", utils.DriverPodName(appName), "-n", sparkApp.Namespace,
- "--",
- "curl",
- "--insecure", // allow insecure SSL
- "--location", // follow redirects
- "--include", // include headers
- "https://localhost:4440")
- if err != nil {
- return err
- }
- if !strings.Contains(response, "HTTP/1.1 200") {
- return fmt.Errorf("received status code is not successful")
- }
-        suite.Assert().Contains(response, "MockTaskRunner - Spark Jobs")
- return nil
- }); err != nil {
- suite.Fail("Unable to access Spark UI", err)
- }
-}
diff --git a/tests/go.mod b/tests/go.mod
deleted file mode 100644
index ea25b0c0..00000000
--- a/tests/go.mod
+++ /dev/null
@@ -1,26 +0,0 @@
-module github.com/mesosphere/kudo-spark-operator/tests
-
-go 1.13
-
-require (
- github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20191015041843-d615901d19b3
- github.com/aws/aws-sdk-go v1.25.24
- github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0
- github.com/fatih/structs v1.1.0
- github.com/google/uuid v1.0.0
- github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
- github.com/imdario/mergo v0.3.7 // indirect
- github.com/pkg/errors v0.8.1
- github.com/prometheus/client_golang v1.0.0
- github.com/prometheus/common v0.7.0 // indirect
- github.com/sirupsen/logrus v1.4.2
- github.com/stretchr/testify v1.3.0
- golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
- golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
- golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
- gotest.tools v2.2.0+incompatible
- k8s.io/api v0.0.0-20190819141258-3544db3b9e44
- k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d
- k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77
- k8s.io/utils v0.0.0-20190907131718-3d4f5b7dea0b // indirect
-)
diff --git a/tests/go.sum b/tests/go.sum
deleted file mode 100644
index aa10e460..00000000
--- a/tests/go.sum
+++ /dev/null
@@ -1,175 +0,0 @@
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20191015041843-d615901d19b3 h1:ISii+0vrdAL8zFQY64ayEOqXtLeUShE/FM1jqtr3E9E=
-github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20191015041843-d615901d19b3/go.mod h1:6PnrZv6zUDkrNMw0mIoGRmGBR7i9LulhKPmxFq4rUiM=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/aws/aws-sdk-go v1.25.24 h1:E8b33GQlfNuMumepuWhpSX+Mw71cEQK1BQaKzVBl3js=
-github.com/aws/aws-sdk-go v1.25.24/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 h1:90Ly+6UfUypEF6vvvW5rQIv9opIL8CbmW9FT20LDQoY=
-github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM=
-github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
-github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
-github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
-github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
-github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
-golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU=
-golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
-gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-k8s.io/api v0.0.0-20190819141258-3544db3b9e44 h1:7Gz7/nQ7X2qmPXMyN0bNq7Zm9Uip+UnFuMZTd2l3vms=
-k8s.io/api v0.0.0-20190819141258-3544db3b9e44/go.mod h1:AOxZTnaXR/xiarlQL0JUfwQPxjmKDvVYoRp58cA7lUo=
-k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d h1:7Kns6qqhMAQWvGkxYOLSLRZ5hJO0/5pcE5lPGP2fxUw=
-k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw=
-k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77 h1:w1BoabVnPpPqQCY3sHK4qVwa12Lk8ip1pKMR1C+qbdo=
-k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77/go.mod h1:DmkJD5UDP87MVqUQ5VJ6Tj9Oen8WzXPhk3la4qpyG4g=
-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
-k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
-k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
-k8s.io/utils v0.0.0-20190907131718-3d4f5b7dea0b h1:eMM0sTvh3KBVGwJfuNcU86P38TJhlVMAICbFPDG3t0M=
-k8s.io/utils v0.0.0-20190907131718-3d4f5b7dea0b/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/tests/ha_test.go b/tests/ha_test.go
deleted file mode 100644
index 10eb7a8c..00000000
--- a/tests/ha_test.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package tests
-
-import (
- "encoding/json"
- "fmt"
- "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- "github.com/fatih/structs"
- "github.com/iancoleman/strcase"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- "github.com/pkg/errors"
- log "github.com/sirupsen/logrus"
- "github.com/stretchr/testify/suite"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- . "k8s.io/client-go/tools/leaderelection/resourcelock"
- "strconv"
- "strings"
- "testing"
- "time"
-)
-
-const electionRecordRetryInterval = 2 * time.Second
-const electionRecordRetryTimeout = 30 * time.Second
-const processingKeyLogRecordFormat = "Starting processing key: \"%s/%s\""
-const deploymentWaitTimeout = 1 * time.Minute
-
-type LeaderElectionParameters struct {
- Replicas int
- EnableLeaderElection bool
- LeaderElectionLockName string
- LeaderElectionLeaseDuration string
- LeaderElectionRenewDeadline string
- LeaderElectionRetryPeriod string
-}
-
-type HighAvailabilityTestSuite struct {
- leaderElectionParams LeaderElectionParameters
- operator utils.SparkOperatorInstallation
- suite.Suite
-}
-
-func TestHASuite(t *testing.T) {
- suite.Run(t, new(HighAvailabilityTestSuite))
-}
-
-func (suite *HighAvailabilityTestSuite) SetupSuite() {
- suite.leaderElectionParams = LeaderElectionParameters{
- Replicas: 3,
- EnableLeaderElection: true,
- LeaderElectionLockName: "leader-election-lock",
- LeaderElectionLeaseDuration: "15s",
- LeaderElectionRenewDeadline: "10s",
- LeaderElectionRetryPeriod: "3s",
- }
- if paramsMap, err := convertStructToMap(suite.leaderElectionParams); err != nil {
- suite.FailNow(err.Error())
- } else {
- suite.operator = utils.SparkOperatorInstallation{
- Params: paramsMap,
- }
- }
-
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow(err.Error())
- }
- utils.Kubectl("wait", "deployment", "--all", "--for", "condition=available",
- "--namespace", suite.operator.Namespace,
- fmt.Sprintf("--timeout=%v", deploymentWaitTimeout))
-}
-
-func (suite *HighAvailabilityTestSuite) TearDownSuite() {
- suite.operator.CleanUp()
-}
-
-func (suite *HighAvailabilityTestSuite) Test_LeaderElectionConfiguration() {
- operator := suite.operator
- params := suite.leaderElectionParams
- args, err := utils.Kubectl("get", "deployment", operator.InstanceName, "-n", operator.Namespace,
- "-o=jsonpath={.spec.template.spec.containers[0].args}")
- if err != nil {
- suite.FailNow(err.Error())
- }
- availableReplicas, _ := utils.Kubectl("get", "deployment", operator.InstanceName,
- "-n", operator.Namespace,
- "-o=jsonpath={.status.availableReplicas}")
-
- suite.Equal(strconv.Itoa(params.Replicas), availableReplicas)
- suite.Contains(args, fmt.Sprint("-leader-election=", params.EnableLeaderElection))
- suite.Contains(args, fmt.Sprint("-leader-election-lock-name=", params.LeaderElectionLockName))
- suite.Contains(args, fmt.Sprint("-leader-election-lock-namespace=", operator.Namespace))
- suite.Contains(args, fmt.Sprint("-leader-election-lease-duration=", params.LeaderElectionLeaseDuration))
- suite.Contains(args, fmt.Sprint("-leader-election-renew-deadline=", params.LeaderElectionRenewDeadline))
- suite.Contains(args, fmt.Sprint("-leader-election-retry-period=", params.LeaderElectionRetryPeriod))
-}
-
-func (suite *HighAvailabilityTestSuite) Test_LeaderElectionRecord() {
- leaderElectionRecord, err := getLeaderElectionRecord(suite.operator)
- if suite.NoError(err) {
- suite.NotEmpty(leaderElectionRecord.HolderIdentity)
- }
-}
-
-func (suite *HighAvailabilityTestSuite) Test_LeaderFailover() {
- // print the current deployment state
- utils.Kubectl("describe", "deployment", suite.operator.InstanceName, "-n", suite.operator.Namespace)
- utils.Kubectl("get", "all", "-n", suite.operator.Namespace)
-
- operator := suite.operator
- leaderElectionRecord, err := getLeaderElectionRecord(operator)
- if err != nil {
- suite.FailNow(err.Error())
- }
-
- fmt.Println("Current leader: ", leaderElectionRecord.HolderIdentity)
- // deploy workload
- jobName := "mock-task-runner"
- mockTaskRunner := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "30"},
- },
- }
- if err := operator.SubmitJob(&mockTaskRunner); err != nil {
- suite.FailNow(err.Error())
- }
- // wait until the application is running
- if err := operator.WaitForJobState(mockTaskRunner, v1beta2.RunningState); err != nil {
- suite.FailNow(err.Error())
- }
-
- // check leader started processing the application
- logContains, err := utils.PodLogContains(mockTaskRunner.Namespace, leaderElectionRecord.HolderIdentity,
- fmt.Sprintf(processingKeyLogRecordFormat, mockTaskRunner.Namespace, mockTaskRunner.Name))
- if suite.NoError(err) {
- suite.True(logContains)
- }
-
- log.Infof("deleting current leader pod \"%s\"", leaderElectionRecord.HolderIdentity)
- if err := utils.DeleteResource(operator.Namespace, "pod", leaderElectionRecord.HolderIdentity); err != nil {
- suite.FailNow(err.Error())
- }
- var newLeaderPodName string
- // check re-election
- if err := utils.RetryWithTimeout(electionRecordRetryTimeout, electionRecordRetryInterval, func() error {
- if newLeaderElectionRecord, err := getLeaderElectionRecord(operator); err != nil {
- return err
- } else if newLeaderElectionRecord.HolderIdentity == leaderElectionRecord.HolderIdentity {
- return errors.New("Waiting for the new leader to be elected")
- } else {
- log.Info("New leader found: ", newLeaderElectionRecord.HolderIdentity)
- newLeaderPodName = newLeaderElectionRecord.HolderIdentity
- }
- return nil
- }); err != nil {
- suite.FailNow(err.Error())
- }
-
- suite.NoError(operator.WaitForJobState(mockTaskRunner, v1beta2.CompletedState))
-
- // check the new leader started processing the application
- logContains, err = utils.PodLogContains(mockTaskRunner.Namespace, newLeaderPodName,
- fmt.Sprintf(processingKeyLogRecordFormat, mockTaskRunner.Namespace, mockTaskRunner.Name))
-
- if suite.NoError(err) {
- suite.True(logContains)
- }
-}
-
-func getLeaderElectionRecord(operator utils.SparkOperatorInstallation) (*LeaderElectionRecord, error) {
- lockName := operator.Params["leaderElectionLockName"]
- var leaderElectionRecord *LeaderElectionRecord
- err := utils.RetryWithTimeout(electionRecordRetryTimeout, electionRecordRetryInterval, func() error {
- configMap, err := operator.K8sClients.CoreV1().ConfigMaps(operator.Namespace).Get(lockName, v1.GetOptions{})
- if err != nil {
- return err
- } else if configMap == nil {
- return errors.New("LeaderElectionRecord hasn't been created.")
- }
- leaderElectionRecordString := configMap.GetAnnotations()[LeaderElectionRecordAnnotationKey]
- if err := json.Unmarshal([]byte(leaderElectionRecordString), &leaderElectionRecord); err != nil {
- return err
- }
- if len(strings.TrimSpace(leaderElectionRecord.HolderIdentity)) == 0 {
- return errors.New("No leader is currently elected.")
- } else {
- log.Info("LeaderElectionRecord: ", *leaderElectionRecord)
- // check, that leader pod exists
- if _, err := operator.K8sClients.CoreV1().Pods(operator.Namespace).Get(leaderElectionRecord.HolderIdentity, v1.GetOptions{}); err != nil {
- return err
- }
- }
- return nil
- })
- return leaderElectionRecord, err
-}
-
-func convertStructToMap(params interface{}) (map[string]string, error) {
- paramsMap := make(map[string]string)
- fields := structs.Fields(params)
- for _, field := range fields {
- key := strcase.ToLowerCamel(field.Name())
- switch v := field.Value().(type) {
- default:
- return paramsMap, fmt.Errorf("unexpected type %T", v)
- case int:
- paramsMap[key] = strconv.Itoa(field.Value().(int))
- case string:
- paramsMap[key] = field.Value().(string)
- case bool:
- paramsMap[key] = strconv.FormatBool(field.Value().(bool))
- }
- }
- return paramsMap, nil
-}
diff --git a/tests/hdfs_kerberos_test.go b/tests/hdfs_kerberos_test.go
deleted file mode 100644
index 8cf5ecad..00000000
--- a/tests/hdfs_kerberos_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package tests
-
-import (
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- "github.com/stretchr/testify/suite"
- "testing"
- "time"
-)
-
-// note: this shouldn't be changed as per this section:
-//https://github.com/mesosphere/spark-on-k8s-operator/blob/master/docs/user-guide.md#mounting-secrets
-const hadoopTokenFileName = "hadoop.token"
-
-var (
- resourceFolder = "resources/hdfs-kerberos"
- namespace = "hdfs-kerberos"
- hdfsKerberosDeployment = []Resource{
- {
- name: "configmaps",
- path: "configmaps",
- },
- {
- name: "volumes",
- path: "volumes",
- },
- {
- name: "kerberos",
- path: "kerberos-deployment.yaml",
- wait: true,
- },
- {
- name: "hdfs-namenode",
- path: "namenode-deployment.yaml",
- wait: true,
- },
- {
- name: "hdfs-datanode",
- path: "datanode-deployment.yaml",
- wait: true,
- },
- }
- eventLogDir = "hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/history"
- hadoopTokenSecret = "hadoop-token"
- hadoopTokenPath = fmt.Sprint("/tmp/", hadoopTokenFileName)
- waitTimeout = 5 * time.Minute
-)
-
-type Resource struct {
- name string
- path string
- wait bool
-}
-
-type HdfsIntegrationSuite struct {
- operator utils.SparkOperatorInstallation
- suite.Suite
-}
-
-func TestHdfsIntegrationSuite(t *testing.T) {
- suite.Run(t, new(HdfsIntegrationSuite))
-}
-
-func (suite *HdfsIntegrationSuite) SetupSuite() {
- if _, err := utils.Kubectl("create", "ns", namespace); err != nil {
- suite.FailNow("Error while creating namespace", err)
- }
- // deploy KDC and HDFS
- for _, resource := range hdfsKerberosDeployment {
- if _, err := utils.Kubectl("apply", "-f", fmt.Sprint(resourceFolder, "/", resource.path), "-n", namespace); err != nil {
- suite.FailNowf(err.Error(), "Error while creating \"%s\"", resource.name)
- }
- if resource.wait {
- if _, err := utils.Kubectl("wait", fmt.Sprint("deployments/", resource.name),
- "--for=condition=available",
- fmt.Sprintf("--timeout=%v", waitTimeout),
- "-n", namespace); err != nil {
- suite.FailNowf(err.Error(), "Error while waiting for resource \"%s\" to be deployed", resource.name)
- }
- }
- }
- // get the name of a Namenode pod
- nameNodePod, err := utils.Kubectl("get", "pods",
- "--selector=name=hdfs-namenode", "--output=jsonpath={.items[*].metadata.name}", "-n", namespace)
- if err != nil {
- suite.FailNow("Error while getting Namenode pod name", err)
- }
- // run init script to copy test data to HDFS and export delegation token
- if _, err := utils.Kubectl("exec", nameNodePod, "-n", namespace, "--", "init.sh"); err != nil {
- suite.FailNow("Error while running initialization script", err)
- }
-
- // copy delegation token from the pod to a local filesystem
- if _, err := utils.Kubectl("cp", fmt.Sprint(nameNodePod, ":", hadoopTokenPath[1:]),
- hadoopTokenPath, "-n", namespace); err != nil {
- suite.FailNow("Error while copying the delegation token", err)
- }
-}
-
-// invoked before each test
-func (suite *HdfsIntegrationSuite) BeforeTest(suiteName, testName string) {
- utils.Kubectl("create", "ns", utils.DefaultNamespace)
-
- // create a Secret with Hadoop delegation token
- if _, err := utils.Kubectl("create", "secret",
- "generic", hadoopTokenSecret, "--from-file", hadoopTokenPath, "-n", utils.DefaultNamespace); err != nil {
- suite.FailNow("Error while creating a Hadoop token secret", err)
- }
-
- // create ConfigMap with hadoop config files in Spark operator namespace
- utils.Kubectl("apply", "-f", fmt.Sprint(resourceFolder, "/configmaps/hadoop-conf.yaml"), "-n", utils.DefaultNamespace)
-
- suite.operator = utils.SparkOperatorInstallation{
- SkipNamespaceCleanUp: true, // cleanup is done in AfterTest function
- }
-
- if testName == "Test_Spark_Hdfs_Kerberos_SparkHistoryServer" {
- operatorParams := map[string]string{
- "enableHistoryServer": "true",
- "historyServerFsLogDirectory": eventLogDir,
- "delegationTokenSecret": hadoopTokenSecret,
- }
- suite.operator.Params = operatorParams
- }
-
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow(err.Error())
- }
-}
-
-func (suite *HdfsIntegrationSuite) Test_Spark_Hdfs_Kerberos() {
- jobName := "spark-hdfs-kerberos"
- sparkJob := utils.SparkJob{
- Name: jobName,
- Template: fmt.Sprintf("%s.yaml", jobName),
- }
- if err := suite.operator.SubmitJob(&sparkJob); err != nil {
- suite.FailNow(err.Error())
- }
-
- if err := suite.operator.WaitUntilSucceeded(sparkJob); err != nil {
- suite.FailNow(err.Error())
- }
-}
-
-func (suite *HdfsIntegrationSuite) Test_Spark_Hdfs_Kerberos_SparkHistoryServer() {
- jobName := "spark-hdfs-kerberos"
- sparkJob := utils.SparkJob{
- Name: jobName,
- Template: fmt.Sprintf("%s.yaml", jobName),
- Params: map[string]interface{}{
- "SparkConf": map[string]string{
- "spark.eventLog.enabled": "true",
- "spark.eventLog.dir": eventLogDir,
- },
- },
- }
- if err := suite.operator.SubmitJob(&sparkJob); err != nil {
- suite.FailNow(err.Error())
- }
-
- if err := suite.operator.WaitUntilSucceeded(sparkJob); err != nil {
- suite.FailNow(err.Error())
- }
-
- // check the logs to verify the app has been parsed by Spark History Server
- historyServerPodName, _ := utils.Kubectl("get", "pods", "--namespace", suite.operator.Namespace,
- "--selector=app.kubernetes.io/name=spark-history-server", "--output=jsonpath={.items[*].metadata.name}")
-
- logRecord := fmt.Sprintf("FsHistoryProvider: Finished parsing %s/spark-", eventLogDir)
-	retryErr := utils.Retry(func() error {
-		contains, err := utils.PodLogContains(suite.operator.Namespace, historyServerPodName, logRecord)
-		if err != nil {
-			return err
-		} else if !contains {
-			return fmt.Errorf("text is not present in the logs, retrying")
-		}
-		return nil
-	})
-	suite.NoError(retryErr, "expected log record was not found in Spark History Server logs")
-}
-
-func (suite *HdfsIntegrationSuite) AfterTest(suiteName, testName string) {
- suite.operator.CleanUp()
-}
-
-func (suite *HdfsIntegrationSuite) TearDownSuite() {
- utils.Kubectl("delete", "ns", namespace, "--wait=true")
-}
diff --git a/tests/manifests/assert-mock-task-runner.yaml b/tests/manifests/assert-mock-task-runner.yaml
new file mode 100644
index 00000000..5be85a2e
--- /dev/null
+++ b/tests/manifests/assert-mock-task-runner.yaml
@@ -0,0 +1,15 @@
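+# kuttl treats the objects below as expected state: this step passes once a SparkApplication
+# named mock-task-runner reports COMPLETED within the timeout (in seconds). The collectors,
+# where supported by the kuttl version in use, dump logs from pods matching the given
+# selectors when the assertion fails.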
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/manifests/install-operator.yaml b/tests/manifests/install-operator.yaml
new file mode 100644
index 00000000..63d9955f
--- /dev/null
+++ b/tests/manifests/install-operator.yaml
@@ -0,0 +1,14 @@
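+# This step installs the Spark operator with KUDO into the test namespace created by kuttl
+# ($NAMESPACE). ROOT_DIR, OPERATOR_DOCKER_REPO and OPERATOR_VERSION are expected to be set in
+# the environment of the test runner. The referenced manifests describe the expected outcome:
+# a running KUDO controller pod and a completed deploy plan for the "spark" Instance.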
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --instance=spark \
+ --namespace=$NAMESPACE \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE
+assert:
+ - ../manifests/kudo-controller.yaml
+ - ../manifests/spark-instance.yaml
diff --git a/tests/manifests/kudo-controller.yaml b/tests/manifests/kudo-controller.yaml
new file mode 100644
index 00000000..acfa648a
--- /dev/null
+++ b/tests/manifests/kudo-controller.yaml
@@ -0,0 +1,7 @@
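+# Expected state: the KUDO controller manager pod is Running in the kudo-system namespace.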
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kudo-controller-manager-0
+ namespace: kudo-system
+status:
+ phase: Running
diff --git a/tests/manifests/spark-instance.yaml b/tests/manifests/spark-instance.yaml
new file mode 100644
index 00000000..069111b6
--- /dev/null
+++ b/tests/manifests/spark-instance.yaml
@@ -0,0 +1,8 @@
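+# Expected state: the deploy plan of the KUDO Instance "spark" has finished successfully.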
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
diff --git a/tests/templates/spark-pi.yaml b/tests/manifests/spark-pi.yaml
similarity index 57%
rename from tests/templates/spark-pi.yaml
rename to tests/manifests/spark-pi.yaml
index 553b5beb..94d31056 100644
--- a/tests/templates/spark-pi.yaml
+++ b/tests/manifests/spark-pi.yaml
@@ -1,35 +1,34 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: spark-pi
spec:
type: Scala
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainClass: org.apache.spark.examples.SparkPi
- mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-{{ .SparkVersion }}.jar"
+ mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar"
arguments:
- - "10"
- {{- if .Params.BatchScheduler }}
- batchScheduler: {{ .Params.BatchScheduler }}
- {{- end }}
+ - "10"
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
"spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 1
memory: "512m"
+ deleteOnTermination: false
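+    # keep executor pods around after the application finishes, presumably so that test
+    # asserts and log collectors can still inspect them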
labels:
- version: {{ .SparkVersion }}
+ version: 3.0.0
diff --git a/tests/metrics_test.go b/tests/metrics_test.go
deleted file mode 100644
index 8e110437..00000000
--- a/tests/metrics_test.go
+++ /dev/null
@@ -1,299 +0,0 @@
-package tests
-
-import (
- "bytes"
- "context"
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- "github.com/prometheus/client_golang/api"
- "github.com/prometheus/client_golang/api/prometheus/v1"
- log "github.com/sirupsen/logrus"
- "github.com/stretchr/testify/suite"
- "io"
- v12 "k8s.io/api/core/v1"
- . "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/clientcmd"
- "k8s.io/client-go/tools/portforward"
- "k8s.io/client-go/transport/spdy"
- "net/http"
- "net/url"
- "os/exec"
- "regexp"
- "strings"
- "testing"
- "time"
-)
-
-const dashboardsDir = "../operators/repository/spark/docs/latest/resources/dashboards"
-const jobName = "mock-task-runner"
-const jobTemplate = "spark-mock-task-runner-with-monitoring.yaml"
-const prometheusNamespace = "kubeaddons"
-const prometheusPort = 9090
-const queryTimeout = 5 * time.Second
-const queryRetryDelay = 1 * time.Second
-const contextTimeout = 5 * time.Second
-
-type MetricsTestSuite struct {
- operator utils.SparkOperatorInstallation
- suite.Suite
-}
-
-type PortForwardProps struct {
- Pod v12.Pod
- LocalPort int
- PodPort int
- Out, ErrOut io.Writer
- // StopCh channel is used to stop portforward
- StopCh <-chan struct{}
- // ReadyCh channel is updated when portforward is ready
- ReadyCh chan struct{}
-}
-
-func TestMetricsSuite(t *testing.T) {
- suite.Run(t, new(MetricsTestSuite))
-}
-
-func (suite *MetricsTestSuite) SetupSuite() {
- suite.operator = utils.SparkOperatorInstallation{
- Namespace: "spark-operator-metrics",
- Params: map[string]string{
- "enableMetrics": "true",
- },
- }
-
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow("Error while installing the operator", err)
- }
-}
-
-func (suite *MetricsTestSuite) TearDownSuite() {
- suite.operator.CleanUp()
-}
-
-func (suite *MetricsTestSuite) TestMetricsInPrometheus() {
- // capture test start time for later use in Prometheus queries
- testStartTime := time.Now()
-
- // to initiate application-specific metrics generation, we need to create a workload by submitting
- // two applications with different results (successful and failed).
- if err := submitJobs(suite); err != nil {
- suite.FailNow("Error while submitting a job", err)
- }
-
- // get prometheus pod name
- prometheusPodName, err := utils.Kubectl("get", "pod",
- "--namespace", prometheusNamespace,
- "--selector", "app=prometheus",
- "--output", "jsonpath={.items[*].metadata.name}")
-
- if err != nil {
- suite.FailNow("Prometheus pod not found", err)
- }
-
- // start a port-forward as a go-routine to directly communicate with Prometheus API
- stopCh, readyCh := make(chan struct{}, 1), make(chan struct{}, 1)
- out, errOut := new(bytes.Buffer), new(bytes.Buffer)
- go func() {
- err := startPortForward(PortForwardProps{
- Pod: v12.Pod{
- ObjectMeta: ObjectMeta{
- Name: prometheusPodName,
- Namespace: prometheusNamespace,
- },
- },
- LocalPort: prometheusPort,
- PodPort: prometheusPort,
- Out: out,
- ErrOut: errOut,
- StopCh: stopCh,
- ReadyCh: readyCh,
- })
- if err != nil {
- suite.FailNow("Error while creating port-forward", err)
- }
- }()
-
- select {
- case <-readyCh:
- if len(errOut.String()) != 0 {
- suite.FailNow(errOut.String())
- } else if len(out.String()) != 0 {
- log.Info(out.String())
- }
- break
- }
-
- client, err := api.NewClient(api.Config{
- Address: fmt.Sprintf("http://localhost:%d", prometheusPort),
- })
- if err != nil {
- suite.Fail(err.Error(), "Error creating Prometheus client")
- }
-
- operatorPodName, err := suite.operator.GetOperatorPodName()
- if err != nil {
- suite.FailNow("Error getting operator pod name", err)
- }
-
- // collect and prepare Prometheus queries
- queries, err := collectQueries([]string{
- "$Spark_Operator_Instance", operatorPodName,
- "$app_name", jobName,
- "$namespace", suite.operator.Namespace,
- "\\\"", "\""})
-
- if err != nil {
- suite.FailNow("Error parsing Prometheus queries", err)
- }
-
- v1api := v1.NewAPI(client)
-
- timeRange := v1.Range{
- Start: testStartTime,
- End: time.Now().Add(10 * time.Minute),
- Step: 10 * time.Second,
- }
-
- //TODO (akirillov): after the upgrade to new Spark/K8s/Prometheus, some of the metric names changed
- // this should be addressed by refactoring the dashboard. For now, we consider the
- // test successful if at least 5 queries succeeded
- successful := make([]string, 0)
- failed := make([]string, 0)
-
-	for _, query := range queries {
-		if err := suite.queryPrometheus(query, v1api, timeRange); err != nil {
-			log.Warnf("Error while executing the query \"%s\". Error: %v", query, err)
-			failed = append(failed, query)
-			continue
-		}
-
-		successful = append(successful, query)
-	}
- // stop PortForward connection
- close(stopCh)
-
- log.Infof("Queries launched successfully:\n%v", successful)
- log.Infof("Failed queries:\n%v", failed)
-
- if len(successful) < 5 {
- suite.Fail("Insufficient number of successful queries. Check logs for details")
- }
-}
-
-func (suite *MetricsTestSuite) queryPrometheus(query string, v1api v1.API, timeRange v1.Range) error {
- return utils.RetryWithTimeout(queryTimeout, queryRetryDelay, func() error {
- ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
- defer cancel()
- log.Infof("Executing query: \"%s\"", query)
- result, warnings, err := v1api.QueryRange(ctx, query, timeRange)
- if err != nil {
-			log.Errorf("Query failed: %v", err)
- return err
- }
- if len(warnings) > 0 {
- log.Warnf("Warnings: %v\n", warnings)
- }
- if len(result.String()) == 0 {
- return fmt.Errorf("no metrics found for query %v", query)
- }
- log.Infof("Result: %v", result)
- return nil
- })
-}
-
-// submitJobs creates two SparkApplications: the first one completes successfully, while the second one is submitted
-// with misconfigured arguments and is expected to fail. With this approach the operator generates all the required
-// metrics used in the queries under test
-func submitJobs(suite *MetricsTestSuite) error {
- job := utils.SparkJob{
- Name: fmt.Sprintf("%s-failed", jobName),
- Template: jobTemplate,
- Params: map[string]interface{}{
- "args": []string{"1"},
- },
- }
-
- if err := suite.operator.SubmitJob(&job); err != nil {
- return err
- }
-
- job = utils.SparkJob{
- Name: jobName,
- Template: jobTemplate,
- Params: map[string]interface{}{
- "args": []string{"2", "60"},
- },
- }
-
- if err := suite.operator.SubmitJob(&job); err != nil {
- return err
- }
-
- if err := suite.operator.WaitUntilSucceeded(job); err != nil {
- return err
- }
- return nil
-}
-
-// this method 'grep's all prometheus queries from dashboards files located in 'dashboardsDir'
-// and replaces metric label placeholders with the real data
-func collectQueries(replacements []string) ([]string, error) {
-
- // define metrics which cannot be verified
- var excludedMetrics = []string{
- "spark_app_executor_failure_count",
- }
-
- command := exec.Command("grep", "--no-filename", "\"expr\"",
- fmt.Sprintf("%s/%s", dashboardsDir, "grafana_spark_operator.json"),
- fmt.Sprintf("%s/%s", dashboardsDir, "grafana_spark_applications.json"))
-
- output, err := command.CombinedOutput()
- if err != nil {
- log.Error(string(output))
- return nil, err
- }
- replacer := strings.NewReplacer(replacements...)
-
- var queries []string
- pattern := regexp.MustCompile("\"expr\": \"([a-z_\\d(]+{.*}\\)?)")
-
- for _, line := range strings.Split(string(output), "\n") {
- if len(line) > 0 {
- query := pattern.FindStringSubmatch(strings.TrimSpace(line))[1]
- query = replacer.Replace(query)
-			excluded := false
-			for _, metric := range excludedMetrics {
-				if strings.Contains(query, metric) {
-					excluded = true
-					break
-				}
-			}
-			if !excluded {
-				queries = append(queries, query)
-			}
- }
- }
- return queries, nil
-}
-
-// this method creates port forwarding request based on 'PortForwardProps'
-func startPortForward(props PortForwardProps) error {
- path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", props.Pod.Namespace, props.Pod.Name)
- config, err := clientcmd.BuildConfigFromFlags("", utils.KubeConfig)
-
- if err != nil {
- return err
- }
-
- roundTripper, upgrader, err := spdy.RoundTripperFor(config)
-
- if err != nil {
- return err
- }
-
- serverURL := url.URL{Scheme: "https", Path: path, Host: strings.TrimLeft(config.Host, "htps:/")}
- dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL)
-
- fw, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", props.LocalPort, props.PodPort)},
- props.StopCh, props.ReadyCh, props.Out, props.ErrOut)
- if err != nil {
- return err
- }
-
- return fw.ForwardPorts()
-}
diff --git a/tests/network_test.go b/tests/network_test.go
deleted file mode 100644
index df8d69d9..00000000
--- a/tests/network_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package tests
-
-import (
- "errors"
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- v12 "k8s.io/api/core/v1"
- "testing"
-)
-
-/*
- Test that `hostNetwork` in SparkApplication propagates to driver and executor pods
-*/
-func TestHostNetworkPropagation(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err)
- }
-
- var testCases = []struct {
- driverHN bool
- executorHN bool
- }{
- {false, false},
- {true, false},
- {false, true},
- {true, true},
- }
-
- for i, tc := range testCases {
- log.Infof("Running test case:\n- driver host network:\t\t%v\n- executor host network:\t%v", tc.driverHN, tc.executorHN)
- jobName := fmt.Sprintf("host-network-test-job-%d", i)
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job-host-network.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "600"},
- "driverHostNetwork": tc.driverHN,
- "executorHostNetwork": tc.executorHN,
- },
- }
-
- // Submit the job and wait for it to start
- err = spark.SubmitAndWaitForExecutors(&job)
- if err != nil {
- t.Fatal(err)
- }
-
- // Verify driver pod hostNetwork and dnsPolicy values
- driver, err := spark.DriverPod(job)
- if err != nil {
- t.Fatal(err)
- }
- err = verifyPodHostNetwork(&driver, tc.driverHN)
- log.Infof("Verifying driver %s spec values", driver.Name)
- if err != nil {
- t.Fatal(err)
- }
-
- // Verify executor pods hostNetwork and dnsPolicy values
- executors, err := spark.ExecutorPods(job)
- if err != nil {
- t.Fatal(err)
- }
- for _, executor := range executors {
- log.Infof("Verifying executor %s spec values", executor.Name)
- err = verifyPodHostNetwork(&executor, tc.executorHN)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- // Terminate the job while it's running
- spark.DeleteJob(job)
- }
-}
-
-func verifyPodHostNetwork(pod *v12.Pod, expectedHostNetwork bool) error {
- log.Infof("Pod spec.hostNetwork: %v", pod.Spec.HostNetwork)
- log.Infof("Pod spec.dnspolicy: %v", pod.Spec.DNSPolicy)
-
- // Check spec values
- if pod.Spec.HostNetwork != expectedHostNetwork {
-		return fmt.Errorf("unexpected hostNetwork value for pod %s: %v, expected %v", pod.Name, pod.Spec.HostNetwork, expectedHostNetwork)
-	} else if expectedHostNetwork && pod.Spec.DNSPolicy != v12.DNSClusterFirstWithHostNet {
-		return fmt.Errorf("expected pod DNS policy to be \"ClusterFirstWithHostNet\", but it's %s", pod.Spec.DNSPolicy)
- }
-
- // Check pod IP
- log.Infof("Pod status.podIP: %v", pod.Status.PodIP)
- log.Infof("Pod status.hostIP: %v", pod.Status.HostIP)
- if expectedHostNetwork && pod.Status.PodIP != pod.Status.HostIP {
- return errors.New(fmt.Sprintf("Pod %s IP doesn't match the host IP", pod.Name))
- } else if !expectedHostNetwork && pod.Status.PodIP == pod.Status.HostIP {
- return errors.New(fmt.Sprintf("Pod %s IP matches the host IP", pod.Name))
- }
-
- return nil
-}
diff --git a/tests/resources/test-mount-config-map/spark-defaults.conf b/tests/resources/test-mount-config-map/spark-defaults.conf
deleted file mode 100644
index 730ff0f7..00000000
--- a/tests/resources/test-mount-config-map/spark-defaults.conf
+++ /dev/null
@@ -1 +0,0 @@
-# An empty configuration file for test purposes
diff --git a/tests/run.sh b/tests/run.sh
deleted file mode 100755
index c794738e..00000000
--- a/tests/run.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# The following environment variables are used to parameterize test execution:
-# - KUBECONFIG
-# - OPERATOR_IMAGE
-# - SPARK_IMAGE
-# - SPARK_VERSION
-# Default values can be looked up in tests/utils/common.go
-pushd $TEST_DIR
-
-if [ -n "$TEAMCITY_VERSION" ]; then
- # Running in Teamcity, therefore we need to convert `go test` output accordingly
- go test -v -count=1 -json -timeout 60m .
-else
- go test -v -count=1 -timeout 60m .
-fi
-
-EXIT_CODE=$?
-
-popd
-
-exit $EXIT_CODE
diff --git a/tests/s3_test.go b/tests/s3_test.go
deleted file mode 100644
index d571939a..00000000
--- a/tests/s3_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package tests
-
-import (
- "fmt"
- . "github.com/google/uuid"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
-
- "github.com/stretchr/testify/suite"
- "testing"
-)
-
-const sparkApplicationName = "spark-s3-readwrite"
-const s3FileName = "README.md"
-
-type S3TestSuite struct {
- operator utils.SparkOperatorInstallation
- suite.Suite
- awsCredentials map[string][]byte
- s3BucketName string
- s3BucketPath string
-}
-
-func TestS3Suite(t *testing.T) {
- suite.Run(t, new(S3TestSuite))
-}
-
-func (suite *S3TestSuite) SetupSuite() {
- installOperator(suite)
- createAwsCredentialsSecret(suite)
- if s3BucketName, err := utils.GetS3BucketName(); err != nil {
- suite.FailNow("Failed to setup suite", err)
- } else {
- suite.s3BucketName = s3BucketName
- }
- if s3BucketPath, err := utils.GetS3BucketPath(); err != nil {
- suite.FailNow("Failed to setup suite", err)
- } else {
- suite.s3BucketPath = s3BucketPath
- }
-}
-
-func (suite *S3TestSuite) TearDownSuite() {
- suite.operator.CleanUp()
-}
-
-func installOperator(suite *S3TestSuite) {
- suite.operator = utils.SparkOperatorInstallation{}
-
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow(err.Error())
- }
-}
-
-func createAwsCredentialsSecret(suite *S3TestSuite) {
- awsCredentials, err := utils.GetAwsCredentials()
- if err != nil {
- suite.FailNow(err.Error())
- }
- suite.awsCredentials = awsCredentials
- if err := utils.CreateSecretEncoded(
- suite.operator.K8sClients,
- utils.DefaultAwsSecretName,
- suite.operator.Namespace,
- awsCredentials,
- ); err != nil {
- suite.FailNow("Error while creating a Secret", err)
- }
-}
-
-// test verifies read/write access to S3 bucket using EnvironmentVariableCredentialsProvider for authentication
-// by reading a file from S3 bucket, counting the lines and writing the result to another S3 location.
-// AWS environment variables are propagated to a driver and executors via a Secret object
-func (suite *S3TestSuite) TestS3ReadWriteAccess() {
-
- testFolder := fmt.Sprint("s3a://", suite.s3BucketName, "/", suite.s3BucketPath, "/", sparkApplicationName)
- fileFolder := fmt.Sprint(testFolder, "/", s3FileName)
- uuid := New().String()
- writeToFolder := fmt.Sprint(testFolder, "/", uuid)
-
- params := map[string]interface{}{
- // the name of a Secret with AWS credentials
- "AwsSecretName": utils.DefaultAwsSecretName,
- "ReadUrl": fileFolder,
- "WriteUrl": writeToFolder,
- }
- if _, present := suite.awsCredentials[utils.AwsSessionToken]; present {
- params["AwsSessionToken"] = "true"
- }
-
- sparkS3App := utils.SparkJob{
- Name: sparkApplicationName,
- Template: fmt.Sprintf("%s.yaml", sparkApplicationName),
- Params: params,
- }
-
- if err := suite.operator.SubmitJob(&sparkS3App); err != nil {
- suite.FailNow("Error submitting SparkApplication", err)
- }
-
- if err := suite.operator.WaitUntilSucceeded(sparkS3App); err != nil {
- driverLog, _ := suite.operator.DriverLog(sparkS3App)
- log.Info("Driver logs:\n", driverLog)
- suite.FailNow(err.Error())
- }
-
- if driverLogContains, err := suite.operator.DriverLogContains(sparkS3App, "Wrote 105 lines"); err != nil {
- log.Warn("Error while getting pod logs", err)
- } else {
- suite.True(driverLogContains)
- }
-
- // clean up S3 folder
- if err := utils.AwsS3DeleteFolder(suite.s3BucketName, fmt.Sprint(testFolder, "/", uuid)); err != nil {
- log.Warn(err)
- }
-}
diff --git a/tests/security_test.go b/tests/security_test.go
deleted file mode 100644
index 61e4b63a..00000000
--- a/tests/security_test.go
+++ /dev/null
@@ -1,449 +0,0 @@
-package tests
-
-import (
- "errors"
- "fmt"
- "gotest.tools/assert"
- v1 "k8s.io/api/core/v1"
- "strconv"
- "strings"
- "testing"
-
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
-)
-
-type securityTestCase interface {
- common() *commonTestCaseDetails
- getJobServiceAccount() string
- prepare(*kubernetes.Clientset) error
- cleanup(*kubernetes.Clientset)
- verify(*utils.SparkOperatorInstallation) error
-}
-
-type commonTestCaseDetails struct {
- name string
- instanceName string
- namespace string
- params map[string]string
-}
-
-func (c *commonTestCaseDetails) common() *commonTestCaseDetails {
- return c
-}
-
-func (c *commonTestCaseDetails) getJobServiceAccount() string {
- return c.instanceName + utils.DefaultServiceAccountSuffix
-}
-
-type serviceAccountTestCase struct {
- commonTestCaseDetails
- expectedOperatorSA string
- prepareOperatorSA bool
- expectedDriverSA string
- prepareDriverSA bool
-}
-
-func (tc *serviceAccountTestCase) getJobServiceAccount() string {
- return tc.expectedDriverSA
-}
-
-// Prepare SAs before installation if needed
-func (tc *serviceAccountTestCase) prepare(client *kubernetes.Clientset) error {
- if tc.prepareOperatorSA {
- err := utils.CreateServiceAccount(client, tc.expectedOperatorSA, tc.namespace)
- if err != nil {
- log.Errorf("Can't create operator service account '%s'", tc.expectedOperatorSA)
- return err
- }
- }
- if tc.prepareDriverSA {
- err := utils.CreateServiceAccount(client, tc.expectedDriverSA, tc.namespace)
- if err != nil {
- log.Errorf("Can't create spark driver service account '%s'", tc.expectedDriverSA)
- return err
- }
- }
-
- return nil
-}
-
-func (tc *serviceAccountTestCase) cleanup(*kubernetes.Clientset) {
- // Nothing to clean up
-}
-
-// Verify that SAs exists
-func (tc *serviceAccountTestCase) verify(spark *utils.SparkOperatorInstallation) error {
-
- _, err := spark.K8sClients.CoreV1().ServiceAccounts(spark.Namespace).Get(tc.expectedOperatorSA, metav1.GetOptions{})
- if err != nil {
- log.Errorf("Can't get operator service account '%s'", tc.expectedOperatorSA)
- return err
- }
-
- _, err = spark.K8sClients.CoreV1().ServiceAccounts(spark.Namespace).Get(tc.expectedDriverSA, metav1.GetOptions{})
- if err != nil {
- log.Errorf("Can't get Spark driver service account '%s'", tc.expectedDriverSA)
- return err
- }
-
- return nil
-}
-
-type rbacTestCase struct {
- commonTestCaseDetails
- prepareRBAC bool
-}
-
-func (tc *rbacTestCase) prepare(client *kubernetes.Clientset) error {
- if tc.prepareRBAC {
- log.Infof("Preparing RBAC entities before installing the operator")
- const rbacTemplate = "security_test_rbac.yaml"
- sparkSA := tc.instanceName + utils.DefaultServiceAccountSuffix
- const operatorSA = "spark-operator-test-service-account"
-
- // Create and apply RBAC template
- err := utils.KubectlApplyTemplate(tc.namespace, rbacTemplate, map[string]interface{}{
- "service-account": sparkSA,
- "operator-service-account": operatorSA,
- "service-account-namespace": tc.namespace,
- "instance-name": tc.instanceName,
- })
- if err != nil {
- return err
- }
-
- // Add additional parameters to use provided service accounts
- tc.params["createOperatorServiceAccount"] = "false"
- tc.params["createSparkServiceAccount"] = "false"
- tc.params["operatorServiceAccountName"] = operatorSA
- tc.params["sparkServiceAccountName"] = sparkSA
- }
- return nil
-}
-
-// Clean up cluster-wide resources at the end of the test
-func (tc *rbacTestCase) cleanup(*kubernetes.Clientset) {
- utils.DeleteResource("default", "clusterrole", "spark-operator-test-cluster-role")
- utils.DeleteResource("default", "clusterrolebinding", "spark-operator-test-cluster-role-binding")
-}
-
-func (tc *rbacTestCase) verify(spark *utils.SparkOperatorInstallation) error {
- // Verify spark and operator roles
- croles, err := spark.K8sClients.RbacV1().ClusterRoles().List(metav1.ListOptions{
- LabelSelector: "app.kubernetes.io/instance = " + spark.InstanceName,
- })
- if err != nil {
- return err
- } else if len(croles.Items) != 1 {
- return errors.New(fmt.Sprintf("Was expecting to find only one ClusterRole for the instance, but %d were found instead", len(croles.Items)))
- }
- log.Infof("Found a ClusterRole for instance %s: %s", spark.InstanceName, croles.Items[0].Name)
-
- roles, err := spark.K8sClients.RbacV1().Roles(spark.Namespace).List(metav1.ListOptions{
- LabelSelector: "app.kubernetes.io/instance = " + spark.InstanceName,
- })
- if err != nil {
- return err
- } else if len(roles.Items) != 1 {
- return errors.New(fmt.Sprintf("Was expecting to find only one Role for the instance, but %d were found instead", len(roles.Items)))
- }
- log.Infof("Found a Role for instance %s: %s", spark.InstanceName, roles.Items[0].Name)
-
- return nil
-}
-
-func TestServiceAccounts(t *testing.T) {
- instanceName := utils.GenerateInstanceName()
- testCases := []serviceAccountTestCase{
- {
- commonTestCaseDetails: commonTestCaseDetails{
- name: "DefaultConfiguration",
- instanceName: instanceName,
- namespace: "sa-test-default",
- params: map[string]string{
- "operatorServiceAccountName": "spark-operator-service-account",
- "sparkServiceAccountName": "spark-service-account",
- },
- },
- expectedOperatorSA: instanceName + "-spark-operator-service-account",
- expectedDriverSA: instanceName + "-spark-service-account",
- },
- {
- commonTestCaseDetails: commonTestCaseDetails{
- name: "ProvidedOperatorServiceAccount",
- instanceName: instanceName,
- namespace: "sa-test-operator",
- params: map[string]string{
- "operatorServiceAccountName": "custom-operator-sa",
- "createOperatorServiceAccount": "false",
- "sparkServiceAccountName": "spark-service-account",
- },
- },
- prepareOperatorSA: true,
- expectedOperatorSA: "custom-operator-sa",
- expectedDriverSA: instanceName + "-spark-service-account",
- },
- {
- commonTestCaseDetails: commonTestCaseDetails{
- name: "ProvidedSparkServiceAccount",
- namespace: "sa-test-spark",
- instanceName: instanceName,
- params: map[string]string{
- "operatorServiceAccountName": "spark-operator-service-account",
- "createSparkServiceAccount": "false",
- "sparkServiceAccountName": "custom-spark-sa",
- },
- },
- prepareDriverSA: true,
- expectedOperatorSA: instanceName + "-spark-operator-service-account",
- expectedDriverSA: "custom-spark-sa",
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- err := runTestCase(&tc)
- if err != nil {
- t.Errorf("Test case: %v\nfailed with error: %s", tc, err)
- }
- })
- }
-}
-
-func TestRoleBasedAccessControl(t *testing.T) {
- testCases := []rbacTestCase{
- {
- commonTestCaseDetails: commonTestCaseDetails{
- name: "CreateDefaultRBAC",
- namespace: "rbac-test-default",
- instanceName: utils.GenerateInstanceName(),
- params: map[string]string{
- "createRBAC": "true",
- },
- },
- },
- {
- commonTestCaseDetails: commonTestCaseDetails{
- name: "ProvidedRBAC",
- namespace: "rbac-test-provided",
- instanceName: utils.GenerateInstanceName(),
- params: map[string]string{
- "createRBAC": "false",
- },
- },
- prepareRBAC: true,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- err := runTestCase(&tc)
- if err != nil {
- t.Errorf("Test case: %v\nfailed with error: %s", tc, err)
- }
- })
- }
-}
-
-func runTestCase(tc securityTestCase) error {
- client, err := utils.GetK8sClientSet()
- if err != nil {
- return err
- }
-
- utils.DropNamespace(client, tc.common().namespace)
- _, err = utils.CreateNamespace(client, tc.common().namespace)
- if err != nil {
- return err
- }
-
- err = tc.prepare(client)
- defer tc.cleanup(client)
- if err != nil {
- return err
- }
-
- // Install spark operator
- spark := utils.SparkOperatorInstallation{
- Namespace: tc.common().namespace,
- SkipNamespaceCleanUp: true,
- Params: tc.common().params,
- InstanceName: tc.common().instanceName,
- }
-
- err = spark.InstallSparkOperator()
- defer spark.CleanUp()
- if err != nil {
- return err
- }
-
- err = tc.verify(&spark)
- if err != nil {
- return err
- }
-
- // Run a test job
- jobName := "mock-task-runner"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- ServiceAccount: tc.getJobServiceAccount(),
- Params: map[string]interface{}{
- "args": []string{"1", "15"},
- },
- }
-
- err = spark.SubmitJob(&job)
- if err != nil {
- return err
- }
-
- err = spark.WaitUntilSucceeded(job)
- if err != nil {
- return err
- }
-
- return err
-}
-
-func TestEnvBasedSecrets(t *testing.T) {
- err := runSecretTest("env-based-secret", "", "secretKey", "set to the key 'secretKey' in secret 'env-based-secret'")
-
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestFileBasedSecrets(t *testing.T) {
- err := runSecretTest("file-based-secret", "/mnt/secrets", "", "/mnt/secrets from file-based-secret-volume")
-
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func runSecretTest(secretName string, secretPath string, secretKey string, expectedSecret string) error {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- return err
- }
-
- secretData := make(map[string]string)
- if secretKey != "" {
- secretData[secretKey] = "secretValue"
- } else {
- secretData["secretKey"] = "secretValue"
- }
-
- err = utils.CreateSecretPlain(spark.K8sClients, secretName, spark.Namespace, secretData)
- if err != nil {
- return err
- }
-
- jobName := "mock-task-runner"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "15"},
- "SecretName": secretName,
- "SecretPath": secretPath,
- "SecretKey": secretKey,
- },
- }
-
- err = spark.SubmitJob(&job)
- if err != nil {
- return err
- }
-
- err = spark.WaitUntilSucceeded(job)
- if err != nil {
- return err
- }
-
- jobDescription, err := utils.Kubectl(
- "describe",
- "pod",
- "--namespace="+spark.Namespace,
- utils.DriverPodName(jobName),
- )
- if err != nil {
- return err
- }
-
- if strings.Contains(jobDescription, expectedSecret) {
- if secretKey != "" {
- log.Infof("Successfully set environment variable to the key '%s' in secret '%s'", secretKey, secretName)
- } else {
- log.Infof("Successfully mounted secret path '%s' from '%s-volume'", secretPath, secretName)
- }
- } else {
- if secretKey != "" {
-			return fmt.Errorf("unable to set environment variable to the key '%s' in secret '%s'", secretKey, secretName)
- }
-		return fmt.Errorf("unable to mount secret path '%s' from '%s-volume'", secretPath, secretName)
- }
-
- return nil
-}
-
-func TestPodSecurityContext(t *testing.T) {
-
- // run driver and executor as user "nobody"
- const uid = "65534"
- const gid = "65534"
-
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- assert.NilError(t, err)
-
- jobName := "mock-task-runner"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "10"},
- "RunAsUser": uid,
- "RunAsGroup": gid,
- },
- }
-
- err = spark.SubmitAndWaitForExecutors(&job)
- assert.NilError(t, err)
-
-	executorPods, err := spark.ExecutorPods(job)
-	assert.NilError(t, err)
-
-	// verify uid propagated to the driver
-	logContains, err := spark.DriverLogContains(job, fmt.Sprintf("myuid=%s", uid))
-	assert.NilError(t, err)
-	assert.Assert(t, logContains, "uid %q not found in log", uid)
-
- driver, err := spark.DriverPod(job)
- assert.NilError(t, err)
-
- uidInt, _ := strconv.ParseInt(uid, 10, 64)
- gidInt, _ := strconv.ParseInt(gid, 10, 64)
-
- verifyPodSecurityContext := func(pod v1.Pod) {
- securityContext := pod.Spec.SecurityContext
- assert.Check(t, *securityContext.RunAsUser == uidInt,
- fmt.Sprintf("uids don't match! %d != %d", *securityContext.RunAsUser, uidInt))
- assert.Check(t, *securityContext.RunAsGroup == gidInt,
- fmt.Sprintf("gids don't match! %d != %d", *securityContext.RunAsGroup, gidInt))
- }
-
- verifyPodSecurityContext(driver)
- verifyPodSecurityContext(executorPods[0])
-
- err = spark.WaitUntilSucceeded(job)
- assert.NilError(t, err)
-}
diff --git a/tests/smoke_test.go b/tests/smoke_test.go
deleted file mode 100644
index 3bdd9364..00000000
--- a/tests/smoke_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package tests
-
-import (
- "errors"
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- v12 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "strconv"
- "strings"
- "testing"
-)
-
-func TestShuffleAppDriverOutput(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err)
- }
-
- const expectedExecutorCount = 2
- const expectedGroupCount = 12000
-
- jobName := "shuffle-app"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-shuffle-job.yaml",
- ExecutorsCount: expectedExecutorCount,
- Params: map[string]interface{}{
- "args": []string{"4", strconv.Itoa(expectedGroupCount), "100", "4", "1500"},
- },
- }
-
- // Submit the job and wait for it to start
- err = spark.SubmitAndWaitForExecutors(&job)
- if err != nil {
- t.Fatal(err)
- }
-
- err = spark.WaitForOutput(job, fmt.Sprintf("Groups count: %d", expectedGroupCount))
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestRunningAppDeletion(t *testing.T) {
- spark := utils.SparkOperatorInstallation{}
- err := spark.InstallSparkOperator()
- defer spark.CleanUp()
-
- if err != nil {
- t.Fatal(err)
- }
-
- jobName := "mock-task-runner"
- job := utils.SparkJob{
- Name: jobName,
- Template: "spark-mock-task-runner-job.yaml",
- Params: map[string]interface{}{
- "args": []string{"1", "600"},
- },
- }
-
- // Submit the job and wait for it to start
- err = spark.SubmitAndWaitForExecutors(&job)
- if err != nil {
- t.Fatal(err)
- }
-
- // Terminate the job while it's running
- spark.DeleteJob(job)
-
- // Make sure no executors or drivers left
- log.Info("Verifying that all executors and drivers are terminated")
- err = utils.Retry(func() error {
- // Get all pods named mock-task-runner*
- var jobPods []v12.Pod
- pods, _ := spark.K8sClients.CoreV1().Pods(spark.Namespace).List(v1.ListOptions{})
- for _, pod := range pods.Items {
- if strings.HasPrefix(pod.Name, jobName) {
- jobPods = append(jobPods, pod)
- }
- }
-
- if len(jobPods) != 0 {
- for _, pod := range jobPods {
- log.Infof("found %s - %s", pod.Name, pod.Status.Phase)
- }
-
- return errors.New("there are still pods left after the job termination")
- }
- return nil
- })
- if err != nil {
- t.Error(err.Error())
- }
-}
diff --git a/tests/templates/security_test_rbac.yaml b/tests/templates/security_test_rbac.yaml
deleted file mode 100644
index 4d37552a..00000000
--- a/tests/templates/security_test_rbac.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ index . "service-account" }}
- labels:
- app.kubernetes.io/name: spark
- app.kubernetes.io/instance: {{ index . "instance-name" }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: spark-driver-test-role
- labels:
- app.kubernetes.io/name: spark
- app.kubernetes.io/instance: {{ index . "instance-name" }}
-rules:
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["*"]
- - apiGroups: [""]
- resources: ["services"]
- verbs: ["*"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: spark-driver-role-test-binding
-subjects:
- - kind: ServiceAccount
- name: {{ index . "service-account" }}
-roleRef:
- kind: Role
- name: spark-driver-test-role
- apiGroup: rbac.authorization.k8s.io
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ index . "operator-service-account" }}
- labels:
- app.kubernetes.io/name: spark
- app.kubernetes.io/instance: {{ index . "instance-name" }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: spark-operator-test-cluster-role
- labels:
- app.kubernetes.io/name: spark
- app.kubernetes.io/instance: {{ index . "instance-name" }}
-rules:
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["*"]
- - apiGroups: [""]
- resources: ["services", "configmaps", "secrets"]
- verbs: ["create", "get", "delete", "update"]
- - apiGroups: ["extensions"]
- resources: ["ingresses"]
- verbs: ["create", "get", "delete"]
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["get"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["create", "update", "patch"]
- - apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["create", "get", "update", "delete"]
- - apiGroups: ["admissionregistration.k8s.io"]
- resources: ["mutatingwebhookconfigurations"]
- verbs: ["create", "get", "update", "delete"]
- - apiGroups: ["sparkoperator.k8s.io"]
- resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status"]
- verbs: ["*"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: spark-operator-test-cluster-role-binding
- labels:
- app.kubernetes.io/name: spark
- app.kubernetes.io/instance: {{ index . "instance-name" }}
-subjects:
- - kind: ServiceAccount
- name: {{ index . "operator-service-account" }}
- namespace: {{ index . "service-account-namespace" }}
-roleRef:
- kind: ClusterRole
- name: spark-operator-test-cluster-role
- apiGroup: rbac.authorization.k8s.io
diff --git a/tests/templates/spark-hdfs-kerberos.yaml b/tests/templates/spark-hdfs-kerberos.yaml
deleted file mode 100644
index 5e042ebc..00000000
--- a/tests/templates/spark-hdfs-kerberos.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: org.apache.spark.examples.HdfsTest
- mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-{{ .SparkVersion }}.jar"
- arguments:
- - "hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/spark/README.txt"
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- {{ range $key, $value := .Params.SparkConf }}
- "{{ $key }}": "{{ $value }}"
- {{ end }}
- hadoopConfigMap: hadoop-conf
- sparkVersion: {{ .SparkVersion }}
- restartPolicy:
- type: Never
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
- secrets:
- - name: hadoop-token
- path: /mnt/secrets
- secretType: HadoopDelegationToken
- executor:
- cores: 1
- instances: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- secrets:
- - name: hadoop-token
- path: /mnt/secrets
- secretType: HadoopDelegationToken
diff --git a/tests/templates/spark-mock-task-runner-job-mount-config.yaml b/tests/templates/spark-mock-task-runner-job-mount-config.yaml
deleted file mode 100644
index 3c86ac47..00000000
--- a/tests/templates/spark-mock-task-runner-job-mount-config.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: MockTaskRunner
- mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- arguments: {{ range $i, $arg := index .Params "args" }}
- - "{{ $arg }}"{{ end }}
- {{- if index .Params "sparkConfigMap" }}
- sparkConfigMap: {{ index .Params "sparkConfigMap" }}
- {{- end }}
- {{- if index .Params "hadoopConfigMap" }}
- hadoopConfigMap: {{ index .Params "hadoopConfigMap" }}
- {{- end}}
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
- restartPolicy:
- type: Never
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- serviceAccount: {{ .ServiceAccount }}
- executor:
- cores: 1
- instances: {{ .ExecutorsCount }}
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
diff --git a/tests/templates/spark-mock-task-runner-job.yaml b/tests/templates/spark-mock-task-runner-job.yaml
deleted file mode 100644
index 53b6ea4c..00000000
--- a/tests/templates/spark-mock-task-runner-job.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: MockTaskRunner
- mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- arguments: {{ range $i, $arg := index .Params "args" }}
- - "{{ $arg }}"{{ end }}
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- {{- if .Params.VolumeName }}
- "spark.local.dir": "{{ .Params.MountPath }}/tmp"
- {{- end }}
- sparkVersion: {{ .SparkVersion }}
- restartPolicy:
- type: Never
- {{- if .Params.VolumeName }}
- volumes:
- - name: {{ .Params.VolumeName }}
- hostPath:
- path: /data
- type: DirectoryOrCreate
- {{- end }}
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- serviceAccount: {{ .ServiceAccount }}
- {{- if .Params.VolumeName }}
- volumeMounts:
- - name: {{ .Params.VolumeName }}
- mountPath: {{ .Params.MountPath }}
- {{- end }}
- {{- if and .Params.SecretName .Params.SecretPath }}
- secrets:
- - name: {{ .Params.SecretName }}
- path: {{ .Params.SecretPath }}
- secretType: Opaque
- {{- else if and .Params.SecretName .Params.SecretKey }}
- envSecretKeyRefs:
- SECRET_ENV:
- name: {{ .Params.SecretName }}
- key: {{ .Params.SecretKey }}
- {{- end }}
- {{- if and .Params.RunAsUser .Params.RunAsGroup }}
- securityContext:
- runAsUser: {{ .Params.RunAsUser }}
- runAsGroup: {{ .Params.RunAsGroup }}
- runAsNonRoot: true
- {{- end }}
- {{- if .Params.DriverAffinity }}
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: {{ .Params.Label }}
- operator: DoesNotExist
- podAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: {{ .Params.Label }}
- operator: DoesNotExist
- topologyKey: kubernetes.io/hostname
- {{- end }}
- {{- if .Params.DriverTolerations }}
- tolerations:
- - key: {{ .Params.Label }}
- operator: Exists
- effect: NoSchedule
- {{- end }}
- executor:
- cores: 1
- instances: {{ .ExecutorsCount }}
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- {{- if .Params.VolumeName }}
- volumeMounts:
- - name: {{ .Params.VolumeName }}
- mountPath: {{ .Params.MountPath }}
- {{- end }}
- {{- if and .Params.SecretName .Params.SecretPath }}
- secrets:
- - name: {{ .Params.SecretName }}
- path: {{ .Params.SecretPath }}
- secretType: Opaque
- {{- else if and .Params.SecretName .Params.SecretKey }}
- envSecretKeyRefs:
- SECRET_ENV:
- name: {{ .Params.SecretName }}
- key: {{ .Params.SecretKey }}
- {{- end }}
- {{- if and .Params.RunAsUser .Params.RunAsGroup }}
- securityContext:
- runAsUser: {{ .Params.RunAsUser }}
- runAsGroup: {{ .Params.RunAsGroup }}
- runAsNonRoot: true
- {{- end }}
- {{- if .Params.ExecutorAffinity }}
- affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: {{ .Params.Label }}
- operator: DoesNotExist
- podAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: {{ .Params.Label }}
- operator: DoesNotExist
- topologyKey: kubernetes.io/hostname
- {{- end }}
- {{- if .Params.ExecutorTolerations }}
- tolerations:
- - key: {{ .Params.Label }}
- operator: Exists
- effect: NoSchedule
- {{- end }}
diff --git a/tests/templates/spark-mock-task-runner-with-monitoring.yaml b/tests/templates/spark-mock-task-runner-with-monitoring.yaml
deleted file mode 100644
index 2465b298..00000000
--- a/tests/templates/spark-mock-task-runner-with-monitoring.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: MockTaskRunner
- mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- arguments: {{ range $i, $arg := index .Params "args" }}
- - "{{ $arg }}"{{ end }}
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
- restartPolicy:
- type: Never
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- serviceAccount: {{ .ServiceAccount }}
- executor:
- cores: 1
- instances: {{ .ExecutorsCount }}
- labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- monitoring:
- exposeDriverMetrics: true
- exposeExecutorMetrics: true
- prometheus:
- jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar"
- port: 8090
diff --git a/tests/templates/spark-pi-history-server-job.yaml b/tests/templates/spark-pi-history-server-job.yaml
deleted file mode 100644
index dbc91e75..00000000
--- a/tests/templates/spark-pi-history-server-job.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: org.apache.spark.examples.SparkPi
- mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-{{ .SparkVersion }}.jar"
- arguments:
- - "10"
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- "spark.eventLog.enabled": "true"
- "spark.eventLog.dir": "{{ .Params.AwsBucketPath }}"
- {{- if .Params.AwsSessionToken }}
- "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
- {{- end }}
- deps:
- jars:
- - local:///opt/spark/examples/jars/scopt_2.12-3.7.1.jar
- sparkVersion: {{ .SparkVersion }}
- restartPolicy:
- type: Never
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
- env:
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_ACCESS_KEY_ID
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_SECRET_ACCESS_KEY
- - name: AWS_SESSION_TOKEN
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_SESSION_TOKEN
- optional: true
- executor:
- cores: 1
- instances: {{ .ExecutorsCount }}
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
diff --git a/tests/templates/spark-s3-readwrite.yaml b/tests/templates/spark-s3-readwrite.yaml
deleted file mode 100644
index 6b9ee5e9..00000000
--- a/tests/templates/spark-s3-readwrite.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
-spec:
- type: Scala
- mode: cluster
- image: {{ .Image }}
- imagePullPolicy: Always
- mainClass: S3Job
- mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- sparkConf:
- "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
- "spark.scheduler.minRegisteredResourcesRatio": "1.0"
- "spark.hadoop.fs.s3a.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem"
- {{- if .Params.AwsSessionToken }}
- "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
- {{- end }}
- sparkVersion: {{ .SparkVersion }}
- arguments:
- - "--readUrl"
- - {{ .Params.ReadUrl }}
- - "--writeUrl"
- - {{ .Params.WriteUrl }}
- restartPolicy:
- type: Never
- driver:
- cores: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
- env:
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_ACCESS_KEY_ID
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_SECRET_ACCESS_KEY
- - name: AWS_SESSION_TOKEN
- valueFrom:
- secretKeyRef:
- name: {{ .Params.AwsSecretName }}
- key: AWS_SESSION_TOKEN
- optional: true
- executor:
- cores: 1
- instances: 1
- memory: "512m"
- labels:
- version: {{ .SparkVersion }}
diff --git a/tests/tenancy_test.go b/tests/tenancy_test.go
deleted file mode 100644
index 0d50c29c..00000000
--- a/tests/tenancy_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package tests
-
-import (
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- log "github.com/sirupsen/logrus"
- "gotest.tools/assert"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "strings"
- "testing"
-)
-
-func TestTenancyTwoOperatorsDifferentNamespaces(t *testing.T) {
- operators := operatorBuilder(2, true, true)
- for _, operator := range operators {
- err := operator.InstallSparkOperator()
- assert.NilError(t, err)
- }
-
- t.Run("TestComponents", func(t *testing.T) {
- verifyComponents(t, operators)
- })
- t.Run("TestWorkloads", func(t *testing.T) {
- verifyWorkloads(t, operators)
- })
-
- // this test has been commented out due to the following limitations in KUDO:
- // https://kudo.dev/docs/what-is-kudo.html#limitations
-
- //t.Run("TestCRDsDeletion", func(t *testing.T) {
- // // verify CRDs are present after one of the operators is deleted
- // operators[0].CleanUp()
- // assert.Assert(t, crdsInstalled(t), "CRDs are not present!")
- //
- // // check that CRDs are deleted after no operator instances left
- // operators[1].CleanUp()
- // assert.Assert(t, !crdsInstalled(t), "CRDs are not deleted!")
- //})
-}
-
-func TestTenancyTwoOperatorsSingleNamespace(t *testing.T) {
- operators := operatorBuilder(2, false, true)
- for i, operator := range operators {
- if i > 0 {
- operator.SkipNamespaceCleanUp = true
- }
- err := operator.InstallSparkOperator()
- assert.NilError(t, err)
- defer operator.CleanUp()
- }
-
- t.Run("TestComponents", func(t *testing.T) {
- verifyComponents(t, operators)
- })
- t.Run("TestWorkloads", func(t *testing.T) {
- verifyWorkloads(t, operators)
- })
-}
-
-func TestTenancyTwoOperatorsSameNameDifferentNamespaces(t *testing.T) {
- operators := operatorBuilder(2, true, false)
- for _, operator := range operators {
- err := operator.InstallSparkOperator()
- assert.NilError(t, err)
- defer operator.CleanUp()
- }
-
- t.Run("TestComponents", func(t *testing.T) {
- verifyComponents(t, operators)
- })
- t.Run("TestWorkloads", func(t *testing.T) {
- verifyWorkloads(t, operators)
- })
-
-}
-
-func verifyComponents(t *testing.T, operators []*utils.SparkOperatorInstallation) {
- serviceAccounts := []string{"spark-operator-service-account", "spark-service-account"}
- services := []string{"webhook"}
-
- for _, operator := range operators {
- for _, service := range services {
- serviceName := fmt.Sprint(operator.InstanceName, "-", service)
- log.Infof("Checking Service \"%s\" is created in namespace \"%s\" for \"%s\"", serviceName,
- operator.Namespace, operator.InstanceName)
- result, err := operator.K8sClients.CoreV1().Services(operator.Namespace).Get(
- fmt.Sprint(serviceName), v1.GetOptions{})
- assert.NilError(t, err)
- assert.Equal(t, result.Labels["kudo.dev/instance"], operator.InstanceName)
- }
-
- for _, sa := range serviceAccounts {
- serviceAccount := fmt.Sprint(operator.InstanceName, "-", sa)
- log.Infof("Checking ServiceAccount \"%s\" is created in namespace \"%s\" for \"%s\"", serviceAccount,
- operator.Namespace, operator.InstanceName)
- result, err := operator.K8sClients.CoreV1().ServiceAccounts(operator.Namespace).Get(
- serviceAccount, v1.GetOptions{})
- assert.NilError(t, err)
- assert.Equal(t, result.Labels["kudo.dev/instance"], operator.InstanceName)
- }
-
- role := fmt.Sprintf("%s-spark-role", operator.InstanceName)
- log.Infof("Checking Role \"%s\" is created in namespace \"%s\" for \"%s\"", role,
- operator.Namespace, operator.InstanceName)
- result, err := operator.K8sClients.RbacV1().Roles(operator.Namespace).Get(role, v1.GetOptions{})
- assert.NilError(t, err)
- assert.Equal(t, result.Labels["kudo.dev/instance"], operator.InstanceName)
-
- clusterRole := fmt.Sprintf("%s-%s-cr", operator.InstanceName, operator.Namespace)
- _, err = operator.K8sClients.RbacV1().ClusterRoles().Get(clusterRole, v1.GetOptions{})
- assert.NilError(t, err)
-
- clusterRoleBinding := fmt.Sprintf("%s-%s-crb", operator.InstanceName, operator.Namespace)
- _, err = operator.K8sClients.RbacV1().ClusterRoleBindings().Get(clusterRoleBinding, v1.GetOptions{})
- assert.NilError(t, err)
-
- }
-}
-
-func verifyWorkloads(t *testing.T, operators []*utils.SparkOperatorInstallation) {
- for _, operator := range operators {
- job := utils.SparkJob{
- Name: "spark-pi",
- Namespace: operator.Namespace,
- Template: "spark-pi.yaml",
- }
-
- err := operator.SubmitJob(&job)
- assert.NilError(t, err)
-
- err = operator.WaitUntilSucceeded(job)
- assert.NilError(t, err)
- }
-}
-
-func crdsInstalled(t *testing.T) bool {
- output, err := utils.Kubectl("get", "crds", "-o=name")
-
- assert.NilError(t, err)
-
- return strings.Contains(output, "sparkapplications.sparkoperator.k8s.io") &&
- strings.Contains(output, "scheduledsparkapplications.sparkoperator.k8s.io")
-}
-
-func operatorBuilder(numberOfOperators int, separateNamespace bool, uniqueOperatorInstanceName bool) []*utils.SparkOperatorInstallation {
- const operatorInstanceName = "spark-operator"
- const operatorNamespace = "namespace"
-
- var operators []*utils.SparkOperatorInstallation
- for i := 1; i <= numberOfOperators; i++ {
- operator := utils.SparkOperatorInstallation{
- InstanceName: operatorInstanceName,
- Namespace: operatorNamespace,
- }
- if separateNamespace {
- operator.Namespace = fmt.Sprintf("%s-%d", operatorNamespace, i)
- }
- if uniqueOperatorInstanceName {
- operator.InstanceName = fmt.Sprintf("%s-%d", operatorInstanceName, i)
- }
- operator.Params = map[string]string{
- "sparkJobNamespace": operator.Namespace,
- }
- operators = append(operators, &operator)
- }
- return operators
-}
diff --git a/tests/test-app-deletion/00-install-operator.yaml b/tests/test-app-deletion/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-app-deletion/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-app-deletion/01-assert.yaml b/tests/test-app-deletion/01-assert.yaml
new file mode 100644
index 00000000..a739c6d1
--- /dev/null
+++ b/tests/test-app-deletion/01-assert.yaml
@@ -0,0 +1,23 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+status:
+ phase: Running
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: executor
+ sparkoperator.k8s.io/app-name: mock-task-runner
+status:
+ phase: Running
diff --git a/tests/test-app-deletion/01-submit-spark-app.yaml b/tests/test-app-deletion/01-submit-spark-app.yaml
new file mode 100644
index 00000000..a05a41f3
--- /dev/null
+++ b/tests/test-app-deletion/01-submit-spark-app.yaml
@@ -0,0 +1,34 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "60"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-app-deletion/02-assert.yaml b/tests/test-app-deletion/02-assert.yaml
new file mode 100644
index 00000000..50bf3790
--- /dev/null
+++ b/tests/test-app-deletion/02-assert.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
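+# the operator should emit a SparkApplicationDeleted event for the deleted application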
+apiVersion: v1
+kind: Event
+reason: SparkApplicationDeleted
+source:
+ component: spark-operator
+involvedObject:
+ apiVersion: sparkoperator.k8s.io/v1beta2
+ kind: SparkApplication
+ name: mock-task-runner
diff --git a/tests/test-app-deletion/02-delete-app.yaml b/tests/test-app-deletion/02-delete-app.yaml
new file mode 100644
index 00000000..4003d287
--- /dev/null
+++ b/tests/test-app-deletion/02-delete-app.yaml
@@ -0,0 +1,6 @@
+apiVersion: kudo.dev/v1alpha1
+kind: TestStep
+delete:
+- apiVersion: "sparkoperator.k8s.io/v1beta2"
+ kind: SparkApplication
+ name: mock-task-runner
diff --git a/tests/test-app-deletion/03-errors.yaml b/tests/test-app-deletion/03-errors.yaml
new file mode 100644
index 00000000..f3cad073
--- /dev/null
+++ b/tests/test-app-deletion/03-errors.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: executor
+ sparkoperator.k8s.io/app-name: mock-task-runner
+
+# verify that no driver or executor pods are left after the application is deleted
diff --git a/tests/test-app-scheduling-with-volcano/00-assert.yaml b/tests/test-app-scheduling-with-volcano/00-assert.yaml
new file mode 100644
index 00000000..933a0f68
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/00-assert.yaml
@@ -0,0 +1,26 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: volcano-scheduler
+ namespace: volcano-system
+status:
+ availableReplicas: 1
+ readyReplicas: 1
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: volcano-admission
+ namespace: volcano-system
+status:
+ availableReplicas: 1
+ readyReplicas: 1
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: volcano-controllers
+ namespace: volcano-system
+status:
+ availableReplicas: 1
+ readyReplicas: 1
diff --git a/tests/resources/volcano/volcano-0.2.yaml b/tests/test-app-scheduling-with-volcano/00-install-volcano.yaml
similarity index 72%
rename from tests/resources/volcano/volcano-0.2.yaml
rename to tests/test-app-scheduling-with-volcano/00-install-volcano.yaml
index dcc38299..c2e0415f 100644
--- a/tests/resources/volcano/volcano-0.2.yaml
+++ b/tests/test-app-scheduling-with-volcano/00-install-volcano.yaml
@@ -23,7 +23,7 @@ data:
- name: proportion
- name: nodeorder
- name: binpack
-
+
---
apiVersion: v1
kind: ServiceAccount
@@ -36,57 +36,57 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-scheduler
rules:
- - apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["create", "get", "list", "watch", "delete"]
- - apiGroups: ["batch.volcano.sh"]
- resources: ["jobs"]
- verbs: ["get", "list", "watch", "update", "delete"]
- - apiGroups: ["batch.volcano.sh"]
- resources: ["jobs/status"]
- verbs: ["update", "patch"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["create", "list", "watch", "update", "patch"]
- - apiGroups: [""]
- resources: ["pods", "pods/status"]
- verbs: ["create", "get", "list", "watch", "update", "bind", "updateStatus", "delete"]
- - apiGroups: [""]
- resources: ["pods/binding"]
- verbs: ["create"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["list", "watch"]
- - apiGroups: [""]
- resources: ["persistentvolumes"]
- verbs: ["list", "watch"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["podgroups"]
- verbs: ["list", "watch", "update"]
- - apiGroups: [""]
- resources: ["namespaces"]
- verbs: ["list", "watch"]
- - apiGroups: [""]
- resources: ["resourcequotas"]
- verbs: ["list", "watch"]
- - apiGroups: ["storage.k8s.io"]
- resources: ["storageclasses"]
- verbs: ["list", "watch"]
- - apiGroups: [""]
- resources: ["nodes"]
- verbs: ["list", "watch"]
- - apiGroups: ["policy"]
- resources: ["poddisruptionbudgets"]
- verbs: ["list", "watch"]
- - apiGroups: ["scheduling.k8s.io"]
- resources: ["priorityclasses"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["queues"]
- verbs: ["get", "list", "watch", "create", "delete"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["podgroups"]
- verbs: ["list", "watch", "update"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "list", "watch", "delete"]
+- apiGroups: ["batch.volcano.sh"]
+ resources: ["jobs"]
+ verbs: ["get", "list", "watch", "update", "delete"]
+- apiGroups: ["batch.volcano.sh"]
+ resources: ["jobs/status"]
+ verbs: ["update", "patch"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "list", "watch", "update", "patch"]
+- apiGroups: [""]
+ resources: ["pods", "pods/status"]
+ verbs: ["create", "get", "list", "watch", "update", "bind", "updateStatus", "delete"]
+- apiGroups: [""]
+ resources: ["pods/binding"]
+ verbs: ["create"]
+- apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["list", "watch"]
+- apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["list", "watch"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["podgroups"]
+ verbs: ["list", "watch", "update"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["list", "watch"]
+- apiGroups: [""]
+ resources: ["resourcequotas"]
+ verbs: ["list", "watch"]
+- apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["list", "watch"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["list", "watch"]
+- apiGroups: ["policy"]
+ resources: ["poddisruptionbudgets"]
+ verbs: ["list", "watch"]
+- apiGroups: ["scheduling.k8s.io"]
+ resources: ["priorityclasses"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["queues"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["podgroups"]
+ verbs: ["list", "watch", "update"]
---
kind: ClusterRoleBinding
@@ -94,9 +94,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-scheduler-role
subjects:
- - kind: ServiceAccount
- name: volcano-scheduler
- namespace: volcano-system
+- kind: ServiceAccount
+ name: volcano-scheduler
+ namespace: volcano-system
roleRef:
kind: ClusterRole
name: volcano-scheduler
@@ -121,23 +121,23 @@ spec:
app: volcano-scheduler
spec:
serviceAccount: volcano-scheduler
-
+
containers:
- - name: volcano-scheduler
- image: volcanosh/vc-scheduler:v0.2
- args:
- - --alsologtostderr
- - --scheduler-conf=/volcano.scheduler/volcano-scheduler.conf
- - -v=3
- - 2>&1
- imagePullPolicy: "IfNotPresent"
- volumeMounts:
- - name: scheduler-config
- mountPath: /volcano.scheduler
- volumes:
+ - name: volcano-scheduler
+ image: volcanosh/vc-scheduler:v0.2
+ args:
+ - --alsologtostderr
+ - --scheduler-conf=/volcano.scheduler/volcano-scheduler.conf
+ - -v=3
+ - 2>&1
+ imagePullPolicy: "IfNotPresent"
+ volumeMounts:
- name: scheduler-config
- configMap:
- name: volcano-scheduler-configmap
+ mountPath: /volcano.scheduler
+ volumes:
+ - name: scheduler-config
+ configMap:
+ name: volcano-scheduler-configmap
---
# Source: volcano/templates/admission.yaml
@@ -152,31 +152,31 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-admission
rules:
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get", "list", "watch"]
- - apiGroups: ["admissionregistration.k8s.io"]
- resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
- verbs: ["get", "list", "watch", "create", "update"]
- # Rules below is used generate admission service secret
- - apiGroups: ["certificates.k8s.io"]
- resources: ["certificatesigningrequests"]
- verbs: ["get", "list", "create", "delete"]
- - apiGroups: ["certificates.k8s.io"]
- resources: ["certificatesigningrequests/approval"]
- verbs: ["create", "update"]
- - apiGroups: [""]
- resources: ["secrets"]
- verbs: ["create", "get", "patch"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["queues"]
- verbs: ["get", "list"]
- - apiGroups: [""]
- resources: ["services"]
- verbs: ["get"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["podgroups"]
- verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
+ verbs: ["get", "list", "watch", "create", "update"]
+# The rules below are used to generate the admission service secret
+- apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests"]
+ verbs: ["get", "list", "create", "delete"]
+- apiGroups: ["certificates.k8s.io"]
+ resources: ["certificatesigningrequests/approval"]
+ verbs: ["create", "update"]
+- apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["create", "get", "patch"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["queues"]
+ verbs: ["get", "list"]
+- apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["podgroups"]
+ verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
@@ -184,9 +184,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-admission-role
subjects:
- - kind: ServiceAccount
- name: volcano-admission
- namespace: volcano-system
+- kind: ServiceAccount
+ name: volcano-admission
+ namespace: volcano-system
roleRef:
kind: ClusterRole
name: volcano-admission
@@ -211,30 +211,30 @@ spec:
app: volcano-admission
spec:
serviceAccount: volcano-admission
-
+
containers:
- - args:
- - --tls-cert-file=/admission.local.config/certificates/tls.crt
- - --tls-private-key-file=/admission.local.config/certificates/tls.key
- - --ca-cert-file=/admission.local.config/certificates/ca.crt
- - --webhook-namespace=volcano-system
- - --webhook-service-name=volcano-admission-service
- - --alsologtostderr
- - --port=443
- - -v=4
- - 2>&1
- image: volcanosh/vc-admission:v0.2
- imagePullPolicy: IfNotPresent
- name: admission
- volumeMounts:
- - mountPath: /admission.local.config/certificates
- name: admission-certs
- readOnly: true
+ - args:
+ - --tls-cert-file=/admission.local.config/certificates/tls.crt
+ - --tls-private-key-file=/admission.local.config/certificates/tls.key
+ - --ca-cert-file=/admission.local.config/certificates/ca.crt
+ - --webhook-namespace=volcano-system
+ - --webhook-service-name=volcano-admission-service
+ - --alsologtostderr
+ - --port=443
+ - -v=4
+ - 2>&1
+ image: volcanosh/vc-admission:v0.2
+ imagePullPolicy: IfNotPresent
+ name: admission
+ volumeMounts:
+ - mountPath: /admission.local.config/certificates
+ name: admission-certs
+ readOnly: true
volumes:
- - name: admission-certs
- secret:
- defaultMode: 420
- secretName: volcano-admission-secret
+ - name: admission-certs
+ secret:
+ defaultMode: 420
+ secretName: volcano-admission-secret
---
apiVersion: v1
@@ -246,9 +246,9 @@ metadata:
namespace: volcano-system
spec:
ports:
- - port: 443
- protocol: TCP
- targetPort: 443
+ - port: 443
+ protocol: TCP
+ targetPort: 443
selector:
app: volcano-admission
sessionAffinity: None
@@ -268,11 +268,11 @@ spec:
serviceAccountName: volcano-admission
restartPolicy: Never
containers:
- - name: main
- image: volcanosh/vc-admission:v0.2
- imagePullPolicy: IfNotPresent
- command: ["./gen-admission-secret.sh", "--service", "volcano-admission-service", "--namespace",
- "volcano-system", "--secret", "volcano-admission-secret"]
+ - name: main
+ image: volcanosh/vc-admission:v0.2
+ imagePullPolicy: IfNotPresent
+ command: ["./gen-admission-secret.sh", "--service", "volcano-admission-service", "--namespace",
+ "volcano-system", "--secret", "volcano-admission-secret"]
---
# Source: volcano/templates/controllers.yaml
@@ -288,42 +288,42 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-controllers
rules:
- - apiGroups: ["apiextensions.k8s.io"]
- resources: ["customresourcedefinitions"]
- verbs: ["create", "get", "list", "watch", "delete"]
- - apiGroups: ["batch"]
- resources: ["jobs"]
- verbs: ["create", "get", "list", "watch", "delete", "update"]
- - apiGroups: ["batch.volcano.sh"]
- resources: ["jobs"]
- verbs: ["get", "list", "watch", "update", "delete"]
- - apiGroups: ["batch.volcano.sh"]
- resources: ["jobs/status"]
- verbs: ["update", "patch"]
- - apiGroups: ["bus.volcano.sh"]
- resources: ["commands"]
- verbs: ["get", "list", "watch", "delete"]
- - apiGroups: [""]
- resources: ["events"]
- verbs: ["create", "list", "watch", "update", "patch"]
- - apiGroups: [""]
- resources: ["pods"]
- verbs: ["create", "get", "list", "watch", "update", "bind", "delete"]
- - apiGroups: [""]
- resources: ["persistentvolumeclaims"]
- verbs: ["get", "list", "watch", "create"]
- - apiGroups: [""]
- resources: ["services"]
- verbs: ["get", "list", "watch", "create", "delete"]
- - apiGroups: [""]
- resources: ["configmaps"]
- verbs: ["get", "list", "watch", "create", "delete", "update"]
- - apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
- resources: ["podgroups", "queues", "queues/status"]
- verbs: ["get", "list", "watch", "create", "delete", "update"]
- - apiGroups: ["scheduling.k8s.io"]
- resources: ["priorityclasses"]
- verbs: ["get", "list", "watch", "create", "delete"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "list", "watch", "delete"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create", "get", "list", "watch", "delete", "update"]
+- apiGroups: ["batch.volcano.sh"]
+ resources: ["jobs"]
+ verbs: ["get", "list", "watch", "update", "delete"]
+- apiGroups: ["batch.volcano.sh"]
+ resources: ["jobs/status"]
+ verbs: ["update", "patch"]
+- apiGroups: ["bus.volcano.sh"]
+ resources: ["commands"]
+ verbs: ["get", "list", "watch", "delete"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "list", "watch", "update", "patch"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["create", "get", "list", "watch", "update", "bind", "delete"]
+- apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "create"]
+- apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: ["get", "list", "watch", "create", "delete", "update"]
+- apiGroups: ["scheduling.incubator.k8s.io", "scheduling.sigs.dev"]
+ resources: ["podgroups", "queues", "queues/status"]
+ verbs: ["get", "list", "watch", "create", "delete", "update"]
+- apiGroups: ["scheduling.k8s.io"]
+ resources: ["priorityclasses"]
+ verbs: ["get", "list", "watch", "create", "delete"]
---
kind: ClusterRoleBinding
@@ -331,9 +331,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volcano-controllers-role
subjects:
- - kind: ServiceAccount
- name: volcano-controllers
- namespace: volcano-system
+- kind: ServiceAccount
+ name: volcano-controllers
+ namespace: volcano-system
roleRef:
kind: ClusterRole
name: volcano-controllers
@@ -358,15 +358,15 @@ spec:
app: volcano-controller
spec:
serviceAccount: volcano-controllers
-
+
containers:
- - name: volcano-controllers
- image: volcanosh/vc-controllers:v0.2
- args:
- - --alsologtostderr
- - -v=4
- - 2>&1
- imagePullPolicy: "IfNotPresent"
+ - name: volcano-controllers
+ image: volcanosh/vc-controllers:v0.2
+ args:
+ - --alsologtostderr
+ - -v=4
+ - 2>&1
+ imagePullPolicy: "IfNotPresent"
---
# Source: volcano/templates/batch_v1alpha1_job.yaml
@@ -382,8 +382,8 @@ spec:
kind: Job
plural: jobs
shortNames:
- - vcjob
- - vj
+ - vcjob
+ - vj
scope: Namespaced
validation:
openAPIV3Schema:
@@ -419,7 +419,7 @@ spec:
description: The name of the volume claim.
type: object
required:
- - mountPath
+ - mountPath
type: array
minAvailable:
description: The minimal available pods to run for this Job
@@ -711,8 +711,8 @@ spec:
kind: PodGroup
plural: podgroups
shortNames:
- - pg
- - podgroup-v1alpha2
+ - pg
+ - podgroup-v1alpha2
scope: Namespaced
validation:
openAPIV3Schema:
@@ -760,8 +760,8 @@ spec:
kind: Queue
plural: queues
shortNames:
- - q
- - queue-v1alpha2
+ - q
+ - queue-v1alpha2
scope: Cluster
validation:
openAPIV3Schema:
@@ -794,4 +794,3 @@ spec:
version: v1alpha2
subresources:
status: {}
-
diff --git a/tests/test-app-scheduling-with-volcano/01-install-operator.yaml b/tests/test-app-scheduling-with-volcano/01-install-operator.yaml
new file mode 100644
index 00000000..b5ec3b71
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/01-install-operator.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
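+# install the operator with the Volcano batch scheduler integration enabled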
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --instance=spark \
+ --namespace=$NAMESPACE \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p enableBatchScheduler=true
+assert:
+ - ../manifests/kudo-controller.yaml
+ - ../manifests/spark-instance.yaml
diff --git a/tests/test-app-scheduling-with-volcano/02-assert.yaml b/tests/test-app-scheduling-with-volcano/02-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/02-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/templates/spark-mock-task-runner.yaml b/tests/test-app-scheduling-with-volcano/02-submit-spark-app.yaml
similarity index 56%
rename from tests/templates/spark-mock-task-runner.yaml
rename to tests/test-app-scheduling-with-volcano/02-submit-spark-app.yaml
index 185c8db0..359c5471 100644
--- a/tests/templates/spark-mock-task-runner.yaml
+++ b/tests/test-app-scheduling-with-volcano/02-submit-spark-app.yaml
@@ -1,37 +1,36 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: mock-task-runner
spec:
type: Scala
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainClass: MockTaskRunner
mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "1"
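+ # schedule this application's pods through the Volcano batch scheduler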
+ batchScheduler: volcano
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
"spark.scheduler.minRegisteredResourcesRatio": "1.0"
- {{ range $key, $value := .Params.SparkConf }}
- "{{ $key }}": "{{ $value }}"
- {{ end }}
- sparkVersion: {{ .SparkVersion }}
- arguments:
- {{ range $i, $arg := index .Params.Args }}
- - "{{ $arg }}"
- {{ end }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 1
memory: "512m"
+ deleteOnTermination: false
labels:
- version: {{ .SparkVersion }}
+ version: 3.0.0
diff --git a/tests/test-app-scheduling-with-volcano/03-assert.yaml b/tests/test-app-scheduling-with-volcano/03-assert.yaml
new file mode 100644
index 00000000..d0bede7c
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/03-assert.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+involvedObject:
+ apiVersion: v1
+ kind: Pod
+ name: mock-task-runner-driver
+kind: Event
+reason: Scheduled
+source:
+ component: volcano
diff --git a/tests/test-app-scheduling-with-volcano/04-cleanup.yaml b/tests/test-app-scheduling-with-volcano/04-cleanup.yaml
new file mode 100644
index 00000000..f6c9dcca
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/04-cleanup.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: kubectl delete ns volcano-system
diff --git a/tests/test-app-scheduling-with-volcano/04-errors.yaml b/tests/test-app-scheduling-with-volcano/04-errors.yaml
new file mode 100644
index 00000000..a6e22db2
--- /dev/null
+++ b/tests/test-app-scheduling-with-volcano/04-errors.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: volcano-system
diff --git a/tests/test-ha/00-assert.yaml b/tests/test-ha/00-assert.yaml
new file mode 100644
index 00000000..5e88e9a1
--- /dev/null
+++ b/tests/test-ha/00-assert.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: spark-operator-lock
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: spark
+status:
+ availableReplicas: 3
diff --git a/tests/test-ha/00-install-operator.yaml b/tests/test-ha/00-install-operator.yaml
new file mode 100644
index 00000000..b8e45f36
--- /dev/null
+++ b/tests/test-ha/00-install-operator.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
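+# install the operator with 3 replicas and leader election enabled so a standby replica can take over if the leader fails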
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=$NAMESPACE \
+ --instance=spark \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p enableLeaderElection=true \
+ -p leaderElectionLockNamespace=$NAMESPACE \
+ -p replicas=3
+assert:
+ - ../manifests/kudo-controller.yaml
+ - ../manifests/spark-instance.yaml
diff --git a/tests/test-ha/01-assert.yaml b/tests/test-ha/01-assert.yaml
new file mode 100644
index 00000000..a5eea214
--- /dev/null
+++ b/tests/test-ha/01-assert.yaml
@@ -0,0 +1,14 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+status:
+ phase: Running
diff --git a/tests/templates/spark-mock-task-runner-job-host-network.yaml b/tests/test-ha/01-submit-spark-app.yaml
similarity index 51%
rename from tests/templates/spark-mock-task-runner-job-host-network.yaml
rename to tests/test-ha/01-submit-spark-app.yaml
index 5ed377a8..8a67545f 100644
--- a/tests/templates/spark-mock-task-runner-job-host-network.yaml
+++ b/tests/test-ha/01-submit-spark-app.yaml
@@ -1,36 +1,35 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: mock-task-runner
spec:
type: Scala
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainClass: MockTaskRunner
mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- arguments: {{ range $i, $arg := index .Params "args" }}
- - "{{ $arg }}"{{ end }}
+ arguments:
+ - "1"
+ - "10"
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
"spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
- hostNetwork: {{ index .Params "driverHostNetwork" }}
labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 1
memory: "512m"
- hostNetwork: {{ index .Params "executorHostNetwork" }}
+ deleteOnTermination: false
labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
+ version: 3.0.0
diff --git a/tests/test-ha/02-assert.yaml b/tests/test-ha/02-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-ha/02-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-ha/02-delete-leader-pod.yaml b/tests/test-ha/02-delete-leader-pod.yaml
new file mode 100644
index 00000000..46eb8424
--- /dev/null
+++ b/tests/test-ha/02-delete-leader-pod.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ set -x
+
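+ # the leader-election lock ConfigMap records the current leader in its
+ # control-plane.alpha.kubernetes.io/leader annotation; holderIdentity is the leader pod name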
+ leader_pod=$(kubectl get cm spark-operator-lock -n $NAMESPACE -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' | jq -r .holderIdentity)
+ kubectl delete pod ${leader_pod} -n $NAMESPACE
diff --git a/tests/test-hdfs-kerberos-shs/00-assert.yaml b/tests/test-hdfs-kerberos-shs/00-assert.yaml
new file mode 100644
index 00000000..17213bd6
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/00-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: hdfs-kerberos
diff --git a/tests/test-hdfs-kerberos-shs/00-create-ns.yaml b/tests/test-hdfs-kerberos-shs/00-create-ns.yaml
new file mode 100644
index 00000000..17213bd6
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/00-create-ns.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: hdfs-kerberos
diff --git a/tests/test-hdfs-kerberos-shs/01-assert.yaml b/tests/test-hdfs-kerberos-shs/01-assert.yaml
new file mode 100644
index 00000000..89d6af61
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/01-assert.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hdfs-datanode
+ namespace: hdfs-kerberos
+status:
+ availableReplicas: 1
+ readyReplicas: 1
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kerberos
+ namespace: hdfs-kerberos
+status:
+ availableReplicas: 1
+ readyReplicas: 1
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: hdfs-namenode
+ namespace: hdfs-kerberos
+status:
+ availableReplicas: 1
+ readyReplicas: 1
diff --git a/tests/test-hdfs-kerberos-shs/01-deploy-kdc-and-hdfs.yaml b/tests/test-hdfs-kerberos-shs/01-deploy-kdc-and-hdfs.yaml
new file mode 100644
index 00000000..87e8652f
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/01-deploy-kdc-and-hdfs.yaml
@@ -0,0 +1,12 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+# the order is important here: the Kerberos KDC must be available before the HDFS NameNode and DataNode are deployed
+- command: kubectl apply -n hdfs-kerberos -f hdfs-kerberos/configmaps
+- command: kubectl apply -n hdfs-kerberos -f hdfs-kerberos/volumes
+- command: kubectl apply -n hdfs-kerberos -f hdfs-kerberos/kerberos-deployment.yaml
+- command: kubectl wait deployment/kerberos --for=condition=available --timeout=5m -n hdfs-kerberos
+- command: kubectl apply -n hdfs-kerberos -f hdfs-kerberos/namenode-deployment.yaml
+- command: kubectl wait deployment/hdfs-namenode --for=condition=available --timeout=5m -n hdfs-kerberos
+- command: kubectl apply -n hdfs-kerberos -f hdfs-kerberos/datanode-deployment.yaml
+- command: kubectl wait deployment/hdfs-datanode --for=condition=available --timeout=5m -n hdfs-kerberos
diff --git a/tests/test-hdfs-kerberos-shs/02-bootstrap.yaml b/tests/test-hdfs-kerberos-shs/02-bootstrap.yaml
new file mode 100644
index 00000000..9a934723
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/02-bootstrap.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ set -x
+
+ ns=hdfs-kerberos
+
+ # get the name of a Namenode pod
+ namenode_pod=$(kubectl get pods --selector=name=hdfs-namenode --output=jsonpath={.items[*].metadata.name} -n $ns)
+
+ # run the init script to copy test data to HDFS and export delegation token
+ kubectl exec $namenode_pod -n $ns -- init.sh
+
+ # copy delegation token from the pod to a local filesystem
+ mkdir -p .tmp
+ kubectl cp $namenode_pod:tmp/hadoop.token .tmp/hadoop.token -n $ns
diff --git a/tests/test-hdfs-kerberos-shs/03-assert.yaml b/tests/test-hdfs-kerberos-shs/03-assert.yaml
new file mode 100644
index 00000000..a7cc66fa
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/03-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: hadoop-token
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: hadoop-conf
diff --git a/tests/test-hdfs-kerberos-shs/03-create-secret.yaml b/tests/test-hdfs-kerberos-shs/03-create-secret.yaml
new file mode 100644
index 00000000..ebbf1abf
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/03-create-secret.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: kubectl create secret generic hadoop-token --from-file .tmp/hadoop.token -n $NAMESPACE
+apply:
+ - ./hdfs-kerberos/configmaps/hadoop-conf.yaml
diff --git a/tests/test-hdfs-kerberos-shs/04-install-operator.yaml b/tests/test-hdfs-kerberos-shs/04-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/04-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-hdfs-kerberos-shs/05-assert.yaml b/tests/test-hdfs-kerberos-shs/05-assert.yaml
new file mode 100644
index 00000000..35e2ad73
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/05-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-hdfs-test
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-hdfs-kerberos-shs/05-submit-spark-app.yaml b/tests/test-hdfs-kerberos-shs/05-submit-spark-app.yaml
new file mode 100644
index 00000000..3c3cc7e0
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/05-submit-spark-app.yaml
@@ -0,0 +1,43 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-hdfs-test
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: org.apache.spark.examples.HdfsTest
+ mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar"
+ arguments:
+ - "hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/spark/README.txt"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ hadoopConfigMap: hadoop-conf
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ secrets:
+ - name: hadoop-token
+ path: /mnt/secrets
+ secretType: HadoopDelegationToken
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ secrets:
+ - name: hadoop-token
+ path: /mnt/secrets
+ secretType: HadoopDelegationToken
diff --git a/tests/test-hdfs-kerberos-shs/06-uninstall.yaml b/tests/test-hdfs-kerberos-shs/06-uninstall.yaml
new file mode 100644
index 00000000..95c80154
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/06-uninstall.yaml
@@ -0,0 +1,9 @@
+apiVersion: kudo.dev/v1alpha1
+kind: TestStep
+delete:
+- apiVersion: kudo.dev/v1beta1
+ kind: Instance
+ metadata:
+ name: spark
+errors:
+- ../../common/assert-operator.yaml
diff --git a/tests/test-hdfs-kerberos-shs/07-install-operator.yaml b/tests/test-hdfs-kerberos-shs/07-install-operator.yaml
new file mode 100644
index 00000000..55fd1110
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/07-install-operator.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
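+# re-install the operator with the Spark History Server enabled, reading event logs
+# from the kerberized HDFS using the previously created delegation token secret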
+- script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=$NAMESPACE \
+ --instance=spark \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p enableHistoryServer=true \
+ -p historyServerFsLogDirectory=hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/history \
+ -p delegationTokenSecret=hadoop-token
+assert:
+ - ../manifests/kudo-controller.yaml
+ - ../manifests/spark-instance.yaml
diff --git a/tests/test-hdfs-kerberos-shs/08-assert.yaml b/tests/test-hdfs-kerberos-shs/08-assert.yaml
new file mode 100644
index 00000000..66cd820f
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/08-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-hdfs-shs-test
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-hdfs-kerberos-shs/08-submit-spark-app.yaml b/tests/test-hdfs-kerberos-shs/08-submit-spark-app.yaml
new file mode 100644
index 00000000..e443c690
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/08-submit-spark-app.yaml
@@ -0,0 +1,45 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-hdfs-shs-test
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: org.apache.spark.examples.HdfsTest
+ mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar"
+ arguments:
+ - "hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/spark/README.txt"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ "spark.eventLog.enabled": "true"
+ "spark.eventLog.dir": hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/history
+ hadoopConfigMap: hadoop-conf
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ secrets:
+ - name: hadoop-token
+ path: /mnt/secrets
+ secretType: HadoopDelegationToken
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ secrets:
+ - name: hadoop-token
+ path: /mnt/secrets
+ secretType: HadoopDelegationToken
diff --git a/tests/test-hdfs-kerberos-shs/09-check-shs-log.yaml b/tests/test-hdfs-kerberos-shs/09-check-shs-log.yaml
new file mode 100644
index 00000000..8deb84bb
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/09-check-shs-log.yaml
@@ -0,0 +1,18 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
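+ # poll the history server logs, retrying up to 5 times, for a message indicating
+ # that an event log written to HDFS by the previous step has been parsed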
+ match="FsHistoryProvider: Finished parsing hdfs://namenode.hdfs-kerberos.svc.cluster.local:9000/history/spark-"
+ for try in `seq 5`
+ do
+ shs_log=$(kubectl logs -l app.kubernetes.io/name=spark-history-server -n $NAMESPACE)
+ if [ $(echo ${shs_log} | grep -c "${match}") -gt 0 ]; then
+ echo string found in the logs
+ exit 0
+ else
+ echo match not found - retrying, attempt: $try
+ fi
+ sleep 5
+ done
+ echo timed out waiting for the match, logs: ${shs_log}
+ exit 1
diff --git a/tests/test-hdfs-kerberos-shs/10-cleanup.yaml b/tests/test-hdfs-kerberos-shs/10-cleanup.yaml
new file mode 100644
index 00000000..5a70b28d
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/10-cleanup.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - command: kubectl delete ns hdfs-kerberos
+ - command: rm -rf .tmp
diff --git a/tests/test-hdfs-kerberos-shs/10-errors.yaml b/tests/test-hdfs-kerberos-shs/10-errors.yaml
new file mode 100644
index 00000000..17213bd6
--- /dev/null
+++ b/tests/test-hdfs-kerberos-shs/10-errors.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: hdfs-kerberos
diff --git a/tests/resources/hdfs-kerberos/configmaps/hadoop-conf.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/configmaps/hadoop-conf.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/configmaps/hadoop-conf.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/configmaps/hadoop-conf.yaml
diff --git a/tests/resources/hdfs-kerberos/configmaps/krb5.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/configmaps/krb5.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/configmaps/krb5.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/configmaps/krb5.yaml
diff --git a/tests/resources/hdfs-kerberos/datanode-deployment.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/datanode-deployment.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/datanode-deployment.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/datanode-deployment.yaml
diff --git a/tests/resources/hdfs-kerberos/kerberos-deployment.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/kerberos-deployment.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/kerberos-deployment.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/kerberos-deployment.yaml
diff --git a/tests/resources/hdfs-kerberos/namenode-deployment.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/namenode-deployment.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/namenode-deployment.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/namenode-deployment.yaml
diff --git a/tests/resources/hdfs-kerberos/volumes/server-keytab-pv.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/volumes/server-keytab-pv.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/volumes/server-keytab-pv.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/volumes/server-keytab-pv.yaml
diff --git a/tests/resources/hdfs-kerberos/volumes/server-keytab.yaml b/tests/test-hdfs-kerberos-shs/hdfs-kerberos/volumes/server-keytab.yaml
similarity index 100%
rename from tests/resources/hdfs-kerberos/volumes/server-keytab.yaml
rename to tests/test-hdfs-kerberos-shs/hdfs-kerberos/volumes/server-keytab.yaml
diff --git a/tests/test-host-network/00-install-operator.yaml b/tests/test-host-network/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-host-network/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-host-network/01-assert.yaml b/tests/test-host-network/01-assert.yaml
new file mode 100644
index 00000000..203ef6de
--- /dev/null
+++ b/tests/test-host-network/01-assert.yaml
@@ -0,0 +1,28 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
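+# both driver and executor pods are expected to run with hostNetwork enabled
+# and dnsPolicy set to ClusterFirstWithHostNet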
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: driver
+ sparkoperator.k8s.io/app-name: mock-task-runner
+ name: mock-task-runner-driver
+spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: executor
+ sparkoperator.k8s.io/app-name: mock-task-runner
+spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ hostNetwork: true
diff --git a/tests/test-host-network/01-submit-spark-app.yaml b/tests/test-host-network/01-submit-spark-app.yaml
new file mode 100644
index 00000000..bbd6847c
--- /dev/null
+++ b/tests/test-host-network/01-submit-spark-app.yaml
@@ -0,0 +1,37 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "15"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ hostNetwork: true
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ hostNetwork: true
+ labels:
+ version: 3.0.0
diff --git a/tests/test-host-network/02-assert.yaml b/tests/test-host-network/02-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-host-network/02-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-job-submission/00-install-operator.yaml b/tests/test-job-submission/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-job-submission/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-job-submission/01-assert.yaml b/tests/test-job-submission/01-assert.yaml
new file mode 100644
index 00000000..1be03dd3
--- /dev/null
+++ b/tests/test-job-submission/01-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-job-submission/01-spark-pi.yaml b/tests/test-job-submission/01-spark-pi.yaml
new file mode 100644
index 00000000..85893671
--- /dev/null
+++ b/tests/test-job-submission/01-spark-pi.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+apply:
+- ../manifests/spark-pi.yaml
diff --git a/tests/test-mount-spark-configmap/00-install-operator.yaml b/tests/test-mount-spark-configmap/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-mount-spark-configmap/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-mount-spark-configmap/01-assert.yaml b/tests/test-mount-spark-configmap/01-assert.yaml
new file mode 100644
index 00000000..05ef215c
--- /dev/null
+++ b/tests/test-mount-spark-configmap/01-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: hadoop-configmap
diff --git a/tests/test-mount-spark-configmap/01-create-config-maps.yaml b/tests/test-mount-spark-configmap/01-create-config-maps.yaml
new file mode 100644
index 00000000..8401020d
--- /dev/null
+++ b/tests/test-mount-spark-configmap/01-create-config-maps.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: kubectl create configmap hadoop-configmap --from-file=core-site.xml -n $NAMESPACE
diff --git a/tests/test-mount-spark-configmap/02-assert.yaml b/tests/test-mount-spark-configmap/02-assert.yaml
new file mode 100644
index 00000000..a5eea214
--- /dev/null
+++ b/tests/test-mount-spark-configmap/02-assert.yaml
@@ -0,0 +1,14 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+status:
+ phase: Running
diff --git a/tests/test-mount-spark-configmap/02-submit-spark-app.yaml b/tests/test-mount-spark-configmap/02-submit-spark-app.yaml
new file mode 100644
index 00000000..53d3504f
--- /dev/null
+++ b/tests/test-mount-spark-configmap/02-submit-spark-app.yaml
@@ -0,0 +1,35 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "20"
+ hadoopConfigMap: hadoop-configmap
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ labels:
+ version: 3.0.0
diff --git a/tests/test-mount-spark-configmap/03-run-command.yaml b/tests/test-mount-spark-configmap/03-run-command.yaml
new file mode 100644
index 00000000..9467d4d8
--- /dev/null
+++ b/tests/test-mount-spark-configmap/03-run-command.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
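+# verify that HADOOP_CONF_DIR is set in the driver pod and points at a directory containing core-site.xml from the mounted config map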
+ - script: kubectl exec mock-task-runner-driver -n $NAMESPACE -- sh -c
+ 'set -x; [ ! -z "$HADOOP_CONF_DIR" ] && [ -f "$HADOOP_CONF_DIR/core-site.xml" ]'
diff --git a/tests/test-mount-spark-configmap/04-assert.yaml b/tests/test-mount-spark-configmap/04-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-mount-spark-configmap/04-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/resources/test-mount-config-map/core-site.xml b/tests/test-mount-spark-configmap/core-site.xml
similarity index 100%
rename from tests/resources/test-mount-config-map/core-site.xml
rename to tests/test-mount-spark-configmap/core-site.xml
diff --git a/tests/test-multy-tenancy/00-assert.yaml b/tests/test-multy-tenancy/00-assert.yaml
new file mode 100644
index 00000000..46da6f73
--- /dev/null
+++ b/tests/test-multy-tenancy/00-assert.yaml
@@ -0,0 +1,19 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-1
+ namespace: spark-namespace-1
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
+---
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-2
+ namespace: spark-namespace-2
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
diff --git a/tests/test-multy-tenancy/00-two-instances-different-ns.yaml b/tests/test-multy-tenancy/00-two-instances-different-ns.yaml
new file mode 100644
index 00000000..b989cffe
--- /dev/null
+++ b/tests/test-multy-tenancy/00-two-instances-different-ns.yaml
@@ -0,0 +1,19 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl create ns spark-namespace-$i
+ done
+ - script: |
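+ # install a separate operator instance into each namespace, each watching only its own namespace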
+ for i in `seq 2`
+ do
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --instance=spark-$i \
+ --wait \
+ --namespace=spark-namespace-$i \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=spark-namespace-$i
+ done
diff --git a/tests/test-multy-tenancy/01-assert.yaml b/tests/test-multy-tenancy/01-assert.yaml
new file mode 100644
index 00000000..66771b67
--- /dev/null
+++ b/tests/test-multy-tenancy/01-assert.yaml
@@ -0,0 +1,17 @@
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-1
+status:
+ applicationState:
+ state: COMPLETED
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-2
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-multy-tenancy/01-submit-spark-app-1.yaml b/tests/test-multy-tenancy/01-submit-spark-app-1.yaml
new file mode 100644
index 00000000..24675f38
--- /dev/null
+++ b/tests/test-multy-tenancy/01-submit-spark-app-1.yaml
@@ -0,0 +1,35 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-1
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: org.apache.spark.examples.SparkPi
+ mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar"
+ arguments:
+ - "10"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-1-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-multy-tenancy/01-submit-spark-app-2.yaml b/tests/test-multy-tenancy/01-submit-spark-app-2.yaml
new file mode 100644
index 00000000..3612ea3a
--- /dev/null
+++ b/tests/test-multy-tenancy/01-submit-spark-app-2.yaml
@@ -0,0 +1,35 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-2
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: org.apache.spark.examples.SparkPi
+ mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar"
+ arguments:
+ - "10"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-2-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-multy-tenancy/02-cleanup.yaml b/tests/test-multy-tenancy/02-cleanup.yaml
new file mode 100644
index 00000000..15c3f8f5
--- /dev/null
+++ b/tests/test-multy-tenancy/02-cleanup.yaml
@@ -0,0 +1,8 @@
+apiVersion: kudo.dev/v1alpha1
+kind: TestStep
+commands:
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl delete ns spark-namespace-$i
+ done
diff --git a/tests/test-multy-tenancy/02-errors.yaml b/tests/test-multy-tenancy/02-errors.yaml
new file mode 100644
index 00000000..0b4f77d6
--- /dev/null
+++ b/tests/test-multy-tenancy/02-errors.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-namespace-1
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-namespace-2
diff --git a/tests/test-multy-tenancy/03-assert.yaml b/tests/test-multy-tenancy/03-assert.yaml
new file mode 100644
index 00000000..e4d1d459
--- /dev/null
+++ b/tests/test-multy-tenancy/03-assert.yaml
@@ -0,0 +1,19 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark
+ namespace: spark-namespace-1
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
+---
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark
+ namespace: spark-namespace-2
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
diff --git a/tests/test-multy-tenancy/03-two-instances-same-name-different-ns.yaml b/tests/test-multy-tenancy/03-two-instances-same-name-different-ns.yaml
new file mode 100644
index 00000000..fdb98d1a
--- /dev/null
+++ b/tests/test-multy-tenancy/03-two-instances-same-name-different-ns.yaml
@@ -0,0 +1,19 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl create ns spark-namespace-$i
+ done
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --instance=spark \
+ --wait \
+ --namespace=spark-namespace-$i \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=spark-namespace-$i
+ done
diff --git a/tests/test-multy-tenancy/04-assert.yaml b/tests/test-multy-tenancy/04-assert.yaml
new file mode 100644
index 00000000..66771b67
--- /dev/null
+++ b/tests/test-multy-tenancy/04-assert.yaml
@@ -0,0 +1,17 @@
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-1
+status:
+ applicationState:
+ state: COMPLETED
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi
+ namespace: spark-namespace-2
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-multy-tenancy/04-test-spark-app.yaml b/tests/test-multy-tenancy/04-test-spark-app.yaml
new file mode 100644
index 00000000..aadcf738
--- /dev/null
+++ b/tests/test-multy-tenancy/04-test-spark-app.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl apply -f ../manifests/spark-pi.yaml -n spark-namespace-$i
+ done
diff --git a/tests/test-multy-tenancy/05-cleanup.yaml b/tests/test-multy-tenancy/05-cleanup.yaml
new file mode 100644
index 00000000..15c3f8f5
--- /dev/null
+++ b/tests/test-multy-tenancy/05-cleanup.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ for i in `seq 2`
+ do
+ kubectl delete ns spark-namespace-$i
+ done
diff --git a/tests/test-multy-tenancy/05-errors.yaml b/tests/test-multy-tenancy/05-errors.yaml
new file mode 100644
index 00000000..0b4f77d6
--- /dev/null
+++ b/tests/test-multy-tenancy/05-errors.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-namespace-1
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-namespace-2
diff --git a/tests/test-pod-affinity-and-toleration/00-install-operator.yaml b/tests/test-pod-affinity-and-toleration/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-pod-affinity-and-toleration/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-pod-affinity-and-toleration/01-assert.yaml b/tests/test-pod-affinity-and-toleration/01-assert.yaml
new file mode 100644
index 00000000..09157108
--- /dev/null
+++ b/tests/test-pod-affinity-and-toleration/01-assert.yaml
@@ -0,0 +1,76 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: driver
+ sparkoperator.k8s.io/app-name: mock-task-runner
+ name: mock-task-runner-driver
+spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ - effect: NoExecute
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ tolerationSeconds: 300
+ - effect: NoExecute
+ key: node.kubernetes.io/unreachable
+ operator: Exists
+ tolerationSeconds: 300
+ - effect: NoSchedule
+ key: NonExistingLabel
+ operator: Exists
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: executor
+ sparkoperator.k8s.io/app-name: mock-task-runner
+spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ - effect: NoExecute
+ key: node.kubernetes.io/not-ready
+ operator: Exists
+ tolerationSeconds: 300
+ - effect: NoExecute
+ key: node.kubernetes.io/unreachable
+ operator: Exists
+ tolerationSeconds: 300
+ - effect: NoSchedule
+ key: NonExistingLabel
+ operator: Exists
diff --git a/tests/test-pod-affinity-and-toleration/01-submit-spark-application.yaml b/tests/test-pod-affinity-and-toleration/01-submit-spark-application.yaml
new file mode 100644
index 00000000..03bb061e
--- /dev/null
+++ b/tests/test-pod-affinity-and-toleration/01-submit-spark-application.yaml
@@ -0,0 +1,71 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "10"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
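+# The affinity rules and toleration below reference a non-existent label, so they do not effectively restrict scheduling; the test only verifies that they are propagated to the driver and executor pods (see 01-assert.yaml).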
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ - key: NonExistingLabel
+ operator: Exists
+ effect: NoSchedule
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: NonExistingLabel
+ operator: DoesNotExist
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ - key: NonExistingLabel
+ operator: Exists
+ effect: NoSchedule
diff --git a/tests/test-pod-affinity-and-toleration/02-assert.yaml b/tests/test-pod-affinity-and-toleration/02-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-pod-affinity-and-toleration/02-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-python-support/00-install-operator.yaml b/tests/test-python-support/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-python-support/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-python-support/01-assert.yaml b/tests/test-python-support/01-assert.yaml
new file mode 100644
index 00000000..fd0f55de
--- /dev/null
+++ b/tests/test-python-support/01-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-pi-python
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/templates/spark-pi-python.yaml b/tests/test-python-support/01-submit-spark-app.yaml
similarity index 56%
rename from tests/templates/spark-pi-python.yaml
rename to tests/test-python-support/01-submit-spark-app.yaml
index f77a9820..31372e23 100644
--- a/tests/templates/spark-pi-python.yaml
+++ b/tests/test-python-support/01-submit-spark-app.yaml
@@ -1,29 +1,31 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: spark-pi-python
spec:
type: Python
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainApplicationFile: "local:///opt/spark/examples/src/main/python/pi.py"
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
"spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 1
memory: "512m"
+ deleteOnTermination: false
labels:
- version: {{ .SparkVersion }}
+ version: 3.0.0
diff --git a/tests/test-r-support/00-install-operator.yaml b/tests/test-r-support/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-r-support/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-r-support/01-assert.yaml b/tests/test-r-support/01-assert.yaml
new file mode 100644
index 00000000..0bc839cf
--- /dev/null
+++ b/tests/test-r-support/01-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-r-als
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/templates/spark-r-als.yaml b/tests/test-r-support/01-submit-spark-app.yaml
similarity index 56%
rename from tests/templates/spark-r-als.yaml
rename to tests/test-r-support/01-submit-spark-app.yaml
index 11fc9835..6097186c 100644
--- a/tests/templates/spark-r-als.yaml
+++ b/tests/test-r-support/01-submit-spark-app.yaml
@@ -1,29 +1,31 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: spark-r-als
spec:
type: R
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainApplicationFile: "local:///opt/spark/examples/src/main/r/ml/als.R"
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
"spark.scheduler.minRegisteredResourcesRatio": "1.0"
- sparkVersion: {{ .SparkVersion }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
labels:
- version: {{ .SparkVersion }}
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 1
memory: "512m"
+ deleteOnTermination: false
labels:
- version: {{ .SparkVersion }}
+ version: 3.0.0
diff --git a/tests/test-rbac/00-create-namespace.yaml b/tests/test-rbac/00-create-namespace.yaml
new file mode 100644
index 00000000..3b7f07e9
--- /dev/null
+++ b/tests/test-rbac/00-create-namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-rbac-test
diff --git a/tests/test-rbac/01-assert.yaml b/tests/test-rbac/01-assert.yaml
new file mode 100644
index 00000000..7b77b0de
--- /dev/null
+++ b/tests/test-rbac/01-assert.yaml
@@ -0,0 +1,47 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-default-rbac
+ namespace: spark-rbac-test
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ namespace: spark-rbac-test
+ name: spark-default-rbac-spark-role
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: spark-default-rbac-spark-rb
+ namespace: spark-rbac-test
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: spark-default-rbac-spark-role
+subjects:
+- kind: ServiceAccount
+ name: spark-default-rbac-spark-service-account
+ namespace: spark-rbac-test
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: spark-default-rbac-spark-rbac-test-cr
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: spark-default-rbac-spark-rbac-test-crb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: spark-default-rbac-spark-rbac-test-cr
+subjects:
+- kind: ServiceAccount
+ name: spark-default-rbac-spark-operator-service-account
+ namespace: spark-rbac-test
diff --git a/tests/test-rbac/01-install-operator.yaml b/tests/test-rbac/01-install-operator.yaml
new file mode 100644
index 00000000..185571db
--- /dev/null
+++ b/tests/test-rbac/01-install-operator.yaml
@@ -0,0 +1,13 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=spark-rbac-test \
+ --instance=spark-default-rbac \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=spark-rbac-test
+assert:
+- ../manifests/kudo-controller.yaml
diff --git a/tests/test-rbac/02-uninstall-operator.yaml b/tests/test-rbac/02-uninstall-operator.yaml
new file mode 100644
index 00000000..7419499d
--- /dev/null
+++ b/tests/test-rbac/02-uninstall-operator.yaml
@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+delete:
+- apiVersion: kudo.dev/v1beta1
+ kind: Instance
+ metadata:
+ name: spark-default-rbac
diff --git a/tests/test-rbac/03-create-rbac.yaml b/tests/test-rbac/03-create-rbac.yaml
new file mode 100644
index 00000000..78d375f4
--- /dev/null
+++ b/tests/test-rbac/03-create-rbac.yaml
@@ -0,0 +1,80 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: spark-custom-rbac-spark-service-account
+ namespace: spark-rbac-test
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: spark-driver-test-role
+ namespace: spark-rbac-test
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["services"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: spark-driver-role-test-binding
+ namespace: spark-rbac-test
+subjects:
+- kind: ServiceAccount
+ name: spark-custom-rbac-spark-service-account
+roleRef:
+ kind: Role
+ name: spark-driver-test-role
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: spark-operator-custom-rbac-service-account
+ namespace: spark-rbac-test
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: spark-operator-custom-rbac-cluster-role
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["services", "configmaps", "secrets"]
+ verbs: ["create", "get", "delete", "update"]
+- apiGroups: ["extensions"]
+ resources: ["ingresses"]
+ verbs: ["create", "get", "delete"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["sparkoperator.k8s.io"]
+ resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: spark-operator-custom-rbac-cluster-role-binding
+subjects:
+- kind: ServiceAccount
+ name: spark-operator-custom-rbac-service-account
+ namespace: spark-rbac-test
+roleRef:
+ kind: ClusterRole
+ name: spark-operator-custom-rbac-cluster-role
+ apiGroup: rbac.authorization.k8s.io
diff --git a/tests/test-rbac/04-assert.yaml b/tests/test-rbac/04-assert.yaml
new file mode 100644
index 00000000..c69c2a93
--- /dev/null
+++ b/tests/test-rbac/04-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-with-custom-rbac
+ namespace: spark-rbac-test
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
diff --git a/tests/test-rbac/04-install-operator.yaml b/tests/test-rbac/04-install-operator.yaml
new file mode 100644
index 00000000..f2a5c0b3
--- /dev/null
+++ b/tests/test-rbac/04-install-operator.yaml
@@ -0,0 +1,18 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+ kubectl kudo install "$ROOT_DIR"/operators/repository/spark/operator \
+ --namespace=spark-rbac-test \
+ --instance=spark-with-custom-rbac \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=spark-rbac-test \
+ -p createOperatorServiceAccount=false \
+ -p createSparkServiceAccount=false \
+ -p operatorServiceAccountName=spark-operator-custom-rbac-service-account \
+ -p sparkServiceAccountName=spark-custom-rbac-spark-service-account \
+ -p createRBAC=false
+assert:
+- ../manifests/kudo-controller.yaml
diff --git a/tests/test-rbac/05-assert.yaml b/tests/test-rbac/05-assert.yaml
new file mode 100644
index 00000000..b7f1a4d5
--- /dev/null
+++ b/tests/test-rbac/05-assert.yaml
@@ -0,0 +1,16 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+ namespace: spark-rbac-test
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-rbac/05-submit-spark-app.yaml b/tests/test-rbac/05-submit-spark-app.yaml
new file mode 100644
index 00000000..835b1460
--- /dev/null
+++ b/tests/test-rbac/05-submit-spark-app.yaml
@@ -0,0 +1,36 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+ namespace: spark-rbac-test
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "1"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-custom-rbac-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-rbac/06-delete-ns.yaml b/tests/test-rbac/06-delete-ns.yaml
new file mode 100644
index 00000000..5afc87eb
--- /dev/null
+++ b/tests/test-rbac/06-delete-ns.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: kubectl delete ns spark-rbac-test
diff --git a/tests/test-rbac/06-errors.yaml b/tests/test-rbac/06-errors.yaml
new file mode 100644
index 00000000..3b7f07e9
--- /dev/null
+++ b/tests/test-rbac/06-errors.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: spark-rbac-test
diff --git a/tests/test-rpc-and-ssl/00-install-operator.yaml b/tests/test-rpc-and-ssl/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-rpc-and-ssl/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-rpc-and-ssl/01-assert.yaml b/tests/test-rpc-and-ssl/01-assert.yaml
new file mode 100644
index 00000000..79e3d583
--- /dev/null
+++ b/tests/test-rpc-and-ssl/01-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ssl-secrets
diff --git a/tests/test-rpc-and-ssl/01-create-secrets.yaml b/tests/test-rpc-and-ssl/01-create-secrets.yaml
new file mode 100644
index 00000000..50f58e40
--- /dev/null
+++ b/tests/test-rpc-and-ssl/01-create-secrets.yaml
@@ -0,0 +1,11 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: keytool -genkeypair -keystore keystore.jks -keyalg RSA -alias selfsigned
+ -dname "CN=sparkcert OU=KUDO O=D2IQ L=SF S=CA C=US" -storepass changeit -keypass changeit
+- command: keytool -exportcert -keystore keystore.jks -alias selfsigned -storepass changeit -file test.cert
+- command: keytool -importcert -keystore truststore.jks -alias selfsigned -storepass changeit -file test.cert -noprompt
+- command: kubectl create secret generic secrets --from-literal=key-password=changeit
+ --from-literal=keystore-password=changeit --from-literal=truststore-password=changeit --namespace=$NAMESPACE
+- command: kubectl create secret generic ssl-secrets --from-file=keystore.jks --from-file=truststore.jks --namespace=$NAMESPACE
+- script: rm *.jks *.cert
diff --git a/tests/test-rpc-and-ssl/02-assert.yaml b/tests/test-rpc-and-ssl/02-assert.yaml
new file mode 100644
index 00000000..4e4240f0
--- /dev/null
+++ b/tests/test-rpc-and-ssl/02-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-mock-task-runner-with-auth
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-rpc-and-ssl/02-test-rpc-auth-and-encryption.yaml b/tests/test-rpc-and-ssl/02-test-rpc-auth-and-encryption.yaml
new file mode 100644
index 00000000..15a0a2b9
--- /dev/null
+++ b/tests/test-rpc-and-ssl/02-test-rpc-auth-and-encryption.yaml
@@ -0,0 +1,38 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-mock-task-runner-with-auth
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.authenticate": "true"
+ "spark.network.crypto.enabled": "true"
+ "spark.authenticate.enableSaslEncryption": "true"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ arguments:
+ - "1"
+ - "1"
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-rpc-and-ssl/03-assert.yaml b/tests/test-rpc-and-ssl/03-assert.yaml
new file mode 100644
index 00000000..a316d3db
--- /dev/null
+++ b/tests/test-rpc-and-ssl/03-assert.yaml
@@ -0,0 +1,14 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: spark-mock-task-runner-with-ssl-driver
+status:
+ phase: Running
diff --git a/tests/test-rpc-and-ssl/03-test-ssl.yaml b/tests/test-rpc-and-ssl/03-test-ssl.yaml
new file mode 100644
index 00000000..0bd29b04
--- /dev/null
+++ b/tests/test-rpc-and-ssl/03-test-ssl.yaml
@@ -0,0 +1,42 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-mock-task-runner-with-ssl
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ "spark.ssl.enabled": "true"
+ "spark.ssl.protocol": "TLSv1.2"
+ "spark.ssl.keyStore": "/tmp/spark/ssl/keystore.jks"
+ "spark.ssl.trustStore": "/tmp/spark/ssl/truststore.jks"
+ "spark.kubernetes.driver.secrets.ssl-secrets": "/tmp/spark/ssl"
+ "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_KEYPASSWORD": "secrets:key-password"
+ "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_KEYSTOREPASSWORD": "secrets:keystore-password"
+ "spark.kubernetes.driver.secretKeyRef.SPARK_SSL_TRUSTSTOREPASSWORD": "secrets:truststore-password"
+ sparkVersion: 3.0.0
+ arguments:
+ - "1"
+ - "10"
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
diff --git a/tests/test-rpc-and-ssl/04-assert.yaml b/tests/test-rpc-and-ssl/04-assert.yaml
new file mode 100644
index 00000000..3b945897
--- /dev/null
+++ b/tests/test-rpc-and-ssl/04-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-mock-task-runner-with-ssl
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-rpc-and-ssl/04-curl.yaml b/tests/test-rpc-and-ssl/04-curl.yaml
new file mode 100644
index 00000000..83a094fe
--- /dev/null
+++ b/tests/test-rpc-and-ssl/04-curl.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
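+# Port 4440 is the HTTPS port of the driver UI when spark.ssl.enabled is set (derived from the default UI port 4040).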
+commands:
+ - script: |
+ kubectl exec spark-mock-task-runner-with-ssl-driver -n $NAMESPACE -- sh -c \
+ "curl \
+ --insecure \
+ --location \
+ --include \
+ --connect-timeout 5 \
+ --max-time 10 \
+ --retry 10 \
+ --retry-delay 0 \
+ --retry-max-time 60 \
+ --retry-connrefused https://localhost:4440"
diff --git a/tests/test-s3/00-install-operator.yaml b/tests/test-s3/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-s3/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-s3/01-assert.yaml b/tests/test-s3/01-assert.yaml
new file mode 100644
index 00000000..d4b27db6
--- /dev/null
+++ b/tests/test-s3/01-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: aws-credentials
diff --git a/tests/test-s3/01-create-secret.yaml b/tests/test-s3/01-create-secret.yaml
new file mode 100644
index 00000000..fb9b0920
--- /dev/null
+++ b/tests/test-s3/01-create-secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+ kubectl create secret generic aws-credentials -n $NAMESPACE \
+ --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
+ --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
+ --from-literal=AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN
diff --git a/tests/test-s3/02-create-bucket-folder.yaml b/tests/test-s3/02-create-bucket-folder.yaml
new file mode 100644
index 00000000..4f158018
--- /dev/null
+++ b/tests/test-s3/02-create-bucket-folder.yaml
@@ -0,0 +1,9 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
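+# Generates a unique S3 key prefix and stores it in .tmp/s3_bucket_path for the submit and cleanup steps that follow.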
+commands:
+ - script: |
+ set -x
+
+ mkdir -p .tmp
+ folder_name=$(head /dev/urandom | LC_ALL=C LC_CTYPE=C tr -dc A-Za-z0-9 | head -c10)
+ echo "$AWS_BUCKET_PATH/spark-s3-readwrite/${folder_name}/" > .tmp/s3_bucket_path;
diff --git a/tests/test-s3/03-assert.yaml b/tests/test-s3/03-assert.yaml
new file mode 100644
index 00000000..534a3cc9
--- /dev/null
+++ b/tests/test-s3/03-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: spark-s3-readwrite
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/test-s3/03-submit-app.yaml b/tests/test-s3/03-submit-app.yaml
new file mode 100644
index 00000000..2ba55711
--- /dev/null
+++ b/tests/test-s3/03-submit-app.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ sed \
+ -e "s|{{S3_READ_URL}}|s3a://$AWS_BUCKET_NAME/$AWS_BUCKET_PATH/spark-s3-readwrite/README.md|g" \
+ -e "s|{{S3_WRITE_URL}}|s3a://$AWS_BUCKET_NAME/$(cat .tmp/s3_bucket_path)|g" \
+ templates/spark-s3-readwrite.yaml | kubectl apply -n $NAMESPACE -f -
diff --git a/tests/test-s3/04-grep-log.yaml b/tests/test-s3/04-grep-log.yaml
new file mode 100644
index 00000000..8e52b686
--- /dev/null
+++ b/tests/test-s3/04-grep-log.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: kubectl logs spark-s3-readwrite-driver -n $NAMESPACE | grep "Wrote 105 lines"
diff --git a/tests/test-s3/05-cleanup.yaml b/tests/test-s3/05-cleanup.yaml
new file mode 100644
index 00000000..77c736e3
--- /dev/null
+++ b/tests/test-s3/05-cleanup.yaml
@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ set -x
+ aws s3 rm --recursive s3://$AWS_BUCKET_NAME/$(cat .tmp/s3_bucket_path)
+ rm -rf .tmp
diff --git a/tests/test-s3/templates/spark-s3-readwrite.yaml b/tests/test-s3/templates/spark-s3-readwrite.yaml
new file mode 100644
index 00000000..eed64a7e
--- /dev/null
+++ b/tests/test-s3/templates/spark-s3-readwrite.yaml
@@ -0,0 +1,57 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: spark-s3-readwrite
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: S3Job
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ "spark.hadoop.fs.s3a.impl": "org.apache.hadoop.fs.s3a.S3AFileSystem"
+ # uncomment the following line to enable Temporary AWS credentials support
+ # "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
+ sparkVersion: 3.0.0
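+# {{S3_READ_URL}} and {{S3_WRITE_URL}} are placeholders substituted with sed in 03-submit-app.yaml before this manifest is applied.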
+ arguments:
+ - "--readUrl"
+ - "{{S3_READ_URL}}"
+ - "--writeUrl"
+ - "{{S3_WRITE_URL}}"
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_ACCESS_KEY_ID
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_SECRET_ACCESS_KEY
+ - name: AWS_SESSION_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_SESSION_TOKEN
+ optional: true
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-security/00-install-operator.yaml b/tests/test-security/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-security/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-security/01-assert.yaml b/tests/test-security/01-assert.yaml
new file mode 100644
index 00000000..b3f72200
--- /dev/null
+++ b/tests/test-security/01-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: spark-secret
diff --git a/tests/test-security/01-create-secret.yaml b/tests/test-security/01-create-secret.yaml
new file mode 100644
index 00000000..847390a4
--- /dev/null
+++ b/tests/test-security/01-create-secret.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: kubectl create secret generic spark-secret --from-literal=secretKey=secretValue -n $NAMESPACE
diff --git a/tests/test-security/02-assert.yaml b/tests/test-security/02-assert.yaml
new file mode 100644
index 00000000..fa3cf2f6
--- /dev/null
+++ b/tests/test-security/02-assert.yaml
@@ -0,0 +1,36 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+ labels:
+ sparkoperator.k8s.io/app-name: mock-task-runner
+spec:
+ securityContext:
+ runAsGroup: 65534
+ runAsNonRoot: true
+ runAsUser: 65534
+status:
+ phase: Running
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ spark-role: executor
+ sparkoperator.k8s.io/app-name: mock-task-runner
+spec:
+ securityContext:
+ runAsUser: 65534
+ runAsNonRoot: true
+ runAsGroup: 65534
+status:
+ phase: Running
+
diff --git a/tests/test-security/02-submit-spark-app.yaml b/tests/test-security/02-submit-spark-app.yaml
new file mode 100644
index 00000000..c2e0087a
--- /dev/null
+++ b/tests/test-security/02-submit-spark-app.yaml
@@ -0,0 +1,59 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "15"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ envSecretKeyRefs:
+ SECRET_ENV:
+ name: spark-secret
+ key: secretKey
+ secrets:
+ - name: spark-secret
+ path: /mnt/secrets
+ secretType: Opaque
+ securityContext:
+ runAsUser: 65534
+ runAsGroup: 65534
+ runAsNonRoot: true
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ envSecretKeyRefs:
+ SECRET_ENV:
+ name: spark-secret
+ key: secretKey
+ secrets:
+ - name: spark-secret
+ path: /mnt/secrets
+ secretType: Opaque
+ securityContext:
+ runAsUser: 65534
+ runAsGroup: 65534
+ runAsNonRoot: true
diff --git a/tests/test-security/03-assert.yaml b/tests/test-security/03-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-security/03-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-security/03-check-driver-pod.yaml b/tests/test-security/03-check-driver-pod.yaml
new file mode 100644
index 00000000..ba80398a
--- /dev/null
+++ b/tests/test-security/03-check-driver-pod.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
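+# Verifies inside the driver pod that the secret is exposed both as the SECRET_ENV environment variable and as a file mounted under /mnt/secrets.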
+commands:
+ - script: |
+ kubectl exec mock-task-runner-driver --namespace=$NAMESPACE -- sh -c \
+ 'set -x; [ ! -z "$SECRET_ENV" ] && [ -f "/mnt/secrets/secretKey" ]'
diff --git a/tests/test-service-accounts/00-assert.yaml b/tests/test-service-accounts/00-assert.yaml
new file mode 100644
index 00000000..7ac4b1a6
--- /dev/null
+++ b/tests/test-service-accounts/00-assert.yaml
@@ -0,0 +1,22 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-default-sa
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ kudo.dev/instance: spark-default-sa
+ name: spark-default-sa-spark-service-account
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ kudo.dev/instance: spark-default-sa
+ name: spark-default-sa-spark-operator-service-account
diff --git a/tests/test-service-accounts/00-install-operator.yaml b/tests/test-service-accounts/00-install-operator.yaml
new file mode 100644
index 00000000..8b24156f
--- /dev/null
+++ b/tests/test-service-accounts/00-install-operator.yaml
@@ -0,0 +1,13 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=$NAMESPACE \
+ --instance=spark-default-sa \
+ --wait \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION
+assert:
+- ../manifests/kudo-controller.yaml
diff --git a/tests/test-service-accounts/01-uninstall-operator.yaml b/tests/test-service-accounts/01-uninstall-operator.yaml
new file mode 100644
index 00000000..ce423096
--- /dev/null
+++ b/tests/test-service-accounts/01-uninstall-operator.yaml
@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+delete:
+- apiVersion: kudo.dev/v1beta1
+ kind: Instance
+ metadata:
+ name: spark-default-sa
diff --git a/tests/test-service-accounts/02-create-sa.yaml b/tests/test-service-accounts/02-create-sa.yaml
new file mode 100644
index 00000000..6029a9ba
--- /dev/null
+++ b/tests/test-service-accounts/02-create-sa.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: spark-operator-custom-sa
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: spark-custom-sa
diff --git a/tests/test-service-accounts/03-assert.yaml b/tests/test-service-accounts/03-assert.yaml
new file mode 100644
index 00000000..ce9923b6
--- /dev/null
+++ b/tests/test-service-accounts/03-assert.yaml
@@ -0,0 +1,8 @@
+apiVersion: kudo.dev/v1beta1
+kind: Instance
+metadata:
+ name: spark-with-custom-sa
+status:
+ planStatus:
+ deploy:
+ status: COMPLETE
diff --git a/tests/test-service-accounts/03-install-operator.yaml b/tests/test-service-accounts/03-install-operator.yaml
new file mode 100644
index 00000000..3b7872fe
--- /dev/null
+++ b/tests/test-service-accounts/03-install-operator.yaml
@@ -0,0 +1,16 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=$NAMESPACE \
+ --instance=spark-with-custom-sa \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p createOperatorServiceAccount=false \
+ -p createSparkServiceAccount=false \
+ -p operatorServiceAccountName=spark-operator-custom-sa \
+ -p sparkServiceAccountName=spark-custom-sa
+assert:
+- ../manifests/kudo-controller.yaml
diff --git a/tests/test-service-accounts/04-assert.yaml b/tests/test-service-accounts/04-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-service-accounts/04-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-service-accounts/04-submit-spark-app.yaml b/tests/test-service-accounts/04-submit-spark-app.yaml
new file mode 100644
index 00000000..445aea84
--- /dev/null
+++ b/tests/test-service-accounts/04-submit-spark-app.yaml
@@ -0,0 +1,35 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "1"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-custom-sa
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-shuffle/00-install-operator.yaml b/tests/test-shuffle/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-shuffle/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-shuffle/01-assert.yaml b/tests/test-shuffle/01-assert.yaml
new file mode 100644
index 00000000..dabdd6ce
--- /dev/null
+++ b/tests/test-shuffle/01-assert.yaml
@@ -0,0 +1,15 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: sparkoperator.k8s.io/v1beta2
+kind: SparkApplication
+metadata:
+ name: shuffle-app
+status:
+ applicationState:
+ state: COMPLETED
diff --git a/tests/templates/spark-shuffle-job.yaml b/tests/test-shuffle/01-submit-app.yaml
similarity index 56%
rename from tests/templates/spark-shuffle-job.yaml
rename to tests/test-shuffle/01-submit-app.yaml
index 53c12007..a5bdc1ab 100644
--- a/tests/templates/spark-shuffle-job.yaml
+++ b/tests/test-shuffle/01-submit-app.yaml
@@ -1,34 +1,37 @@
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
- name: {{ .Name }}
- namespace: {{ .Namespace }}
+ name: shuffle-app
spec:
type: Scala
mode: cluster
- image: {{ .Image }}
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
imagePullPolicy: Always
mainClass: ShuffleApp
mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
- arguments: {{ range $i, $arg := index .Params "args" }}
- - "{{ $arg }}"{{ end }}
+ arguments:
+ - "4"
+ - "12000"
+ - "100"
+ - "4"
sparkConf:
"spark.scheduler.maxRegisteredResourcesWaitingTime": "3m"
"spark.scheduler.minRegisteredResourcesRatio": "1"
- sparkVersion: {{ .SparkVersion }}
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ sparkVersion: 3.0.0
restartPolicy:
type: Never
driver:
cores: 1
memory: "512m"
labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
- serviceAccount: {{ .ServiceAccount }}
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
executor:
cores: 1
- instances: {{ .ExecutorsCount }}
+ instances: 2
memory: "512m"
+ deleteOnTermination: false
labels:
- version: {{ .SparkVersion }}
- metrics-exposed: "true"
+ version: 3.0.0
diff --git a/tests/test-shuffle/02-check-driver-logs.yaml b/tests/test-shuffle/02-check-driver-logs.yaml
new file mode 100644
index 00000000..f9783af6
--- /dev/null
+++ b/tests/test-shuffle/02-check-driver-logs.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+ kubectl logs shuffle-app-driver -n $NAMESPACE | grep "Groups count: 12000"
diff --git a/tests/test-spark-history-server/00-assert.yaml b/tests/test-spark-history-server/00-assert.yaml
new file mode 100644
index 00000000..677f0b04
--- /dev/null
+++ b/tests/test-spark-history-server/00-assert.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: spark-conf
+type: Opaque
diff --git a/tests/test-spark-history-server/00-create-secret.yaml b/tests/test-spark-history-server/00-create-secret.yaml
new file mode 100644
index 00000000..1eef0cd4
--- /dev/null
+++ b/tests/test-spark-history-server/00-create-secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ sed \
+ -e "s|{{AWS_ACCESS_KEY_ID}}|$AWS_ACCESS_KEY_ID|g" \
+ -e "s|{{AWS_SECRET_ACCESS_KEY}}|$AWS_SECRET_ACCESS_KEY|g" \
+ -e "s|{{AWS_SESSION_TOKEN}}|$AWS_SESSION_TOKEN|g" templates/spark-conf.yaml | \
+ kubectl apply -n $NAMESPACE -f -
diff --git a/tests/test-spark-history-server/01-create-bucket-folder.yaml b/tests/test-spark-history-server/01-create-bucket-folder.yaml
new file mode 100644
index 00000000..16c3df42
--- /dev/null
+++ b/tests/test-spark-history-server/01-create-bucket-folder.yaml
@@ -0,0 +1,10 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+ set -x
+
+ mkdir -p .tmp
+ echo "$AWS_BUCKET_PATH/spark-history-server/$(head /dev/urandom | LC_ALL=C LC_CTYPE=C tr -dc A-Za-z0-9 | head -c10)/" > .tmp/s3_bucket_path
+
+ aws s3api put-object --bucket $AWS_BUCKET_NAME --key $(cat .tmp/s3_bucket_path)
diff --git a/tests/test-spark-history-server/02-assert.yaml b/tests/test-spark-history-server/02-assert.yaml
new file mode 100644
index 00000000..f45da4fc
--- /dev/null
+++ b/tests/test-spark-history-server/02-assert.yaml
@@ -0,0 +1,21 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 200
+collectors:
+- selector: app.kubernetes.io/name=spark-history-server
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ app.kubernetes.io/instance: spark
+ app.kubernetes.io/name: spark-history-server
+status:
+ phase: Running
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: spark-history-server
+status:
+ availableReplicas: 1
diff --git a/tests/test-spark-history-server/02-install-operator.yaml b/tests/test-spark-history-server/02-install-operator.yaml
new file mode 100644
index 00000000..768429be
--- /dev/null
+++ b/tests/test-spark-history-server/02-install-operator.yaml
@@ -0,0 +1,17 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ kubectl kudo install "${ROOT_DIR}"/operators/repository/spark/operator \
+ --namespace=$NAMESPACE \
+ --instance=spark \
+ --wait \
+ -p operatorImageName=$OPERATOR_DOCKER_REPO \
+ -p operatorVersion=$OPERATOR_VERSION \
+ -p sparkJobNamespace=$NAMESPACE \
+ -p enableHistoryServer=true \
+ -p historyServerFsLogDirectory=s3a://$AWS_BUCKET_NAME/$(cat .tmp/s3_bucket_path) \
+ -p historyServerSparkConfSecret=spark-conf
+assert:
+- ../manifests/kudo-controller.yaml
+- ../manifests/spark-instance.yaml
diff --git a/tests/test-spark-history-server/03-assert.yaml b/tests/test-spark-history-server/03-assert.yaml
new file mode 100644
index 00000000..d4b27db6
--- /dev/null
+++ b/tests/test-spark-history-server/03-assert.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: aws-credentials
diff --git a/tests/test-spark-history-server/03-create-secret.yaml b/tests/test-spark-history-server/03-create-secret.yaml
new file mode 100644
index 00000000..a86e07ed
--- /dev/null
+++ b/tests/test-spark-history-server/03-create-secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ kubectl create secret generic -n $NAMESPACE aws-credentials \
+ --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
+ --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
+ --from-literal=AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN
diff --git a/tests/test-spark-history-server/04-assert.yaml b/tests/test-spark-history-server/04-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-spark-history-server/04-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-spark-history-server/04-submit-app.yaml b/tests/test-spark-history-server/04-submit-app.yaml
new file mode 100644
index 00000000..06d9f5a2
--- /dev/null
+++ b/tests/test-spark-history-server/04-submit-app.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ sed -e "s|{{EVENT_LOG_DIR}}|s3a://$AWS_BUCKET_NAME/$(cat .tmp/s3_bucket_path)|g" \
+ templates/mock-task-runner.yaml | kubectl apply -n $NAMESPACE -f -
diff --git a/tests/test-spark-history-server/05-check-shs-api.yaml b/tests/test-spark-history-server/05-check-shs-api.yaml
new file mode 100644
index 00000000..0c1bcc5a
--- /dev/null
+++ b/tests/test-spark-history-server/05-check-shs-api.yaml
@@ -0,0 +1,12 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ set -x
+
+ shs_pod=$(kubectl get pod -n $NAMESPACE -l app.kubernetes.io/name=spark-history-server -o=jsonpath='{.items[0].metadata.name}')
+ spark_app=$(kubectl get pod -n $NAMESPACE -l spark-role=driver -o=jsonpath='{.items[0].metadata.labels.spark-app-selector}')
+
+ kubectl exec ${shs_pod} -n $NAMESPACE -- sh -c \
+ "curl --fail --show-error --retry 10 --retry-delay 0 --retry-max-time 60 --retry-connrefused \
+ http://localhost:18080/api/v1/applications/${spark_app}/jobs"
diff --git a/tests/test-spark-history-server/06-cleanup.yaml b/tests/test-spark-history-server/06-cleanup.yaml
new file mode 100644
index 00000000..1e65c001
--- /dev/null
+++ b/tests/test-spark-history-server/06-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+ - script: |
+ aws s3 rm --recursive s3://$AWS_BUCKET_NAME/$(cat .tmp/s3_bucket_path)
+ rm -rf .tmp
diff --git a/tests/test-spark-history-server/templates/mock-task-runner.yaml b/tests/test-spark-history-server/templates/mock-task-runner.yaml
new file mode 100644
index 00000000..78743558
--- /dev/null
+++ b/tests/test-spark-history-server/templates/mock-task-runner.yaml
@@ -0,0 +1,60 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "20"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ "spark.eventLog.enabled": "true"
+ "spark.eventLog.dir": "{{EVENT_LOG_DIR}}"
+ # uncomment the following line to enable Temporary AWS credentials support
+ # "spark.hadoop.fs.s3a.aws.credentials.provider": "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
+
+ deps:
+ jars:
+ - local:///opt/spark/examples/jars/scopt_2.12-3.7.1.jar
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ serviceAccount: spark-spark-service-account
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_ACCESS_KEY_ID
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_SECRET_ACCESS_KEY
+ - name: AWS_SESSION_TOKEN
+ valueFrom:
+ secretKeyRef:
+ name: aws-credentials
+ key: AWS_SESSION_TOKEN
+ optional: true
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
diff --git a/tests/test-spark-history-server/templates/spark-conf.yaml b/tests/test-spark-history-server/templates/spark-conf.yaml
new file mode 100644
index 00000000..3bd37f74
--- /dev/null
+++ b/tests/test-spark-history-server/templates/spark-conf.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: spark-conf
+type: Opaque
+stringData:
+ spark-defaults.conf: |
+ spark.hadoop.fs.s3a.access.key {{AWS_ACCESS_KEY_ID}}
+ spark.hadoop.fs.s3a.secret.key {{AWS_SECRET_ACCESS_KEY}}
+ spark.hadoop.fs.s3a.session.token {{AWS_SESSION_TOKEN}}
+ spark.hadoop.fs.s3a.impl org.apache.hadoop.fs.s3a.S3AFileSystem
+ # uncomment the following line to enable Temporary AWS credentials support
+ # spark.hadoop.fs.s3a.aws.credentials.provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider
+ spark.history.fs.update.interval 2s
diff --git a/tests/test-spark-operator-installation/00-install-operator.yaml b/tests/test-spark-operator-installation/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-spark-operator-installation/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-volume-mounts/00-install-operator.yaml b/tests/test-volume-mounts/00-install-operator.yaml
new file mode 120000
index 00000000..3636bf82
--- /dev/null
+++ b/tests/test-volume-mounts/00-install-operator.yaml
@@ -0,0 +1 @@
+../manifests/install-operator.yaml
\ No newline at end of file
diff --git a/tests/test-volume-mounts/01-assert.yaml b/tests/test-volume-mounts/01-assert.yaml
new file mode 100644
index 00000000..a5eea214
--- /dev/null
+++ b/tests/test-volume-mounts/01-assert.yaml
@@ -0,0 +1,14 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestAssert
+timeout: 120
+collectors:
+- selector: spark-role=driver
+- selector: spark-role=executor
+- selector: app.kubernetes.io/instance=spark
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mock-task-runner-driver
+status:
+ phase: Running
diff --git a/tests/test-volume-mounts/01-submit-spark-app.yaml b/tests/test-volume-mounts/01-submit-spark-app.yaml
new file mode 100644
index 00000000..ba9d1f82
--- /dev/null
+++ b/tests/test-volume-mounts/01-submit-spark-app.yaml
@@ -0,0 +1,49 @@
+apiVersion: "sparkoperator.k8s.io/v1beta2"
+kind: SparkApplication
+metadata:
+ name: mock-task-runner
+spec:
+ type: Scala
+ mode: cluster
+ image: mesosphere/spark-dev:e0f9eb2dcc71b2de6d3e0ce8a0f26c059430b946
+ imagePullPolicy: Always
+ mainClass: MockTaskRunner
+ mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar"
+ arguments:
+ - "1"
+ - "15"
+ sparkConf:
+ "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s"
+ "spark.scheduler.minRegisteredResourcesRatio": "1.0"
+ "spark.kubernetes.submission.connectionTimeout": "60000"
+ "spark.kubernetes.submission.requestTimeout": "60000"
+ "spark.local.dir": "/opt/spark/work-dir/tmp"
+ sparkVersion: 3.0.0
+ restartPolicy:
+ type: Never
+ volumes:
+ - name: test-volume
+ hostPath:
+ path: /data
+ type: DirectoryOrCreate
+ driver:
+ cores: 1
+ memory: "512m"
+ labels:
+ version: 3.0.0
+ metrics-exposed: "true"
+ serviceAccount: spark-spark-service-account
+ volumeMounts:
+ - name: test-volume
+ mountPath: /opt/spark/work-dir
+ executor:
+ cores: 1
+ instances: 1
+ memory: "512m"
+ deleteOnTermination: false
+ labels:
+ version: 3.0.0
+ metrics-exposed: "true"
+ volumeMounts:
+ - name: test-volume
+ mountPath: /opt/spark/work-dir
diff --git a/tests/test-volume-mounts/02-assert.yaml b/tests/test-volume-mounts/02-assert.yaml
new file mode 120000
index 00000000..cdc81b22
--- /dev/null
+++ b/tests/test-volume-mounts/02-assert.yaml
@@ -0,0 +1 @@
+../manifests/assert-mock-task-runner.yaml
\ No newline at end of file
diff --git a/tests/test-volume-mounts/02-run-command.yaml b/tests/test-volume-mounts/02-run-command.yaml
new file mode 100644
index 00000000..c5147076
--- /dev/null
+++ b/tests/test-volume-mounts/02-run-command.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- command: kubectl exec mock-task-runner-driver --namespace=$NAMESPACE --
+ sh -c "touch /opt/spark/work-dir/tmp/testfile; ls -l /opt/spark/work-dir/tmp/"
+# assert that the folder exists and is writable
diff --git a/tests/utils/aws_s3.go b/tests/utils/aws_s3.go
deleted file mode 100644
index 65ea227b..00000000
--- a/tests/utils/aws_s3.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package utils
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/s3"
-
- log "github.com/sirupsen/logrus"
-)
-
-const AwsAccessKeyId = "AWS_ACCESS_KEY_ID"
-const AwsSecretAccessKey = "AWS_SECRET_ACCESS_KEY"
-const AwsSessionToken = "AWS_SESSION_TOKEN"
-const AwsBucketName = "AWS_BUCKET_NAME"
-const AwsBucketPath = "AWS_BUCKET_PATH"
-
-// this method looks up for the following AWS environment variables:
-//
-// - AWS_ACCESS_KEY_ID (required)
-// - AWS_SECRET_ACCESS_KEY (required)
-// - AWS_SESSION_TOKEN (optional)
-//
-// returns a map with env variable as a key with a value converted to a []byte.
-// the error is returned if one of the required variables is not set.
-func GetAwsCredentials() (map[string][]byte, error) {
- awsAccessKeyId, err := checkEnvVar(AwsAccessKeyId)
- if err != nil {
- return nil, err
- }
-
- awsSecretAccessKey, err := checkEnvVar(AwsSecretAccessKey)
- if err != nil {
- return nil, err
- }
-
- awsEnvVars := map[string][]byte{
- AwsAccessKeyId: []byte(awsAccessKeyId),
- AwsSecretAccessKey: []byte(awsSecretAccessKey),
- }
-
- // support for Temporary Security Credentials
- if awsSessionToken, isPresent := os.LookupEnv(AwsSessionToken); isPresent && len(awsSessionToken) > 0 {
- awsEnvVars[AwsSessionToken] = []byte(awsSessionToken)
- }
- return awsEnvVars, nil
-}
-
-// method returns the name of S3 bucket from AWS_BUCKET_NAME env variable
-func GetS3BucketName() (string, error) {
- return checkEnvVar(AwsBucketName)
-}
-
-// method returns the S3 bucket path from AWS_BUCKET_PATH env variable
-func GetS3BucketPath() (string, error) {
- return checkEnvVar(AwsBucketPath)
-}
-
-// method checks env variable and returns its value
-// returns error if variable is not set or empty
-func checkEnvVar(varName string) (string, error) {
- value, isPresent := os.LookupEnv(varName)
- if !isPresent || len(value) == 0 {
- return "", fmt.Errorf("%s env variable is not set", varName)
- }
- return value, nil
-}
-
-// AwsS3CreateFolder will create a object in bucketName with folderPath as Key
-func AwsS3CreateFolder(bucketName string, folderPath string) error {
- file := strings.NewReader("")
- folderPath = strings.Trim(folderPath, "/")
- filePath := fmt.Sprintf("/%s/.tmp", folderPath)
-
- sess, err := session.NewSession(&aws.Config{
- Region: aws.String("us-west-2"),
- })
-
- if err != nil {
- return err
- }
-
- // Create S3 Service Client
- svcClient := s3.New(sess)
-
- // Put a tmp file to S3 bucket
- _, err = svcClient.PutObject(&s3.PutObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(filePath),
- Body: file,
- })
- if err != nil {
- log.Errorf("Unable to put Key '%s' in bucket '%s', %v", filePath, bucketName, err)
- return err
- }
- log.Infof("Successfully created folder '%s' in bucket '%s'", folderPath, bucketName)
- return nil
-}
-
-// AwsS3DeleteFolder deletes all objects at folderPath in bucketName
-func AwsS3DeleteFolder(bucketName string, folderPath string) error {
- folderPath = strings.Trim(folderPath, "/")
- folderPath = folderPath + "/"
-
- sess, err := session.NewSession(&aws.Config{
- Region: aws.String("us-west-2"),
- })
-
- if err != nil {
- return err
- }
-
- // Create S3 Service Client
- svcClient := s3.New(sess)
-
- // Fetch all the items present in the given folder
- response, err := svcClient.ListObjectsV2(&s3.ListObjectsV2Input{
- Bucket: aws.String(bucketName),
- Prefix: aws.String(folderPath),
- })
- if err != nil {
- log.Errorf("Unable to list items in bucket '%s' at key '%s', %v", bucketName, folderPath, err)
- return err
- }
-
- for _, item := range response.Contents {
- // Delete an item
- itemKey := "/" + *item.Key
- _, err = svcClient.DeleteObject(&s3.DeleteObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(itemKey),
- })
-
- if err != nil {
- log.Errorf("Unable to delete key '%s' in bucket '%s', %v", itemKey, bucketName, err)
- return err
- }
-
- err = svcClient.WaitUntilObjectNotExists(&s3.HeadObjectInput{
- Bucket: aws.String(bucketName),
- Key: aws.String(itemKey),
- })
-
- if err != nil {
- log.Errorf("Error occurred while waiting for object '%s' to be deleted, %v", itemKey, err)
- return err
- }
-
- log.Infof("Object '%s' successfully deleted\n", itemKey)
- }
- return nil
-}
diff --git a/tests/utils/common.go b/tests/utils/common.go
deleted file mode 100644
index 50b27b32..00000000
--- a/tests/utils/common.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package utils
-
-import (
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strings"
- "time"
-
- log "github.com/sirupsen/logrus"
-)
-
-const DefaultNamespace = "kudo-spark-operator-testing"
-const OperatorName = "spark"
-const DefaultServiceAccountSuffix = "-spark-service-account"
-const DefaultAwsSecretName = "aws-credentials"
-const rootDirName = "tests"
-const cmdLogFormat = "> %s %v\n%s"
-const DefaultRetryInterval = 5 * time.Second
-const DefaultRetryTimeout = 5 * time.Minute
-
-var OperatorImage = GetenvOr("OPERATOR_IMAGE", "mesosphere/kudo-spark-operator:2.4.5-1.0.1")
-var SparkImage = GetenvOr("SPARK_IMAGE", "mesosphere/spark:spark-2.4.5-hadoop-2.9-k8s")
-var SparkVersion = GetenvOr("SPARK_VERSION", "3.0.0")
-var TestDir = GetenvOr("TEST_DIR", goUpToRootDir())
-var KubeConfig = GetenvOr("KUBECONFIG", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
-
-func init() {
- log.SetFormatter(&log.TextFormatter{
- ForceColors: true,
- FullTimestamp: true,
- })
-
- log.Info(" -- Test run parameters --")
- log.Infof("Operator image:\t\t\t%s", OperatorImage)
- log.Infof("Spark image:\t\t\t%s", SparkImage)
- log.Infof("Spark version:\t\t\t%s", SparkVersion)
- log.Infof("Test directory:\t\t\t%s", TestDir)
- log.Infof("k8s config path:\t\t\t%s", KubeConfig)
-}
-
-func GetenvOr(key string, defaultValue string) string {
- val := os.Getenv(key)
- if len(val) == 0 {
- val = defaultValue
- }
- return val
-}
-
-func goUpToRootDir() string {
- workDir, _ := os.Getwd()
- for path.Base(workDir) != rootDirName {
- workDir = path.Dir(workDir)
- if workDir == "/" {
- panic("Can't find root test directory")
- }
- }
- return workDir
-}
-
-func Retry(fn func() error) error {
- return RetryWithTimeout(DefaultRetryTimeout, DefaultRetryInterval, fn)
-}
-
-func RetryWithTimeout(timeout time.Duration, interval time.Duration, fn func() error) error {
- timeoutPoint := time.Now().Add(timeout)
- var err error
-
- for err = fn(); err != nil && timeoutPoint.After(time.Now()); {
- log.Warn(err.Error())
- time.Sleep(interval)
- log.Warnf("Retrying... Timeout in %d seconds", int(timeoutPoint.Sub(time.Now()).Seconds()))
- err = fn()
- }
- return err
-}
-
-func RunAndLogCommandOutput(cmd *exec.Cmd) (string, error) {
- out, err := cmd.CombinedOutput()
-
- if err == nil {
- log.Infof(cmdLogFormat, cmd.Path, cmd.Args, out)
- } else {
- log.Errorf(cmdLogFormat, cmd.Path, cmd.Args, out)
- }
- return strings.TrimSpace(string(out)), err
-}
diff --git a/tests/utils/job.go b/tests/utils/job.go
deleted file mode 100644
index 2537c94b..00000000
--- a/tests/utils/job.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package utils
-
-import (
- "errors"
- "fmt"
- "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- log "github.com/sirupsen/logrus"
- "io/ioutil"
- v12 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
- "os"
-)
-
-type SparkJob struct {
- Name string
- Namespace string
- Image string
- SparkVersion string
- Template string
- ServiceAccount string
- Params map[string]interface{}
- Drivers int
- ExecutorsCount int
-}
-
-func (spark *SparkOperatorInstallation) SubmitJob(job *SparkJob) error {
-
- // Set default values
- if job.Namespace == "" {
- job.Namespace = spark.Namespace
- }
- if job.Image == "" {
- job.Image = SparkImage
- }
- if job.SparkVersion == "" {
- job.SparkVersion = SparkVersion
- }
- if job.ServiceAccount == "" {
- job.ServiceAccount = spark.InstanceName + DefaultServiceAccountSuffix
- }
- if job.ExecutorsCount == 0 {
- job.ExecutorsCount = 1
- }
-
- yamlFile := createSparkJob(*job)
- defer os.Remove(yamlFile)
-
- content, err := ioutil.ReadFile(yamlFile)
- if err != nil {
- log.Fatal(err)
- }
-
- log.Infof("Submitting the job:\n" + string(content))
- err = KubectlApply(job.Namespace, yamlFile)
-
- return err
-}
-
-func (spark *SparkOperatorInstallation) DriverPod(job SparkJob) (v12.Pod, error) {
- pod, err := spark.K8sClients.CoreV1().Pods(job.Namespace).Get(DriverPodName(job.Name), v1.GetOptions{})
- return *pod, err
-}
-
-func (spark *SparkOperatorInstallation) ExecutorPods(job SparkJob) ([]v12.Pod, error) {
- pods, err := spark.K8sClients.CoreV1().Pods(job.Namespace).List(v1.ListOptions{
- LabelSelector: fmt.Sprintf("spark-role=executor,sparkoperator.k8s.io/app-name=%s", job.Name),
- })
-
- if err != nil {
- return nil, err
- }
-
- return pods.Items, nil
-}
-
-func (spark *SparkOperatorInstallation) DriverLog(job SparkJob) (string, error) {
- driverPodName := DriverPodName(job.Name)
- return getPodLog(spark.K8sClients, job.Namespace, driverPodName, 0)
-}
-
-func (spark *SparkOperatorInstallation) DriverLogContains(job SparkJob, text string) (bool, error) {
- driverPodName := DriverPodName(job.Name)
- return podLogContains(spark.K8sClients, job.Namespace, driverPodName, text)
-}
-
-func (spark *SparkOperatorInstallation) SubmitAndWaitForExecutors(job *SparkJob) error {
- // Submit the job and wait for it to start
- err := spark.SubmitJob(job)
- if err != nil {
- return err
- }
-
- err = spark.WaitForJobState(*job, v1beta2.RunningState)
- if err != nil {
- return err
- }
-
- // Wait for correct number of executors to show up
- err = Retry(func() error {
- executors, err := spark.GetExecutorState(*job)
- if err != nil {
- return err
- } else if len(executors) != job.ExecutorsCount {
- return errors.New(fmt.Sprintf("The number of executors is %d, but %d is expected", len(executors), job.ExecutorsCount))
- }
- return nil
- })
- return err
-}
-
-func (spark *SparkOperatorInstallation) WaitForOutput(job SparkJob, text string) error {
- log.Infof("Waiting for the following text to appear in the driver log: %s", text)
- err := Retry(func() error {
- if contains, err := spark.DriverLogContains(job, text); !contains {
- if err != nil {
- return err
- } else {
- return errors.New("The driver log doesn't contain the text")
- }
- } else {
- log.Info("The text was found!")
- return nil
- }
- })
-
- if err != nil {
- log.Errorf("The text '%s' haven't appeared in the log in %s", text, DefaultRetryTimeout.String())
- logPodLogTail(spark.K8sClients, job.Namespace, DriverPodName(job.Name), 0) // 0 - print logs since pod's creation
- }
- return err
-}
-
-func (spark *SparkOperatorInstallation) WaitUntilSucceeded(job SparkJob) error {
- driverPodName := DriverPodName(job.Name)
- err := waitForPodStatusPhase(spark.K8sClients, driverPodName, job.Namespace, "Succeeded")
- if err != nil {
- logPodLogTail(spark.K8sClients, job.Namespace, DriverPodName(job.Name), 0)
- }
- return err
-}
-
-func DriverPodName(jobName string) string {
- return jobName + "-driver"
-}
diff --git a/tests/utils/k8s.go b/tests/utils/k8s.go
deleted file mode 100644
index 48070ad5..00000000
--- a/tests/utils/k8s.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package utils
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
-
- log "github.com/sirupsen/logrus"
- v1 "k8s.io/api/core/v1"
- apiErrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/clientcmd"
-)
-
-/* client-go util methods */
-
-func GetK8sClientSet() (*kubernetes.Clientset, error) {
- config, err := clientcmd.BuildConfigFromFlags("", KubeConfig)
- if err != nil {
- log.Errorf("Can't build config [kubeconfig = %s]: %s", KubeConfig, err)
- return nil, err
- }
-
- return kubernetes.NewForConfig(config)
-}
-
-func CreateNamespace(clientSet *kubernetes.Clientset, name string) (*v1.Namespace, error) {
- log.Infof("Creating namespace %s", name)
- namespace := v1.Namespace{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- },
- }
-
- return clientSet.CoreV1().Namespaces().Create(&namespace)
-}
-
-func DropNamespace(clientSet *kubernetes.Clientset, name string) error {
- log.Infof("Deleting namespace %s", name)
- gracePeriod := int64(0)
- propagationPolicy := metav1.DeletePropagationForeground
- options := metav1.DeleteOptions{
- GracePeriodSeconds: &gracePeriod,
- PropagationPolicy: &propagationPolicy,
- }
-
- err := clientSet.CoreV1().Namespaces().Delete(name, &options)
- if err != nil {
- log.Warnf("Can't delete namespace '%s':%s", name, err)
- return err
- }
-
- return Retry(func() error {
- _, err := clientSet.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
- if err == nil {
- return errors.New(fmt.Sprintf("Namespace '%s' still exists", name))
- } else if statusErr, ok := err.(*apiErrors.StatusError); !ok || statusErr.Status().Reason != metav1.StatusReasonNotFound {
- return err
- } else {
- log.Info(fmt.Sprintf("Namespace '%s' successfully deleted", name))
- return nil
- }
- })
-}
-
-func CreateServiceAccount(clientSet *kubernetes.Clientset, name string, namespace string) error {
- log.Infof("Creating a service account %s/%s", namespace, name)
- sa := v1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- },
- }
- _, err := clientSet.CoreV1().ServiceAccounts(namespace).Create(&sa)
- return err
-}
-
-// Creates a secret with unencoded (plain) string data
-func CreateSecretPlain(clientSet *kubernetes.Clientset, name string, namespace string, secretData map[string]string) error {
- log.Infof("Creating a secret %s/%s", namespace, name)
- secret := v1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- },
- StringData: secretData,
- }
-
- _, err := clientSet.CoreV1().Secrets(namespace).Create(&secret)
- return err
-}
-
-// Creates a secret used to store arbitrary data, encoded using base64
-func CreateSecretEncoded(clientSet *kubernetes.Clientset, name string, namespace string, secretData map[string][]byte) error {
- log.Infof("Creating a secret %s/%s", namespace, name)
- secret := v1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- },
- Data: secretData,
- }
-
- _, err := clientSet.CoreV1().Secrets(namespace).Create(&secret)
- return err
-}
-
-func getPodLog(clientSet *kubernetes.Clientset, namespace string, pod string, tailLines int64) (string, error) {
- opts := v1.PodLogOptions{}
- if tailLines > 0 {
- opts.TailLines = &tailLines
- }
- req := clientSet.CoreV1().Pods(namespace).GetLogs(pod, &opts)
-
- logSteam, err := req.Stream()
- if err != nil {
- return "", err
- }
- defer logSteam.Close()
-
- logBuffer := new(bytes.Buffer)
- _, err = io.Copy(logBuffer, logSteam)
- if err != nil {
- return "", err
- }
-
- return logBuffer.String(), nil
-}
-
-func podLogContains(clientSet *kubernetes.Clientset, namespace string, pod string, text string) (bool, error) {
- opts := v1.PodLogOptions{}
- req := clientSet.CoreV1().Pods(namespace).GetLogs(pod, &opts)
-
- logSteam, err := req.Stream()
- if err != nil {
- return false, err
- }
- defer logSteam.Close()
-
- scanner := bufio.NewScanner(logSteam)
- for scanner.Scan() {
- if strings.Contains(scanner.Text(), text) {
- return true, nil
- }
- }
-
- if err = scanner.Err(); err != nil {
- return false, err
- } else {
- return false, nil
- }
-}
-
-func PodLogContains(namespace string, pod string, text string) (bool, error) {
- podLog, err := Kubectl("logs", pod, "--namespace", namespace)
- if err != nil {
- return false, err
- }
- return strings.Contains(podLog, text), nil
-}
-
-func logPodLogTail(clientSet *kubernetes.Clientset, namespace string, pod string, lines int64) error {
- logTail, err := getPodLog(clientSet, namespace, pod, lines)
- if err == nil {
- log.Infof("pod logs:\n%s", logTail)
- }
- return err
-}
-
-func waitForPodStatusPhase(clientSet *kubernetes.Clientset, podName string, namespace string, status string) error {
- log.Infof("Waiting for pod %s to enter phase %s", podName, status)
-
- return Retry(func() error {
- pod, err := clientSet.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
- if err == nil && string(pod.Status.Phase) != status {
- err = errors.New("Expected pod status to be " + status + ", but it's " + string(pod.Status.Phase))
- } else if string(pod.Status.Phase) == status {
- log.Infof("\"%s\" completed successfully.", podName)
- }
- return err
- })
-}
-
-func IsEnvVarPresentInPod(envVar v1.EnvVar, pod v1.Pod) bool {
- for _, e := range pod.Spec.Containers[0].Env {
- if e.Name == envVar.Name && e.Value == envVar.Value {
- log.Infof("Found %s=%s environment variable in first container of pod %s/%s", e.Name, e.Value, pod.Namespace, pod.Name)
- return true
- }
- }
- return false
-}
-
-/* ConfigMap */
-
-func CreateConfigMap(clientSet *kubernetes.Clientset, name string, namespace string) error {
- log.Infof("Creating ConfigMap %s/%s", namespace, name)
- _, err := clientSet.CoreV1().ConfigMaps(namespace).Create(&v1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{Name: name},
- })
- return err
-}
-
-func AddFileToConfigMap(clientSet *kubernetes.Clientset, configMapName string, namespace string, key string, filepath string) error {
- log.Infof("Adding %s to the ConfigMap %s/%s under key %s", filepath, namespace, configMapName, key)
- configMap, err := clientSet.CoreV1().ConfigMaps(namespace).Get(configMapName, metav1.GetOptions{})
- if err != nil {
- return err
- }
-
- fileContent, err := ioutil.ReadFile(filepath)
- if err != nil {
- return err
- }
-
- if configMap.Data == nil {
- configMap.Data = make(map[string]string)
- }
-
- configMap.Data[key] = string(fileContent)
- configMap, err = clientSet.CoreV1().ConfigMaps(namespace).Update(configMap)
-
- return err
-}
-
-func DeleteConfigName(clientSet *kubernetes.Clientset, name string, namespace string) error {
- log.Infof("Deleting ConfigMap %s/%s", namespace, name)
- gracePeriod := int64(0)
- propagationPolicy := metav1.DeletePropagationForeground
- options := metav1.DeleteOptions{
- GracePeriodSeconds: &gracePeriod,
- PropagationPolicy: &propagationPolicy,
- }
- return clientSet.CoreV1().ConfigMaps(namespace).Delete(name, &options)
-}
-
-/* kubectl helpers */
-
-func Kubectl(args ...string) (string, error) {
- cmd := exec.Command("kubectl", args...)
- return RunAndLogCommandOutput(cmd)
-
-}
-
-func DeleteResource(namespace string, resource string, name string) error {
- _, err := Kubectl("delete", resource, name, "--namespace", namespace, "--ignore-not-found=true")
- return err
-}
-
-func KubectlApply(namespace string, filename string) error {
- log.Infof("Applying file %s with kubectl", filename)
- return kubectlRunFile("apply", namespace, filename)
-}
-
-func KubectlApplyTemplate(namespace string, template string, params map[string]interface{}) error {
- file, err := populateYamlTemplate(template, params)
- defer os.Remove(file)
- if err != nil {
- return err
- }
-
- return KubectlApply(namespace, file)
-}
-
-func KubectlDelete(namespace string, filename string) error {
- log.Infof("Deleting objects from file %s with kubectl", filename)
- return kubectlRunFile("delete", namespace, filename)
-}
-
-func kubectlRunFile(method string, namespace string, filename string) error {
- kubectl := exec.Command("kubectl", method, "--namespace", namespace, "-f", filename)
- _, err := RunAndLogCommandOutput(kubectl)
- return err
-}
diff --git a/tests/utils/kudo.go b/tests/utils/kudo.go
deleted file mode 100644
index d73a79d2..00000000
--- a/tests/utils/kudo.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package utils
-
-import (
- "fmt"
- log "github.com/sirupsen/logrus"
- "k8s.io/apimachinery/pkg/api/errors"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "os/exec"
-)
-
-const KudoNamespace = "kudo-system"
-const KudoService = "kudo-controller-manager-service"
-const KudoCmd = "kubectl-kudo"
-
-func InstallKudo() error {
- installed := isKudoInstalled()
-
- log.Infof("Kudo status: %v", installed)
-
- var args []string
- if installed {
- args = []string{"init", "--client-only"}
- } else {
- args = []string{"init"}
- }
- kudoInit := exec.Command(KudoCmd, args...)
- _, err := RunAndLogCommandOutput(kudoInit)
-
- return err
-}
-
-func UninstallKudo() error {
- // To be implemented
- return nil
-}
-
-func installKudoPackage(namespace string, operatorDir string, instance string, params map[string]string) error {
- var args []string
- args = append(args, "--namespace", namespace)
- args = append(args, "install", operatorDir)
- args = append(args, "--instance", instance)
- for k, v := range params {
- args = append(args, "-p", fmt.Sprintf(`%s=%s`, k, v))
- }
-
- cmd := exec.Command(KudoCmd, args...)
- _, err := RunAndLogCommandOutput(cmd)
-
- return err
-}
-
-func unistallKudoPackage(namespace string, instance string) error {
- var args = []string{"uninstall"}
- args = append(args, "--instance", instance)
- args = append(args, "--namespace", namespace)
-
- cmd := exec.Command(KudoCmd, args...)
- _, err := RunAndLogCommandOutput(cmd)
-
- return err
-}
-
-func isKudoInstalled() bool {
- log.Info("Checking if KUDO is installed")
- clients, err := GetK8sClientSet()
- if err != nil {
- panic(err)
- }
-
- _, err = clients.CoreV1().Services(KudoNamespace).Get(KudoService, v1.GetOptions{})
- if err != nil {
- if se, ok := err.(*errors.StatusError); ok && se.ErrStatus.Reason == v1.StatusReasonNotFound {
- return false
- } else {
- panic(err)
- }
- }
-
- return true
-}
diff --git a/tests/utils/spark_operator.go b/tests/utils/spark_operator.go
deleted file mode 100644
index df1356b2..00000000
--- a/tests/utils/spark_operator.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package utils
-
-import (
- "errors"
- "fmt"
- "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- operator "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
- petname "github.com/dustinkirkland/golang-petname"
- log "github.com/sirupsen/logrus"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/clientcmd"
- "strings"
-)
-
-const operatorDir = "../operators/repository/spark/operator"
-
-type SparkOperatorInstallation struct {
- Namespace string
- InstanceName string
- SkipNamespaceCleanUp bool
- K8sClients *kubernetes.Clientset
- SparkClients *operator.Clientset
- Params map[string]string
-}
-
-func (spark *SparkOperatorInstallation) InstallSparkOperator() error {
- if !isKudoInstalled() {
- return errors.New("can't install Spark operator without KUDO")
- }
-
- clientSet, err := GetK8sClientSet()
- if err != nil {
- log.Fatal(err)
- panic(err)
- }
- spark.K8sClients = clientSet
-
- sparkClientSet, err := getSparkOperatorClientSet()
- if err != nil {
- log.Fatal(err)
- panic(err)
- }
- spark.SparkClients = sparkClientSet
-
- // Set default namespace and instance name if not specified
- if spark.Namespace == "" {
- spark.Namespace = DefaultNamespace
- }
- if spark.InstanceName == "" {
- spark.InstanceName = GenerateInstanceName()
- }
-
- if !spark.SkipNamespaceCleanUp {
- spark.CleanUp()
-
- _, err = CreateNamespace(spark.K8sClients, spark.Namespace)
- if err != nil {
- return err
- }
- }
-
- log.Infof("Installing KUDO spark operator in %s", spark.Namespace)
-
- // Handle parameters
- if spark.Params == nil {
- spark.Params = make(map[string]string)
- }
- if strings.Contains(OperatorImage, ":") {
- // handle the case when using a local docker registry (e.g. registry:5000/kudo-spark-operator:2.4.5-1.0.0)
- tagIndex := strings.LastIndex(OperatorImage, ":")
- spark.Params["operatorImageName"] = OperatorImage[0:tagIndex]
- spark.Params["operatorVersion"] = OperatorImage[tagIndex+1:]
- } else {
- spark.Params["operatorImageName"] = OperatorImage
- }
-
- err = installKudoPackage(spark.Namespace, operatorDir, spark.InstanceName, spark.Params)
- if err != nil {
- return err
- }
-
- return spark.waitForInstanceStatus("COMPLETE")
-}
-
-func (spark *SparkOperatorInstallation) CleanUp() {
- // So far running multiple Spark operator instances in one namespace is not a supported configuration, so the whole namespace will be cleaned
- log.Infof("Uninstalling ALL kudo spark operator instances and versions from %s", spark.Namespace)
- instances, _ := getInstanceNames(spark.Namespace)
-
- if instances != nil {
- for _, instance := range instances {
- unistallKudoPackage(spark.Namespace, instance)
- }
- }
- DropNamespace(spark.K8sClients, spark.Namespace)
-}
-
-func getSparkOperatorClientSet() (*operator.Clientset, error) {
- config, err := clientcmd.BuildConfigFromFlags("", KubeConfig)
- if err != nil {
- panic(err.Error())
- }
-
- return operator.NewForConfig(config)
-}
-
-func (spark *SparkOperatorInstallation) waitForInstanceStatus(targetStatus string) error {
- log.Infof("Waiting for %s/%s to reach status %s", spark.Namespace, spark.InstanceName, targetStatus)
- return Retry(func() error {
- status, err := spark.getInstanceStatus()
- if err == nil && status != targetStatus {
- err = errors.New(fmt.Sprintf("%s status is %s, but waiting for %s", spark.InstanceName, status, targetStatus))
- }
- return err
- })
-}
-
-func (spark *SparkOperatorInstallation) getInstanceStatus() (string, error) {
- status, err := Kubectl("get", "instances.kudo.dev", spark.InstanceName, "--namespace", spark.Namespace, `-o=jsonpath={.status.planStatus.deploy.status}`)
- status = strings.Trim(status, `'`)
-
- return status, err
-}
-
-func (spark *SparkOperatorInstallation) WaitForJobState(job SparkJob, state v1beta2.ApplicationStateType) error {
- log.Infof("Waiting for SparkApplication \"%s\" to reach \"%s\" state", job.Name, state)
- err := Retry(func() error {
- app, err := spark.SparkClients.SparkoperatorV1beta2().SparkApplications(spark.Namespace).Get(job.Name, v1.GetOptions{})
- if err != nil {
- return err
- } else if app.Status.AppState.State != state {
- return errors.New(fmt.Sprintf("SparkApplication \"%s\" state is %s", job.Name, app.Status.AppState.State))
- }
- return nil
- })
-
- if err == nil {
- log.Infof("SparkApplication \"%s\" is now \"%s\"", job.Name, state)
- }
-
- return err
-}
-
-func (spark *SparkOperatorInstallation) GetExecutorState(job SparkJob) (map[string]v1beta2.ExecutorState, error) {
- log.Infof("Getting %s executors status", job.Name)
- app, err := spark.SparkClients.SparkoperatorV1beta2().SparkApplications(spark.Namespace).Get(job.Name, v1.GetOptions{})
- if err != nil {
- return nil, err
- } else {
- for k, v := range app.Status.ExecutorState {
- log.Infof("%s is %s", k, v)
- }
- return app.Status.ExecutorState, err
- }
-}
-
-func (spark *SparkOperatorInstallation) DeleteJob(job SparkJob) {
- log.Infof("Deleting job %s", job.Name)
- gracePeriod := int64(0)
- propagationPolicy := v1.DeletePropagationForeground
- options := v1.DeleteOptions{
- GracePeriodSeconds: &gracePeriod,
- PropagationPolicy: &propagationPolicy,
- }
- spark.SparkClients.SparkoperatorV1beta2().SparkApplications(spark.Namespace).Delete(job.Name, &options)
-}
-
-func getInstanceNames(namespace string) ([]string, error) {
- jsonpathExpr := `-o=jsonpath={range .items[?(@.metadata.labels.kudo\.dev/operator=="spark")]}{.metadata.name}{"\n"}`
- out, err := Kubectl("get", "instances.kudo.dev", "--namespace", namespace, jsonpathExpr)
-
- if err != nil {
- return nil, err
- }
-
- if len(out) > 0 {
- names := strings.Split(out, "\n")
- return names, nil
- } else {
- return nil, nil
- }
-}
-
-func (spark *SparkOperatorInstallation) GetOperatorPodName() (string, error) {
- return Kubectl("get", "pod",
- "--selector", "app.kubernetes.io/name=spark",
- "--namespace", spark.Namespace,
- "-o=jsonpath={.items[*].metadata.name}")
-}
-
-func GenerateInstanceName() string {
- return fmt.Sprintf("spark-%s", petname.Generate(2, "-"))
-}
diff --git a/tests/utils/templates.go b/tests/utils/templates.go
deleted file mode 100644
index 34ad685c..00000000
--- a/tests/utils/templates.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package utils
-
-import (
- "fmt"
- log "github.com/sirupsen/logrus"
- "io/ioutil"
- "path"
- "text/template"
-)
-
-var templates *template.Template
-
-func init() {
- var err error
- templates, err = template.ParseGlob(path.Join(TestDir, "templates/*.yaml"))
-
- if err != nil {
- log.Fatal("Can't parse templates")
- panic(err)
- }
-
- templatesLogDetails := "Parsed templates:"
- for _, t := range templates.Templates() {
- templatesLogDetails += fmt.Sprintf("\n- %s", t.Name())
- }
- log.Debug(templatesLogDetails)
-}
-
-func createSparkJob(job SparkJob) string {
- file, err := ioutil.TempFile("/tmp", "job-")
- if err != nil {
- log.Fatal(err)
- panic(err)
- }
-
- err = templates.ExecuteTemplate(file, job.Template, job)
- if err != nil {
- log.Fatal(err)
- panic(err)
- }
-
- return file.Name()
-}
-
-func populateYamlTemplate(name string, params map[string]interface{}) (string, error) {
- file, err := ioutil.TempFile("/tmp", "k8s-")
- if err != nil {
- log.Fatalf("Can't create a temporary file for template %s: %s", name, err)
- return "", err
- }
-
- err = templates.ExecuteTemplate(file, name, params)
- if err != nil {
- log.Fatalf("Can't populate a yaml template %s: %s", name, err)
- return "", err
- }
-
- return file.Name(), nil
-}
diff --git a/tests/utils/utils_test.go b/tests/utils/utils_test.go
deleted file mode 100644
index c51820f7..00000000
--- a/tests/utils/utils_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package utils
-
-import (
- log "github.com/sirupsen/logrus"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "os"
- "testing"
-)
-
-func TestClientGo(t *testing.T) {
- clientSet, err := GetK8sClientSet()
- if err != nil {
- t.Error(err.Error())
- }
-
- pods, err := clientSet.CoreV1().Pods("").List(v1.ListOptions{})
- if err != nil {
- t.Error(err.Error())
- }
-
- log.Infof("There are %d pods in the cluster\n", len(pods.Items))
-}
-
-func TestTemplating(t *testing.T) {
- tmpFilePath := createSparkJob(SparkJob{
- Name: "test",
- Namespace: "foo",
- Image: "bar",
- SparkVersion: "baz",
- Template: "spark-piyaml",
- })
- defer os.Remove(tmpFilePath)
-
- log.Infof("Created a temp file at %s", tmpFilePath)
-}
-
-func TestKudoInstall(t *testing.T) {
- err := InstallKudo()
- if err != nil {
- t.Error(err)
- }
-}
diff --git a/tests/volcano_test.go b/tests/volcano_test.go
deleted file mode 100644
index 5dfa4489..00000000
--- a/tests/volcano_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package tests
-
-import (
- "fmt"
- "github.com/mesosphere/kudo-spark-operator/tests/utils"
- "github.com/stretchr/testify/suite"
- "testing"
- "time"
-)
-
-const volcanoInstallerPath = "resources/volcano/volcano-0.2.yaml"
-const volcanoDeploymentWaitTimeout = 5 * time.Minute
-
-type VolcanoIntegrationTestSuite struct {
- operator utils.SparkOperatorInstallation
- suite.Suite
-}
-
-func TestVolcanoSuite(t *testing.T) {
- suite.Run(t, new(VolcanoIntegrationTestSuite))
-}
-
-func (suite *VolcanoIntegrationTestSuite) SetupSuite() {
- suite.operator = utils.SparkOperatorInstallation{
- Params: map[string]string{
- "enableBatchScheduler": "true",
- },
- }
-
- if err := suite.operator.InstallSparkOperator(); err != nil {
- suite.FailNow(err.Error())
- }
- // deploy volcano resources
- _, err := utils.Kubectl("apply", "-f", volcanoInstallerPath)
- suite.NoError(err)
-
- // wait until all deployments within a namespace are completed
- utils.Kubectl("wait", "deployment",
- "--all",
- "--for", "condition=available",
- "--namespace", "volcano-system",
- "--timeout", volcanoDeploymentWaitTimeout.String())
-}
-
-func (suite *VolcanoIntegrationTestSuite) TestAppRunOnVolcano() {
- jobName := "spark-pi"
- sparkPi := utils.SparkJob{
- Name: jobName,
- Template: fmt.Sprintf("%s.yaml", jobName),
- Params: map[string]interface{}{
- "BatchScheduler": "volcano",
- },
- }
- if err := suite.operator.SubmitJob(&sparkPi); err != nil {
- suite.FailNow(err.Error())
- }
-
- if err := suite.operator.WaitUntilSucceeded(sparkPi); err != nil {
- suite.FailNow(err.Error())
- }
-
- // assert that the driver pod was scheduled by volcano.
- //
- // the code below obtains the first pod's event (which is usually created by the scheduler
- // that handles pod assignment to a node in the cluster) and extracts the name of the event producer.
- driverPodName := utils.DriverPodName(jobName)
- component, err := utils.Kubectl("get", "events",
- "--namespace", sparkPi.Namespace,
- "--field-selector", fmt.Sprint("involvedObject.name=", driverPodName),
- "-o", "jsonpath={.items[0].source.component}")
- // the assertion verifies that the event producer name is 'volcano' rather than 'default-scheduler'.
- if suite.NoError(err) {
- suite.Equal("volcano", component)
- }
-
- // assert that the pod was successfully assigned to a node by checking the event message
- message, err := utils.Kubectl("get", "events",
- "--namespace", sparkPi.Namespace,
- "--field-selector", fmt.Sprint("involvedObject.name=", driverPodName),
- "-o", "jsonpath={.items[0].message}")
- if suite.NoError(err) {
- suite.Contains(message, fmt.Sprintf("Successfully assigned %s/%s", sparkPi.Namespace, driverPodName))
- }
-}
-
-func (suite *VolcanoIntegrationTestSuite) TearDownSuite() {
- suite.operator.CleanUp()
- // delete blocks until all resources are deleted
- utils.Kubectl("delete", "-f", volcanoInstallerPath)
-}