package e2e

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
	mcadv1beta1 "github.com/project-codeflare/multi-cluster-app-dispatcher/pkg/apis/controller/v1beta1"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"

	. "github.com/project-codeflare/codeflare-operator/test/support"
)

func TestInstascaleNodepool(t *testing.T) {
	test := With(t)
	test.T().Parallel()

	namespace := test.NewTestNamespace()

	// Test configuration
	config, err := TestConfig(test, namespace.Name)
	test.Expect(err).NotTo(HaveOccurred())

	// Create an OCM connection
	connection, err := CreateConnection(test)
	test.Expect(err).NotTo(HaveOccurred())
	defer connection.Close()

	// Check existing cluster resources:
	// expect no node pool named after the AppWrapper yet
	foundNodePool, err := CheckNodePools(connection, TestName)
	test.Expect(err).NotTo(HaveOccurred())
	test.Expect(foundNodePool).To(BeFalse())

	// Set up the batch Job and its AppWrapper
	job, aw, err := JobAppwrapperSetup(test, namespace, config)
	test.Expect(err).NotTo(HaveOccurred())

	// time.Sleep is used twice in this test; a polling assertion would be
	// more robust than a fixed sleep (a sketch follows below).
	// Wait for the node pool to scale up before checking it again.
	time.Sleep(TestTimeoutThirtySeconds)
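	// A minimal polling sketch, assuming the support package's Eventually
	// wrapper polls a plain function the way Gomega's Eventually does;
	// illustration only, not part of this test:
	//
	//	test.Eventually(func() bool {
	//		found, err := CheckNodePools(connection, TestName)
	//		return err == nil && found
	//	}, TestTimeoutThirtySeconds).Should(BeTrue())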
	// Look for a node pool named after the AppWrapper - expect to find it
	foundNodePool, err = CheckNodePools(connection, TestName)
	test.Expect(err).NotTo(HaveOccurred())
	test.Expect(foundNodePool).To(BeTrue())

	// Wait for the Job to finish, whether it completed or failed
	test.T().Logf("Waiting for Job %s/%s to complete", job.Namespace, job.Name)
	test.Eventually(Job(test, job.Namespace, job.Name), TestTimeoutLong).Should(
		Or(
			WithTransform(ConditionStatus(batchv1.JobComplete), Equal(corev1.ConditionTrue)),
			WithTransform(ConditionStatus(batchv1.JobFailed), Equal(corev1.ConditionTrue)),
		))

	// Assert the job has completed successfully
	test.Expect(GetJob(test, job.Namespace, job.Name)).
		To(WithTransform(ConditionStatus(batchv1.JobComplete), Equal(corev1.ConditionTrue)))

	// Assert that the AppWrapper reaches the Completed state
	test.Eventually(AppWrapper(test, namespace, aw.Name), TestTimeoutShort).
		Should(WithTransform(AppWrapperState, Equal(mcadv1beta1.AppWrapperStateCompleted)))

	// Allow time for the node pool to scale down before checking it again
	time.Sleep(TestTimeoutMedium)
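	// The polling sketch above would apply here as well, with
	// Should(BeFalse()) to wait for the node pool to be removed.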
	// Look for a node pool named after the AppWrapper - expect not to find it
	foundNodePool, err = CheckNodePools(connection, TestName)
	test.Expect(err).NotTo(HaveOccurred())
	test.Expect(foundNodePool).To(BeFalse())
}