Attempt at fixing AKS tests
Signed-off-by: Parthvi Vala <[email protected]>
valaparthvi committed Feb 7, 2025
1 parent e7c487f commit 2f1816d
Showing 2 changed files with 11 additions and 5 deletions.
hosted/aks/p1/p1_provisioning_test.go: 2 changes (2 additions, 0 deletions)
@@ -322,6 +322,7 @@ var _ = Describe("P1Provisioning", func() {
Eventually(func() bool {
cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
+GinkgoLogr.Info(fmt.Sprintf("cluster.Transitioning=%s cluster.TransitioningMessage=%s", cluster.Transitioning, cluster.TransitioningMessage))
return cluster.Transitioning == "error" && cluster.TransitioningMessage == "at least one NodePool with mode System is required"
}, "1m", "2s").Should(BeTrue())
})
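Note: the GinkgoLogr.Info lines added in this commit log the cluster's transitioning state on every poll, so a timed-out Eventually leaves a trace of what the cluster actually reported. A minimal sketch of the pattern, not taken from the commit, assuming the usual Ginkgo v2 / Gomega dot-imports plus "fmt"; checkCluster is a hypothetical helper standing in for the Rancher cluster lookup used by the tests:

Eventually(func() bool {
	// Hypothetical helper: returns cluster.Transitioning and cluster.TransitioningMessage.
	transitioning, msg := checkCluster()
	// Log the observed state on every attempt so a failure is easy to diagnose.
	GinkgoLogr.Info(fmt.Sprintf("cluster.Transitioning=%s cluster.TransitioningMessage=%s", transitioning, msg))
	return transitioning == "error"
}, "1m", "2s").Should(BeTrue()) // "1m" overall timeout, "2s" polling interval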
@@ -476,6 +477,7 @@ var _ = Describe("P1Provisioning", func() {
Eventually(func() bool {
cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
+GinkgoLogr.Info(fmt.Sprintf("cluster.State=%s cluster.Transitioning=%s cluster.TransitioningMessage=%s", cluster.State, cluster.Transitioning, cluster.TransitioningMessage))
return cluster.State == "provisioning" && cluster.Transitioning == "error" && strings.Contains(cluster.TransitioningMessage, "an AKSClusterConfig exists with the same name")
}, "30s", "2s").Should(BeTrue())

hosted/aks/p1/p1_suite_test.go: 14 changes (9 additions, 5 deletions)
@@ -294,9 +294,9 @@ func updateMonitoringCheck(cluster *management.Cluster, client *rancher.Client)
func updateSystemNodePoolCountToZeroCheck(cluster *management.Cluster, client *rancher.Client) {
updateFunc := func(cluster *management.Cluster) {
nodepools := *cluster.AKSConfig.NodePools
-for i, nodepool := range nodepools {
-if nodepool.Mode == "System" {
-nodepools[i].Count = pointer.Int64(0)
+for i := range nodepools {
+if nodepools[i].Mode == "System" {
+*nodepools[i].Count = 0
}
}
cluster.AKSConfig.NodePools = &nodepools
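The loop rewritten above changes how the System pool count is zeroed: the old lines replaced the Count field with a freshly allocated pointer (pointer.Int64(0)), while the new lines write through the pointer that is already there, which assumes Count is non-nil. A minimal standalone sketch of the difference, not part of the commit; nodePool is a hypothetical stand-in for the AKS node pool struct, and k8s.io/utils/pointer is the same helper package the old line uses:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

// nodePool is a stand-in for the AKS node pool struct; only the Count field matters here.
type nodePool struct{ Count *int64 }

func main() {
	count := int64(3)

	// Old approach: replace the pointer with a freshly allocated zero value.
	oldStyle := nodePool{Count: &count}
	oldStyle.Count = pointer.Int64(0)
	fmt.Println(*oldStyle.Count, count) // 0 3 -- count itself is untouched

	// New approach: write through the existing pointer, mutating the shared value.
	// This assumes Count is non-nil; a nil Count would panic here.
	newStyle := nodePool{Count: &count}
	*newStyle.Count = 0
	fmt.Println(*newStyle.Count, count) // 0 0 -- count itself is now zero
}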
@@ -307,6 +307,7 @@ func updateSystemNodePoolCountToZeroCheck(cluster *management.Cluster, client *rancher.Client)
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
+GinkgoLogr.Info(fmt.Sprintf("cluster.Transitioning=%s cluster.TransitioningMessage=%s", cluster.Transitioning, cluster.TransitioningMessage))
return cluster.Transitioning == "error" && strings.Contains(cluster.TransitioningMessage, "It must be greater or equal to minCount:1 and less than or equal to maxCount:1000")
}, "1m", "2s").Should(BeTrue())
}
@@ -345,18 +346,21 @@ func updateSystemNodePoolCheck(cluster *management.Cluster, client *rancher.Client
}
}

+err = clusters.WaitClusterToBeUpgraded(client, cluster.ID)
+Expect(err).To(BeNil())
+
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
for _, np := range *cluster.AKSStatus.UpstreamSpec.NodePools {
if np.Mode == systemMode {
-if !((np.EnableAutoScaling != nil && *np.EnableAutoScaling == true) && (*np.MaxCount == maxCount) && (*np.MinCount == minCount) && (*np.Count == count)) {
+if !((np.EnableAutoScaling != nil && *np.EnableAutoScaling) && (*np.MaxCount == maxCount) && (*np.MinCount == minCount) && (*np.Count == count)) {
return false
}
}
}
return true
-}, "15m", "15s").Should(BeTrue(), "Failed while upstream nodepool update")
+}, "7m", "5s").Should(BeTrue(), "Failed while upstream nodepool update")
}

// Qase ID: 230 and 291
