
Commit e7c487f

Bump shepherd to the latest commit and Attempt #2 at fixing EKS Sync tests
Signed-off-by: Parthvi Vala <[email protected]>
1 parent 4b3f8c5 commit e7c487f

File tree

go.mod
go.sum
hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go

3 files changed (+12 additions, -18 deletions)


go.mod

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ require (
 	github.com/rancher-sandbox/ele-testhelpers v0.0.0-20241114104736-0d5b41ca9158
 	github.com/rancher-sandbox/qase-ginkgo v1.0.1
 	github.com/rancher/rancher v0.0.0-00010101000000-000000000000
-	github.com/rancher/shepherd v0.0.0-20250128173158-b3e7a07abe9a // rancher/shepherd main commit
+	github.com/rancher/shepherd v0.0.0-20250205140852-ba6d2793aaff // rancher/shepherd main commit
 	github.com/sirupsen/logrus v1.9.3
 	k8s.io/apimachinery v0.31.1
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
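
The new requirement pins a Go pseudo-version: its three dash-separated fields encode a base semver, the commit's UTC timestamp, and a 12-character commit-hash prefix. A minimal illustration of that decomposition, using the exact version this commit pins (the splitting code is for explanation only; the go toolchain parses pseudo-versions internally):

// Decompose a Go module pseudo-version into its three fields.
// Illustrative only; not how the toolchain exposes this.
package main

import (
	"fmt"
	"strings"
)

func main() {
	v := "v0.0.0-20250205140852-ba6d2793aaff" // shepherd version pinned by this commit
	parts := strings.SplitN(v, "-", 3)
	fmt.Println("base version:", parts[0]) // v0.0.0: no tagged release precedes the commit
	fmt.Println("commit time: ", parts[1]) // 2025-02-05 14:08:52 UTC
	fmt.Println("commit hash: ", parts[2]) // ba6d2793aaff, a prefix of the full SHA
}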

go.sum

Lines changed: 2 additions & 2 deletions
@@ -260,8 +260,8 @@ github.com/rancher/rancher/pkg/apis v0.0.0-20241127174121-c051d99dcded h1:h2gsuj
 github.com/rancher/rancher/pkg/apis v0.0.0-20241127174121-c051d99dcded/go.mod h1:VJQNArsPMiuWlvlnfGbD5gZtGvttnsEJSbrLsHfBtuA=
 github.com/rancher/rke v1.7.0-rc.5 h1:kBRwXTW8CYPXvCcPLISiwGTCvJ8K/+b35D5ES0IcduM=
 github.com/rancher/rke v1.7.0-rc.5/go.mod h1:+x++Mvl0A3jIzNLiu8nkraqZXiHg6VPWv0Xl4iQCg+A=
-github.com/rancher/shepherd v0.0.0-20250128173158-b3e7a07abe9a h1:2M68MtzP2PwInsKQNhaRTZ0W5I84iJw1KIUBBc+mpRY=
-github.com/rancher/shepherd v0.0.0-20250128173158-b3e7a07abe9a/go.mod h1:relMIZBbmYQyZUgVWfomrpHKO0we3AmbrUD0EFYoXyc=
+github.com/rancher/shepherd v0.0.0-20250205140852-ba6d2793aaff h1:ifkYlyFJEZzpo8uY0PX0oE8/36ZkDWiAPpL/EU2x5Og=
+github.com/rancher/shepherd v0.0.0-20250205140852-ba6d2793aaff/go.mod h1:relMIZBbmYQyZUgVWfomrpHKO0we3AmbrUD0EFYoXyc=
 github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20240301001845-4eacc2dabbde h1:x5VZI/0TUx1MeZirh6e0OMAInhCmq6yRvD6897458Ng=
 github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20240301001845-4eacc2dabbde/go.mod h1:04o7UUy7ZFiMDEtHEjO1yS7IkO8TcsgjBl93Fcjq7Gg=
 github.com/rancher/wrangler v1.1.2 h1:oXbXo9k7y/H4drUpb4RM1c++vT9O3rpoNEfyusGykiU=
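
For context, the h1: values swapped above are Hash1 digests of the module contents, as defined by golang.org/x/mod/sumdb/dirhash. A sketch of recomputing such a digest locally, assuming the module has already been extracted to disk (the directory path below is hypothetical):

// Recompute a go.sum-style "h1:" digest for an extracted module using
// the golang.org/x/mod/sumdb/dirhash package.
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	h, err := dirhash.HashDir(
		"/tmp/[email protected]", // hypothetical extraction dir
		"github.com/rancher/[email protected]",
		dirhash.Hash1, // the scheme behind every "h1:" line in go.sum
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // should match the h1: value recorded in go.sum
}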

hosted/eks/k8s_chart_support/k8s_chart_support_suite_test.go

Lines changed: 9 additions & 15 deletions
@@ -94,23 +94,14 @@ func commonchecks(client *rancher.Client, cluster *management.Cluster) {
 	By("downgrading the chart version", func() {
 		helpers.DowngradeProviderChart(downgradedVersion)
 	})
+
 	configNodeGroups := *cluster.EKSConfig.NodeGroups
 	initialNodeCount := *configNodeGroups[0].DesiredSize
-	var upgradeSuccessful bool
 
 	By("making a change(scaling nodegroup up) to the cluster to validate functionality after chart downgrade", func() {
 		var err error
-		cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+increaseBy, false, true)
+		cluster, err = helper.ScaleNodeGroup(cluster, client, initialNodeCount+increaseBy, true, true)
 		Expect(err).To(BeNil())
-
-		// We do not use WaitClusterToBeUpgraded because it has been flaky here and times out
-		Eventually(func() bool {
-			configNodeGroups = *cluster.EKSConfig.NodeGroups
-			for i := range configNodeGroups {
-				upgradeSuccessful = *configNodeGroups[i].DesiredSize == initialNodeCount+increaseBy
-			}
-			return upgradeSuccessful
-		}, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeTrue())
 	})
 
 	By("uninstalling the operator chart", func() {
@@ -134,11 +125,14 @@ func commonchecks(client *rancher.Client, cluster *management.Cluster) {
 		Eventually(func() bool {
 			GinkgoLogr.Info("Waiting for the node count change to appear in EKSStatus.UpstreamSpec ...")
 			Expect(err).To(BeNil())
-			upstreamNodeGroups := *cluster.EKSStatus.UpstreamSpec.NodeGroups
-			for i := range upstreamNodeGroups {
-				upgradeSuccessful = *upstreamNodeGroups[i].DesiredSize == initialNodeCount
+			cluster, err = client.Management.Cluster.ByID(cluster.ID)
+			Expect(err).To(BeNil())
+			for _, ng := range *cluster.EKSStatus.UpstreamSpec.NodeGroups {
+				if *ng.DesiredSize != initialNodeCount {
+					return false
+				}
 			}
-			return upgradeSuccessful
+			return true
 		}, tools.SetTimeout(15*time.Minute), 10*time.Second).Should(BeTrue())
 
 	})
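
Together with flipping a ScaleNodeGroup argument from false to true (plausibly a wait/verify flag), the refactored Eventually block fixes a genuine logic bug: the old loop overwrote upgradeSuccessful on every iteration, so only the last node group's DesiredSize was actually checked, and it never refetched the cluster, so it polled a stale object. A minimal, self-contained sketch of the corrected pattern (fetchDesiredSizes is a hypothetical stand-in for refetching the cluster via client.Management.Cluster.ByID and reading EKSStatus.UpstreamSpec.NodeGroups):

// Sketch of the corrected polling pattern: refetch fresh state on every
// attempt and require that ALL node groups match the expected size.
package main

import (
	"fmt"
	"time"
)

// fetchDesiredSizes stands in for re-reading each node group's
// DesiredSize from the upstream spec on every poll.
func fetchDesiredSizes() []int64 { return []int64{2, 2} }

// allAtSize returns false on the first mismatch, unlike the old loop,
// which kept only the comparison result of the final iteration.
func allAtSize(sizes []int64, want int64) bool {
	for _, s := range sizes {
		if s != want {
			return false
		}
	}
	return true
}

func main() {
	const want = int64(2)
	// Same budget as the test: poll every 10s for up to 15 minutes.
	deadline := time.Now().Add(15 * time.Minute)
	for time.Now().Before(deadline) {
		if allAtSize(fetchDesiredSizes(), want) {
			fmt.Println("all node groups converged to", want)
			return
		}
		time.Sleep(10 * time.Second)
	}
	fmt.Println("timed out waiting for node groups to converge")
}

In the suite itself this check runs inside Gomega's Eventually, which supplies the timeout and poll interval shown in the loop above.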
