@@ -25,6 +25,7 @@ import (
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/pointer"
@@ -97,9 +98,11 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() CommonSpecInpu
 
 		framework.DeployUnevictablePod(ctx, framework.DeployPodAndWaitInput{
 			WorkloadClusterProxy:               workloadClusterProxy,
+			MachineDeployment:                  machineDeployments[0],
 			DeploymentName:                     fmt.Sprintf("%s-%s", "unevictable-pod", util.RandomString(3)),
 			Namespace:                          namespace.Name + "-unevictable-workload",
 			WaitForDeploymentAvailableInterval: input.E2EConfig.GetIntervals(specName, "wait-deployment-available"),
+			ModifyDeployment:                   func(deployment *appsv1.Deployment) {},
 		})
 
 		By("Scale the machinedeployment down to zero. If we didn't have the NodeDrainTimeout duration, the node drain process would block this operator.")
@@ -122,6 +125,7 @@ func NodeDrainTimeoutSpec(ctx context.Context, inputGetter func() CommonSpecInpu
 			DeploymentName:                     fmt.Sprintf("%s-%s", "unevictable-pod", util.RandomString(3)),
 			Namespace:                          namespace.Name + "-unevictable-workload",
 			WaitForDeploymentAvailableInterval: input.E2EConfig.GetIntervals(specName, "wait-deployment-available"),
+			ModifyDeployment:                   func(deployment *appsv1.Deployment) {},
 		})
 
 		By("Scale down the controlplane of the workload cluster and make sure that nodes running workload can be deleted even the draining process is blocked.")