Skip to content

Commit a8edd83

Browse files
committed
WIP: e2e: add initial SGX EPC cgroups tests
Signed-off-by: Mikko Ylinen <[email protected]>
1 parent 897ca52 commit a8edd83

File tree

2 files changed

+112
-2
lines changed

2 files changed

+112
-2
lines changed

scripts/set-version.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@ if [ $# != 1 ] || [ "$1" = "?" ] || [ "$1" = "--help" ]; then
1515
exit 1
1616
fi
1717

18-
files=$(git grep -l '^TAG?*=\|intel/accel-config-demo:\|intel/crypto-perf:\|intel/opae-nlb-demo:\|intel/openssl-qat-engine:\|intel/dlb-libdlb-demo:\|intel/sgx-sdk-demo:\|intel/intel-[^ ]*:\|version=\|appVersion:\|tag:' Makefile deployments demo/*accel-config*.yaml demo/*fpga*.yaml demo/*openssl*.yaml demo/dlb-libdlb*.yaml pkg/controllers/*/*_test.go build/docker/*.Dockerfile test/e2e/*/*.go)
18+
files=$(git grep -l '^TAG?*=\|intel/accel-config-demo:\|intel/crypto-perf:\|intel/opae-nlb-demo:\|intel/openssl-qat-engine:\|intel/dlb-libdlb-demo:\|intel/stress-ng-gramine:\|intel/sgx-sdk-demo:\|intel/intel-[^ ]*:\|version=\|appVersion:\|tag:' Makefile deployments demo/*accel-config*.yaml demo/*fpga*.yaml demo/*openssl*.yaml demo/dlb-libdlb*.yaml pkg/controllers/*/*_test.go build/docker/*.Dockerfile test/e2e/*/*.go)
1919

2020
for file in $files; do
21-
sed -i -e "s;\(^TAG?*=\|intel/accel-config-demo:\|intel/crypto-perf:\|intel/opae-nlb-demo:\|intel/openssl-qat-engine:\|intel/dlb-libdlb-demo:\|intel/sgx-sdk-demo:\|intel/intel-[^ ]*:\|version=\|appVersion: [^ ]\|tag: [^ ]\)[^ \"]*;\1$1;g" "$file";
21+
sed -i -e "s;\(^TAG?*=\|intel/accel-config-demo:\|intel/crypto-perf:\|intel/opae-nlb-demo:\|intel/openssl-qat-engine:\|intel/dlb-libdlb-demo:\|intel/stress-ng-gramine:\|intel/sgx-sdk-demo:\|intel/intel-[^ ]*:\|version=\|appVersion: [^ ]\|tag: [^ ]\)[^ \"]*;\1$1;g" "$file";
2222
done

test/e2e/sgx/sgx.go

+110
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ package sgx
1616

1717
import (
1818
"context"
19+
"fmt"
1920
"path/filepath"
2021
"time"
2122

@@ -28,6 +29,7 @@ import (
2829
"k8s.io/apimachinery/pkg/labels"
2930
"k8s.io/kubernetes/test/e2e/framework"
3031
e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
32+
e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
3133
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
3234
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
3335
admissionapi "k8s.io/pod-security-admission/api"
@@ -38,6 +40,7 @@ const (
3840
timeout = time.Second * 120
3941
kustomizationWebhook = "deployments/sgx_admissionwebhook/overlays/default-with-certmanager/kustomization.yaml"
4042
kustomizationPlugin = "deployments/sgx_plugin/base/kustomization.yaml"
43+
stressNGImage = "intel/stress-ng-gramine:devel"
4144
)
4245

4346
func init() {
@@ -80,6 +83,9 @@ func describe() {
8083
})
8184

8285
ginkgo.Context("When SGX resources are available", func() {
86+
var nodeWithEPC string
87+
var epcCapacity int64
88+
8389
ginkgo.BeforeEach(func(ctx context.Context) {
8490
ginkgo.By("checking if the resource is allocatable")
8591
if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/epc", 150*time.Second); err != nil {
@@ -91,6 +97,20 @@ func describe() {
9197
if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/provision", 30*time.Second); err != nil {
9298
framework.Failf("unable to wait for nodes to have positive allocatable provision resource: %v", err)
9399
}
100+
101+
nodelist, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
102+
if err != nil {
103+
framework.Failf("failed to list Nodes: %v", err)
104+
}
105+
106+
// we have at least one node with sgx.intel.com/epc capacity
107+
for _, item := range nodelist.Items {
108+
if q, ok := item.Status.Allocatable["sgx.intel.com/epc"]; ok && q.Value() > 0 {
109+
epcCapacity = q.Value()
110+
nodeWithEPC = item.Name
111+
break
112+
}
113+
}
94114
})
95115

96116
ginkgo.It("deploys a sgx-sdk-demo pod requesting SGX enclave resources [App:sgx-sdk-demo]", func(ctx context.Context) {
@@ -120,6 +140,96 @@ func describe() {
120140
gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, f, pod.ObjectMeta.Name, "testcontainer"))
121141
})
122142

143+
ginkgo.It("deploys simultaneous SGX EPC stressor jobs with equal EPC limits but no memory limits [App:sgx-epc-cgroup]", func(ctx context.Context) {
144+
parallelism := int32(10) // TODO: add more
145+
completions := int32(10)
146+
quantity := resource.NewQuantity(epcCapacity/int64(parallelism), resource.BinarySI)
147+
148+
testArgs := []string{
149+
"stress-ng-edmm",
150+
"--vm",
151+
"1",
152+
"--vm-bytes",
153+
fmt.Sprintf("%db", epcCapacity/int64(parallelism)),
154+
"--page-in",
155+
"-t",
156+
"30",
157+
}
158+
job := e2ejob.NewTestJobOnNode("success", "sgx-epc-stressjob", v1.RestartPolicyNever, parallelism, completions, nil, 1, nodeWithEPC)
159+
160+
job.Spec.Template.Spec.Containers[0].Image = stressNGImage
161+
job.Spec.Template.Spec.Containers[0].Args = testArgs
162+
job.Spec.Template.Spec.Containers[0].Resources = v1.ResourceRequirements{
163+
Requests: v1.ResourceList{"sgx.intel.com/epc": *quantity},
164+
Limits: v1.ResourceList{"sgx.intel.com/epc": *quantity},
165+
}
166+
167+
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
168+
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
169+
170+
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
171+
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
172+
})
173+
174+
ginkgo.It("deploys one SGX EPC stressor job with a known enclave size and no memory limits [App:sgx-epc-cgroup]", func(ctx context.Context) {
175+
quantity := resource.NewQuantity(epcCapacity, resource.BinarySI)
176+
177+
testArgs := []string{
178+
"stress-ng",
179+
"--vm",
180+
"1",
181+
"--vm-bytes",
182+
"128m",
183+
"--page-in",
184+
"-t",
185+
"30",
186+
}
187+
job := e2ejob.NewTestJobOnNode("success", "sgx-epc-stressjob", v1.RestartPolicyNever, 1, 1, nil, 1, nodeWithEPC)
188+
189+
job.Spec.Template.Spec.Containers[0].Image = stressNGImage
190+
job.Spec.Template.Spec.Containers[0].Args = testArgs
191+
job.Spec.Template.Spec.Containers[0].Resources = v1.ResourceRequirements{
192+
Requests: v1.ResourceList{"sgx.intel.com/epc": *quantity},
193+
Limits: v1.ResourceList{"sgx.intel.com/epc": *quantity},
194+
}
195+
196+
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
197+
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
198+
199+
err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, 1)
200+
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
201+
})
202+
203+
ginkgo.It("deploys one SGX EPC stressor job with dynamic EPC allocation and memory limit set to kill once enough EPC pages are reclaimed [App:sgx-epc-cgroup]", func(ctx context.Context) {
204+
quantity := resource.NewQuantity(epcCapacity/10, resource.BinarySI)
205+
206+
//TODO: add another job that needs to survive
207+
testArgs := []string{
208+
"stress-ng-edmm",
209+
"--bigheap",
210+
"1",
211+
"--bigheap-growth",
212+
"1M",
213+
"--page-in",
214+
"-t",
215+
"60",
216+
}
217+
job := e2ejob.NewTestJobOnNode("success", "sgx-epc-stressjob", v1.RestartPolicyNever, 1, 1, nil, 1, nodeWithEPC)
218+
219+
job.Spec.Template.Spec.Containers[0].Image = stressNGImage
220+
job.Spec.Template.Spec.Containers[0].Args = testArgs
221+
job.Spec.Template.Spec.Containers[0].Resources = v1.ResourceRequirements{
222+
Requests: v1.ResourceList{"sgx.intel.com/epc": *quantity},
223+
Limits: v1.ResourceList{"sgx.intel.com/epc": *quantity,
224+
v1.ResourceMemory: *quantity},
225+
}
226+
227+
job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
228+
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
229+
err = e2ejob.WaitForJobFailed(f.ClientSet, f.Namespace.Name, job.Name)
230+
framework.ExpectNoError(err, "failed to ensure job failure in namespace: %s", f.Namespace.Name)
231+
})
232+
123233
ginkgo.When("there is no app to run [App:noapp]", func() {
124234
ginkgo.It("does nothing", func() {})
125235
})

0 commit comments

Comments (0)