
optimize v2 test (sealerio#993)
bxy4543 authored Dec 30, 2021
1 parent 8645fde commit e634e0a
Showing 9 changed files with 124 additions and 120 deletions.
204 changes: 101 additions & 103 deletions test/sealer_apply_test.go
@@ -15,7 +15,6 @@
package test

import (
"strconv"
"strings"
"time"

@@ -24,7 +23,6 @@ import (
. "github.com/onsi/ginkgo"

"github.com/alibaba/sealer/test/suites/apply"
"github.com/alibaba/sealer/test/suites/image"
"github.com/alibaba/sealer/test/testhelper"
"github.com/alibaba/sealer/test/testhelper/settings"
)
@@ -41,110 +39,110 @@ var _ = Describe("sealer apply", func() {
apply.MarshalClusterToFile(rawClusterFilePath, rawCluster)
}
})
Context("check regular scenario that provider is ali cloud", func() {
var tempFile string
BeforeEach(func() {
tempFile = testhelper.CreateTempFile()
})

AfterEach(func() {
apply.DeleteClusterByFile(settings.GetClusterWorkClusterfile(rawCluster.Name))
testhelper.RemoveTempFile(tempFile)
testhelper.DeleteFileLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
})

It("init, scale up, scale down, clean up", func() {
// 1,init cluster to 2 nodes and write to disk
By("start to init cluster")
sess, err := testhelper.Start(apply.SealerApplyCmd(rawClusterFilePath))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(2)

result := testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
By("Wait for the cluster to be ready", func() {
apply.WaitAllNodeRunning()
})
//2,scale up cluster to 6 nodes and write to disk
By("Use join command to add 3master and 3node for scale up cluster in cloud mode", func() {
apply.SealerJoin(strconv.Itoa(2), strconv.Itoa(2))
apply.CheckNodeNumLocally(6)
})

result = testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
usedCluster := apply.LoadClusterFileFromDisk(tempFile)

//3,scale down cluster to 4 nodes and write to disk
By("start to scale down cluster")
usedCluster.Spec.Nodes.Count = "1"
usedCluster.Spec.Masters.Count = "3"
apply.WriteClusterFileToDisk(usedCluster, tempFile)
sess, err = testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(4)

})
/* Context("check regular scenario that provider is ali cloud", func() {
var tempFile string
BeforeEach(func() {
tempFile = testhelper.CreateTempFile()
})
AfterEach(func() {
apply.DeleteClusterByFile(settings.GetClusterWorkClusterfile(rawCluster.Name))
testhelper.RemoveTempFile(tempFile)
testhelper.DeleteFileLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
})
It("init, scale up, scale down, clean up", func() {
// 1,init cluster to 2 nodes and write to disk
By("start to init cluster")
sess, err := testhelper.Start(apply.SealerApplyCmd(rawClusterFilePath))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(2)
result := testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
By("Wait for the cluster to be ready", func() {
apply.WaitAllNodeRunning()
})
//2,scale up cluster to 6 nodes and write to disk
By("Use join command to add 3master and 3node for scale up cluster in cloud mode", func() {
apply.SealerJoin(strconv.Itoa(2), strconv.Itoa(2))
apply.CheckNodeNumLocally(6)
})
result = testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
usedCluster := apply.LoadClusterFileFromDisk(tempFile)
//3,scale down cluster to 4 nodes and write to disk
By("start to scale down cluster")
usedCluster.Spec.Nodes.Count = "1"
usedCluster.Spec.Masters.Count = "3"
apply.WriteClusterFileToDisk(usedCluster, tempFile)
sess, err = testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(4)
})
})

Context("check regular scenario that provider is container", func() {
tempFile := testhelper.CreateTempFile()
BeforeEach(func() {
rawCluster.Spec.Provider = settings.CONTAINER
apply.MarshalClusterToFile(tempFile, rawCluster)
apply.CheckDockerAndSwapOff()
})

AfterEach(func() {
apply.DeleteClusterByFile(settings.GetClusterWorkClusterfile(rawCluster.Name))
testhelper.RemoveTempFile(tempFile)
testhelper.DeleteFileLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
})

It("init, scale up, scale down, clean up", func() {
// 1,init cluster to 2 nodes and write to disk
By("start to init cluster")
sess, err := testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(2)

result := testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)

By("Wait for the cluster to be ready", func() {
apply.WaitAllNodeRunning()
})
//2,scale up cluster to 6 nodes and write to disk
By("Use join command to add 2master and 1node for scale up cluster in cloud mode", func() {
apply.SealerJoin(strconv.Itoa(2), strconv.Itoa(1))
apply.CheckNodeNumLocally(5)
})
result = testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
usedCluster := apply.LoadClusterFileFromDisk(tempFile)

//3,scale down cluster to 4 nodes and write to disk
By("start to scale down cluster")
usedCluster.Spec.Nodes.Count = "1"
usedCluster.Spec.Masters.Count = "3"
apply.WriteClusterFileToDisk(usedCluster, tempFile)
sess, err = testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(4)
image.DoImageOps(settings.SubCmdRmiOfSealer, settings.TestImageName)
})

})
Context("check regular scenario that provider is container", func() {
tempFile := testhelper.CreateTempFile()
BeforeEach(func() {
rawCluster.Spec.Provider = settings.CONTAINER
apply.MarshalClusterToFile(tempFile, rawCluster)
apply.CheckDockerAndSwapOff()
})
AfterEach(func() {
apply.DeleteClusterByFile(settings.GetClusterWorkClusterfile(rawCluster.Name))
testhelper.RemoveTempFile(tempFile)
testhelper.DeleteFileLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
})
It("init, scale up, scale down, clean up", func() {
// 1,init cluster to 2 nodes and write to disk
By("start to init cluster")
sess, err := testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(2)
result := testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
By("Wait for the cluster to be ready", func() {
apply.WaitAllNodeRunning()
})
//2,scale up cluster to 6 nodes and write to disk
By("Use join command to add 2master and 1node for scale up cluster in cloud mode", func() {
apply.SealerJoin(strconv.Itoa(2), strconv.Itoa(1))
apply.CheckNodeNumLocally(5)
})
result = testhelper.GetFileDataLocally(settings.GetClusterWorkClusterfile(rawCluster.Name))
err = testhelper.WriteFile(tempFile, []byte(result))
testhelper.CheckErr(err)
usedCluster := apply.LoadClusterFileFromDisk(tempFile)
//3,scale down cluster to 4 nodes and write to disk
By("start to scale down cluster")
usedCluster.Spec.Nodes.Count = "1"
usedCluster.Spec.Masters.Count = "3"
apply.WriteClusterFileToDisk(usedCluster, tempFile)
sess, err = testhelper.Start(apply.SealerApplyCmd(tempFile))
testhelper.CheckErr(err)
testhelper.CheckExit0(sess, settings.MaxWaiteTime)
apply.CheckNodeNumLocally(4)
image.DoImageOps(settings.SubCmdRmiOfSealer, settings.TestImageName)
})
})*/

Context("check regular scenario that provider is bare metal, executes machine is master0", func() {
var tempFile string
9 changes: 5 additions & 4 deletions test/suites/apply/apply.go
@@ -85,6 +85,7 @@ func LoadPluginFromDisk(clusterFilePath string) []v1.Plugin {
func GenerateClusterfile(clusterfile string) {
filepath := GetRawConfigPluginFilePath()
cluster := LoadClusterFileFromDisk(clusterfile)
cluster.Spec.Env = []string{"env=TestEnv"}
data, err := yaml.Marshal(cluster)
testhelper.CheckErr(err)
appendData := [][]byte{data}
@@ -122,11 +123,11 @@ func GenerateClusterfile(clusterfile string) {
}

func SealerDeleteCmd(clusterFile string) string {
return fmt.Sprintf("%s delete -f %s --force", settings.DefaultSealerBin, clusterFile)
return fmt.Sprintf("%s delete -f %s --force -d", settings.DefaultSealerBin, clusterFile)
}

func SealerApplyCmd(clusterFile string) string {
return fmt.Sprintf("%s apply -f %s", settings.DefaultSealerBin, clusterFile)
return fmt.Sprintf("%s apply -f %s -d", settings.DefaultSealerBin, clusterFile)
}

func SealerRunCmd(masters, nodes, passwd string, provider string) string {
@@ -142,7 +143,7 @@ func SealerRunCmd(masters, nodes, passwd string, provider string) string {
if provider != "" {
provider = fmt.Sprintf("--provider %s", provider)
}
return fmt.Sprintf("%s run %s %s %s %s %s", settings.DefaultSealerBin, settings.TestImageName, masters, nodes, passwd, provider)
return fmt.Sprintf("%s run %s %s %s %s %s -d", settings.DefaultSealerBin, settings.TestImageName, masters, nodes, passwd, provider)
}

func SealerRun(masters, nodes, passwd, provider string) {
@@ -156,7 +157,7 @@ func SealerJoinCmd(masters, nodes string) string {
if nodes != "" {
nodes = fmt.Sprintf("-n %s", nodes)
}
return fmt.Sprintf("%s join %s %s -c my-test-cluster", settings.DefaultSealerBin, masters, nodes)
return fmt.Sprintf("%s join %s %s -c my-test-cluster -d", settings.DefaultSealerBin, masters, nodes)
}

func SealerJoin(masters, nodes string) {
4 changes: 2 additions & 2 deletions test/suites/apply/fixtures/config_plugin_for_test.yaml
@@ -72,7 +72,7 @@ spec:
type: SHELL
action: Originally
data: |
echo "OriginallyShell was successfully run from Clusterfile"
echo "$env: OriginallyShell was successfully run from Clusterfile"
---
apiVersion: sealer.aliyun.com/v1alpha1
kind: Plugin
@@ -82,7 +82,7 @@ spec:
type: SHELL
action: PostInstall
data: |
echo "PostInstallShell was successfully run from Clusterfile"
echo "$env: PostInstallShell was successfully run from Clusterfile"
---
apiVersion: sealer.aliyun.com/v1alpha1
kind: Plugin
2 changes: 1 addition & 1 deletion test/suites/build/build.go
@@ -91,7 +91,7 @@ func (a *ArgsOfBuild) Build() string {
if a.BuildType == "" {
a.BuildType = settings.LiteBuild
}
return fmt.Sprintf("%s build -f %s -t %s -m %s %s", settings.DefaultSealerBin, a.KubeFile, a.ImageName, a.BuildType, a.Context)
return fmt.Sprintf("%s build -f %s -t %s -m %s %s -d", settings.DefaultSealerBin, a.KubeFile, a.ImageName, a.BuildType, a.Context)
}

func NewArgsOfBuild() *ArgsOfBuild {
4 changes: 2 additions & 2 deletions test/suites/build/fixtures/cloud_build/Kubefile
@@ -3,8 +3,8 @@ COPY Clusterfile etc
COPY test1 .
COPY recommended.yaml .
COPY Plugins.yaml plugin
COPY imageList manifests
CMD kubectl apply -f recommended.yaml
COPY test2 .
RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz
RUN tar zxvf redis.tar.gz
RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz && tar zxvf redis.tar.gz && rm -f redis.tar.gz
CMD kubectl get nodes
1 change: 1 addition & 0 deletions test/suites/build/fixtures/cloud_build/imageList
@@ -0,0 +1 @@
quay.io/tigera/operator:v1.17.4
5 changes: 2 additions & 3 deletions test/suites/build/fixtures/lite_build/Kubefile
@@ -1,9 +1,8 @@
FROM sealer-io/test:v1
FROM sealer-io/kubernetes:v1.19.8
COPY test1 .
COPY recommended.yaml manifests
COPY test2 .
COPY test3 .
RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz
RUN tar zxvf redis.tar.gz
RUN wget -O redis.tar.gz http://download.redis.io/releases/redis-5.0.3.tar.gz && tar zxvf redis.tar.gz && rm -f redis.tar.gz
CMD ls -l
COPY imageList manifests
10 changes: 5 additions & 5 deletions test/suites/image/image.go
@@ -36,15 +36,15 @@ func DoImageOps(action, imageName string) {
cmd := ""
switch action {
case settings.SubCmdPullOfSealer:
cmd = fmt.Sprintf("%s pull %s", settings.DefaultSealerBin, imageName)
cmd = fmt.Sprintf("%s pull %s -d", settings.DefaultSealerBin, imageName)
case settings.SubCmdPushOfSealer:
cmd = fmt.Sprintf("%s push %s", settings.DefaultSealerBin, imageName)
cmd = fmt.Sprintf("%s push %s -d", settings.DefaultSealerBin, imageName)
case settings.SubCmdRmiOfSealer:
cmd = fmt.Sprintf("%s rmi %s", settings.DefaultSealerBin, imageName)
cmd = fmt.Sprintf("%s rmi %s -d", settings.DefaultSealerBin, imageName)
case settings.SubCmdForceRmiOfSealer:
cmd = fmt.Sprintf("%s rmi -f %s", settings.DefaultSealerBin, GetImageID(imageName))
cmd = fmt.Sprintf("%s rmi -f %s -d", settings.DefaultSealerBin, GetImageID(imageName))
case settings.SubCmdRunOfSealer:
cmd = fmt.Sprintf("%s run %s", settings.DefaultSealerBin, imageName)
cmd = fmt.Sprintf("%s run %s -d", settings.DefaultSealerBin, imageName)
case settings.SubCmdListOfSealer:
cmd = fmt.Sprintf("%s images", settings.DefaultSealerBin)
}
5 changes: 5 additions & 0 deletions test/testhelper/settings/settings.go
@@ -17,6 +17,8 @@ package settings
import (
"os"
"time"

"github.com/alibaba/sealer/logger"
)

// init test params and settings
@@ -39,4 +41,7 @@ func init() {
if pollingInterval == "" {
DefaultPollingInterval = 10
}
logger.InitLogger(logger.Config{
DebugMode: true,
})
}
