From f88ae85b979d9813c08958387191bee537208404 Mon Sep 17 00:00:00 2001 From: Divya Vavili Date: Tue, 6 Jun 2017 22:26:15 -0700 Subject: [PATCH] k8s system test changes --- .gitignore | 3 + Makefile | 17 +- install/k8s/cluster/bootstrap_centos.sh | 13 +- install/k8s/cluster/k8smaster_centos.sh | 1 - install/k8s/contiv/contiv_devtest.yaml | 275 +++++++ scripts/netContain/Dockerfile | 1 - scripts/netContain/scripts/contivNet.sh | 44 +- scripts/python/createcfg.py | 18 +- test/systemtests/basic_test.go | 20 +- test/systemtests/cfg.json | 2 +- test/systemtests/hostaccess_test.go | 17 +- test/systemtests/init_test.go | 24 +- test/systemtests/k8setup_test.go | 2 +- test/systemtests/kubeadm_test.go | 969 ++++++++++++++++++++++++ test/systemtests/netprofile_test.go | 2 +- test/systemtests/network_test.go | 11 +- test/systemtests/node_test.go | 1 + test/systemtests/policy_test.go | 4 +- test/systemtests/trigger_test.go | 10 +- test/systemtests/util_test.go | 84 +- vagrant/k8s/Vagrantfile | 239 +++--- vagrant/k8s/setup_cluster.sh | 24 +- vagrant/k8s/vagrant_cluster.py | 6 +- 23 files changed, 1562 insertions(+), 225 deletions(-) create mode 100644 install/k8s/contiv/contiv_devtest.yaml create mode 100755 test/systemtests/kubeadm_test.go diff --git a/.gitignore b/.gitignore index 54c269d5b..1629c8a8f 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,6 @@ netplugin-version # release artifacts scripts/netContain/*.tgz scripts/netContain/*.tar.bz2 + +vagrant/k8s/export/.contiv.yaml* +vagrant/k8s/contrib diff --git a/Makefile b/Makefile index 003aa8103..cd6d0af6f 100755 --- a/Makefile +++ b/Makefile @@ -122,6 +122,8 @@ endif #kubernetes demo targets k8s-cluster: + cd vagrant/k8s/ && CONTIV_K8S_USE_KUBEADM=1 ./setup_cluster.sh +k8s-legacy-cluster: cd vagrant/k8s/ && ./setup_cluster.sh k8s-l3-cluster: CONTIV_L3=1 make k8s-cluster @@ -131,17 +133,20 @@ k8s-demo-start: cd vagrant/k8s/ && ./restart_cluster.sh && vagrant ssh k8master k8s-destroy: cd vagrant/k8s/ && vagrant destroy -f -k8s-sanity-cluster: - cd vagrant/k8s/ && ./setup_cluster.sh -k8s-test: - export CONTIV_K8=1 && \ +k8s-legacy-test: + export CONTIV_K8S_LEGACY=1 && \ make k8s-sanity-cluster && \ cd vagrant/k8s/ && \ vagrant ssh k8master -c 'sudo -i bash -lc "cd /opt/gopath/src/github.com/contiv/netplugin && make run-build"' && \ ./start_sanity_service.sh - cd $(GOPATH)/src/github.com/contiv/netplugin/scripts/python && PYTHONIOENCODING=utf-8 ./createcfg.py -scheduler 'k8' - CONTIV_K8=1 CONTIV_NODES=3 go test -v -timeout 540m ./test/systemtests -check.v -check.f "00SSH|TestBasic|TestNetwork|ACID|TestPolicy|TestTrigger" + cd $(GOPATH)/src/github.com/contiv/netplugin/scripts/python && PYTHONIOENCODING=utf-8 ./createcfg.py -scheduler 'k8s' + CONTIV_K8S_LEGACY=1 CONTIV_NODES=3 go test -v -timeout 540m ./test/systemtests -check.v -check.f "00SSH|TestBasic|TestNetwork|ACID|TestPolicy|TestTrigger" + cd vagrant/k8s && vagrant destroy -f +k8s-test: k8s-cluster + cd $(GOPATH)/src/github.com/contiv/netplugin/scripts/python && PYTHONIOENCODING=utf-8 ./createcfg.py -scheduler 'k8s' -binpath contiv/bin -install_mode 'kubeadm' + CONTIV_K8S_USE_KUBEADM=1 CONTIV_NODES=3 go test -v -timeout 540m ./test/systemtests -check.v -check.f "00SSH|TestBasic|TestNetwork|TestPolicy" cd vagrant/k8s && vagrant destroy -f + # Mesos demo targets mesos-docker-demo: cd vagrant/mesos-docker && \ diff --git a/install/k8s/cluster/bootstrap_centos.sh b/install/k8s/cluster/bootstrap_centos.sh index 1003c69f8..65a4b203d 100755 --- a/install/k8s/cluster/bootstrap_centos.sh +++ 
b/install/k8s/cluster/bootstrap_centos.sh @@ -16,7 +16,18 @@ EOF setenforce 0 -yum install -y docker kubelet kubeadm kubectl kubernetes-cni +yum remove -y docker \ + docker-common \ + container-selinux \ + docker-selinux \ + docker-engine \ + docker-engine-selinux + +yum install -y docker ebtables \ + https://fedorapeople.org/groups/kolla/kubeadm-1.6.0-0.alpha.0.2074.a092d8e0f95f52.x86_64.rpm \ + https://fedorapeople.org/groups/kolla/kubectl-1.5.4-0.x86_64.rpm \ + https://fedorapeople.org/groups/kolla/kubelet-1.5.4-0.x86_64.rpm \ + https://fedorapeople.org/groups/kolla/kubernetes-cni-0.3.0.1-0.07a8a2.x86_64.rpm systemctl enable docker && systemctl start docker systemctl enable kubelet && systemctl start kubelet diff --git a/install/k8s/cluster/k8smaster_centos.sh b/install/k8s/cluster/k8smaster_centos.sh index 8318b2864..d9635d04c 100644 --- a/install/k8s/cluster/k8smaster_centos.sh +++ b/install/k8s/cluster/k8smaster_centos.sh @@ -1,2 +1 @@ kubeadm init --token=$1 --api-advertise-addresses=$2 --skip-preflight-checks=true --use-kubernetes-version $3 - diff --git a/install/k8s/contiv/contiv_devtest.yaml b/install/k8s/contiv/contiv_devtest.yaml new file mode 100644 index 000000000..d262cdfce --- /dev/null +++ b/install/k8s/contiv/contiv_devtest.yaml @@ -0,0 +1,275 @@ +--- +# This ConfigMap is used to configure a self-hosted Contiv installation. +# It can be used with an external cluster store(etcd or consul) or used +# with the etcd instance being installed as contiv-etcd +kind: ConfigMap +apiVersion: v1 +metadata: + name: contiv-config + namespace: kube-system +data: + # The location of your cluster store. This is set to the + # avdertise-client value below from the contiv-etcd service. + # Change it to an external etcd/consul instance if required. + cluster_store: "etcd://__NETMASTER_IP__:6666" + # The CNI network configuration to install on each node. + cni_config: |- + { + "cniVersion": "0.1.0", + "name": "contiv-net", + "type": "contivk8s" + } + config: |- + { + "K8S_API_SERVER": "https://__NETMASTER_IP__:6443", + "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + "K8S_KEY": "", + "K8S_CERT": "", + "K8S_TOKEN": "" + } +--- + +# This manifest installs the Contiv etcd on the kubeadm master. +# If using an external etcd instance, this can be deleted. This uses a DaemonSet +# to force it to run on the master even when the master isn't schedulable, and uses +# nodeSelector to ensure it only runs on the master. +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: contiv-etcd + namespace: kube-system + labels: + k8s-app: contiv-etcd +spec: + template: + metadata: + labels: + k8s-app: contiv-etcd + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + # Only run this pod on the master. 
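+      # The selector below matches the master role label applied by the pinned kubeadm build
+      # installed via bootstrap_centos.sh in this change (assumption: that build labels the master
+      # with kubeadm.alpha.kubernetes.io/role=master).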
+ nodeSelector: + kubeadm.alpha.kubernetes.io/role: master + hostNetwork: true + containers: + - name: contiv-etcd + image: gcr.io/google_containers/etcd:2.2.1 + command: ["/bin/sh","-c"] + args: ["/usr/local/bin/etcd --name=contiv --data-dir=/var/etcd/contiv-data --advertise-client-urls=http://__NETMASTER_IP__:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] + volumeMounts: + - name: var-etcd + mountPath: /var/etcd + volumes: + - name: var-etcd + hostPath: + path: /var/etcd + +--- +# This manifest installs contiv-netplugin container, as well +# as the Contiv CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: contiv-netplugin + namespace: kube-system + labels: + k8s-app: contiv-netplugin +spec: + selector: + matchLabels: + k8s-app: contiv-netplugin + template: + metadata: + labels: + k8s-app: contiv-netplugin + spec: + hostNetwork: true + hostPID: true + containers: + # Runs netplugin container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: contiv-netplugin + image: contiv/netplugin:k8s_devtest + args: + - -pkubernetes + env: + - name: VLAN_IF + value: __VLAN_IF__ + - name: VTEP_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: CONTIV_ETCD + valueFrom: + configMapKeyRef: + name: contiv-config + key: cluster_store + - name: CONTIV_CNI_CONFIG + valueFrom: + configMapKeyRef: + name: contiv-config + key: cni_config + - name: CONTIV_CONFIG + valueFrom: + configMapKeyRef: + name: contiv-config + key: config + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/openvswitch + name: etc-openvswitch + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: false + - mountPath: /var/run + name: var-run + readOnly: false + - mountPath: /var/contiv + name: var-contiv + readOnly: false + - mountPath: /etc/kubernetes/pki + name: etc-kubernetes-pki + readOnly: false + - mountPath: /etc/kubernetes/ssl + name: etc-kubernetes-ssl + readOnly: false + - mountPath: /opt/cni/bin + name: cni-bin-dir + readOnly: false + - mountPath: /etc/cni/net.d/ + name: etc-cni-dir + readOnly: false + - mountPath: /contiv/bin + name: contiv-bin-dir + readOnly: false + volumes: + # Used by contiv-netplugin + - name: etc-openvswitch + hostPath: + path: /etc/openvswitch + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run + hostPath: + path: /var/run + - name: var-contiv + hostPath: + path: /var/contiv + - name: etc-kubernetes-pki + hostPath: + path: /etc/kubernetes/pki + - name: etc-kubernetes-ssl + hostPath: + path: /etc/kubernetes/ssl + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: etc-cni-dir + hostPath: + path: /etc/cni/net.d/ + - name: contiv-bin-dir + hostPath: + path: /opt/gopath/bin +--- + +# This manifest deploys the Contiv API Server on Kubernetes. +apiVersion: extensions/v1beta1 +kind: ReplicaSet +metadata: + name: contiv-netmaster + namespace: kube-system + labels: + k8s-app: contiv-netmaster +spec: + # The netmaster should have 1, 3, 5 nodes of which one is active at any given time. + # More nodes are desired in a production environment for HA. 
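+  # A single replica is sufficient for this dev/test manifest.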
+ replicas: 1 + template: + metadata: + name: contiv-netmaster + namespace: kube-system + labels: + k8s-app: contiv-netmaster + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + # Only run this pod on the master. + nodeSelector: + kubeadm.alpha.kubernetes.io/role: master + # The netmaster must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + hostPID: true + containers: + - name: contiv-netmaster + image: contiv/netplugin:k8s_devtest + args: + - -m + - -pkubernetes + env: + - name: CONTIV_ETCD + valueFrom: + configMapKeyRef: + name: contiv-config + key: cluster_store + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/openvswitch + name: etc-openvswitch + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: false + - mountPath: /var/run + name: var-run + readOnly: false + - mountPath: /var/contiv + name: var-contiv + readOnly: false + - mountPath: /etc/kubernetes/ssl + name: etc-kubernetes-ssl + readOnly: false + - mountPath: /opt/cni/bin + name: cni-bin-dir + readOnly: false + - mountPath: /contiv/bin + name: contiv-bin-dir + readOnly: false + volumes: + # Used by contiv-netmaster + - name: etc-openvswitch + hostPath: + path: /etc/openvswitch + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run + hostPath: + path: /var/run + - name: var-contiv + hostPath: + path: /var/contiv + - name: etc-kubernetes-ssl + hostPath: + path: /etc/kubernetes/ssl + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: contiv-bin-dir + hostPath: + path: /opt/gopath/bin +--- diff --git a/scripts/netContain/Dockerfile b/scripts/netContain/Dockerfile index 3a152d2e2..5bd715a15 100644 --- a/scripts/netContain/Dockerfile +++ b/scripts/netContain/Dockerfile @@ -30,5 +30,4 @@ RUN apt-get update \ COPY ./bin /contiv/bin/ COPY ./scripts /contiv/scripts/ - ENTRYPOINT ["/contiv/scripts/contivNet.sh"] diff --git a/scripts/netContain/scripts/contivNet.sh b/scripts/netContain/scripts/contivNet.sh index 7f166a01a..f130e8a2c 100755 --- a/scripts/netContain/scripts/contivNet.sh +++ b/scripts/netContain/scripts/contivNet.sh @@ -19,6 +19,10 @@ vlan_if_param="" control_url=":9999" listen_url=":9999" +# These files indicate if the netmaster/netplugin process needs to be restarted +touch /tmp/restart_netmaster +touch /tmp/restart_netplugin + #This needs to be fixed, we cant rely on the value being supplied from # parameters, just explosion of parameters is not a great solution #export no_proxy="0.0.0.0, 172.28.11.253" @@ -73,8 +77,7 @@ while getopts ":xmp:v:i:c:drl:o:" opt; do done if [ $cleanup == true ] || [ $reinit == true ]; then - ovs-vsctl del-br contivVlanBridge || true - ovs-vsctl del-br contivVxlanBridge || true + ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br % > /dev/null 2>&1 for p in $(ifconfig | grep vport | awk '{print $1}'); do ip link delete $p type veth done @@ -114,34 +117,39 @@ if [ "$plugin" == "kubernetes" ]; then fi fi +set +eo pipefail + if [ $netmaster == true ]; then echo "Starting netmaster " while true; do - if [ "$cstore" != "" ]; then - /contiv/bin/netmaster $debug -cluster-mode $plugin -cluster-store $cstore -listen-url $listen_url -control-url $control_url &>/var/contiv/log/netmaster.log - else - /contiv/bin/netmaster $debug -cluster-mode 
$plugin -listen-url $listen_url -control-url $control_url &>/var/contiv/log/netmaster.log + if [ -f /tmp/restart_netmaster ]; then + if [ "$cstore" != "" ]; then + /contiv/bin/netmaster $debug -cluster-mode $plugin -cluster-store $cstore -listen-url $listen_url -control-url $control_url &>/var/contiv/log/netmaster.log + else + /contiv/bin/netmaster $debug -cluster-mode $plugin -listen-url $listen_url -control-url $control_url &>/var/contiv/log/netmaster.log + fi + echo "CRITICAL : Netmaster has exited. Trying to respawn in 5s" fi - echo "CRITICAL: netmaster has exited, Respawn in 5" sleep 5 done elif [ $netplugin == true ]; then echo "Starting netplugin" modprobe openvswitch - mkdir -p /var/contiv/log/ while true; do - if [ "$cstore" != "" ]; then - cstore_param="-cluster-store" - fi - if [ "$vtep_ip" != "" ]; then - vtep_ip_param="-vtep-ip" - fi - if [ "$vlan_if" != "" ]; then - vlan_if_param="-vlan-if" + if [ -f /tmp/restart_netplugin ]; then + if [ "$cstore" != "" ]; then + cstore_param="-cluster-store" + fi + if [ "$vtep_ip" != "" ]; then + vtep_ip_param="-vtep-ip" + fi + if [ "$vlan_if" != "" ]; then + vlan_if_param="-vlan-if" + fi + /contiv/bin/netplugin $debug $cstore_param $cstore $vtep_ip_param $vtep_ip $vlan_if_param $vlan_if -plugin-mode $plugin &>/var/contiv/log/netplugin.log + echo "CRITICAL : Netplugin has exited. Trying to respawn in 5s" fi - /contiv/bin/netplugin $debug $cstore_param $cstore $vtep_ip_param $vtep_ip $vlan_if_param $vlan_if -plugin-mode $plugin &>/var/contiv/log/netplugin.log - echo "CRITICAL: netplugin has exited, Respawn in 5" sleep 5 done fi diff --git a/scripts/python/createcfg.py b/scripts/python/createcfg.py index c270ea99e..482991703 100755 --- a/scripts/python/createcfg.py +++ b/scripts/python/createcfg.py @@ -5,11 +5,14 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument("-scheduler", default='docker', help="Scheduler used, if any") +parser.add_argument("-scheduler", default='docker', help="Scheduler used - docker or k8s") +# Install mode for k8s - Legacy vs Kubeadm. 
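+# The chosen value is written to cfg.json as install_mode; the system tests read it to pick
+# between the legacy node exec and the kubeadm pod exec (see util_test.go in this change).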
+parser.add_argument("-install_mode", default='legacy', help="Install mode - legacy or kubeadm") parser.add_argument("-swarm_var", default='', help="Swarm host variable") parser.add_argument("-platform", default='vagrant', help="Vagrant/baremetal") parser.add_argument("-product", default='netplugin', help="netplugin/volplugin") parser.add_argument("-contiv_l3", default='', help="Running in L3 mode") +parser.add_argument("-contiv_k8s", default=0, help="Running in K8s mode") parser.add_argument("-key_file", default="/home/admin/.ssh/id_rsa", help="file path of key_file") parser.add_argument("-binpath", default="/opt/gopath/bin", help="GOBIN path") parser.add_argument("-hostips", default="192.168.2.10,192.168.2.11,192.168.2.12", help="Host IPs in the system") @@ -21,8 +24,10 @@ parser.add_argument("-iterations", default=3, help="Number of iterations for each test") parser.add_argument("-enableDNS", default=False, help="Enabling DNS") parser.add_argument("-contiv_cluster_store", default="etcd://localhost:2379", help="cluster info") +parser.add_argument("-k8s_contiv_cluster_store", default="etcd://netmaster:6666", help="cluster info") parser.add_argument("-datainterfaces", default="eth2,eth3", help="Data interface") parser.add_argument("-l3_datainterfaces", default="eth2", help="Data interface") +parser.add_argument("-k8s_datainterfaces", default="eth2", help="Data interface") parser.add_argument("-mgmtinterface", default="eth1", help="Control interface") parser.add_argument("-vlan", default="1120-1150", help="vlan range") parser.add_argument("-vxlan", default="1-10000", help="vxlan range") @@ -35,6 +40,7 @@ args = parser.parse_args() data = {} data['scheduler'] = args.scheduler +data['install_mode'] = args.install_mode data['swarm_variable'] = args.swarm_var data['platform'] = args.platform data['product'] = args.product @@ -43,8 +49,11 @@ data['containers'] = args.containers data['iterations'] = args.iterations data['enableDNS'] = args.enableDNS -data['contiv_cluster_store'] = args.contiv_cluster_store data['contiv_l3'] = args.contiv_l3 +if args.scheduler == 'k8s': + data['contiv_cluster_store'] = args.k8s_contiv_cluster_store +else: + data['contiv_cluster_store'] = args.contiv_cluster_store data['keyFile'] = args.key_file data['binpath'] = args.binpath data['hostips'] = args.hostips @@ -53,6 +62,9 @@ data['dataInterfaces'] = args.datainterfaces else: data['dataInterfaces'] = args.l3_datainterfaces +if args.scheduler == 'k8s': + data['dataInterfaces'] = args.k8s_datainterfaces +data['install_mode'] = args.install_mode data['mgmtInterface'] = args.mgmtinterface data['vlan'] = args.vlan data['vxlan'] = args.vxlan @@ -64,7 +76,7 @@ filepath = os.environ['GOPATH'] + '/src/github.com/contiv/netplugin/test/systemtests/cfg.json' with open(filepath, 'w') as outfile: - print "Generating the config file: " + filepath + print("Generating the config file: " + filepath) json.dump(data, outfile) os._exit(0) diff --git a/test/systemtests/basic_test.go b/test/systemtests/basic_test.go index b1182d461..6451c07fb 100755 --- a/test/systemtests/basic_test.go +++ b/test/systemtests/basic_test.go @@ -135,14 +135,14 @@ func (s *systemtestSuite) testBasicStartStopContainer(c *C, encap string) { } func (s *systemtestSuite) TestBasicSvcDiscoveryVXLAN(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == kubeScheduler { return } s.testBasicSvcDiscovery(c, "vxlan") } func (s *systemtestSuite) TestBasicSvcDiscoveryVLAN(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == 
kubeScheduler { return } s.testBasicSvcDiscovery(c, "vlan") @@ -221,18 +221,27 @@ func (s *systemtestSuite) testBasicSvcDiscovery(c *C, encap string) { } func (s *systemtestSuite) TestBasicNetmasterPortListen(c *C) { + var masterNodeIndex int32 + var masterNode *node + masterDefaultPort := "9999" + for _, node := range s.nodes { // Stop all netmaster instances c.Assert(node.stopNetmaster(), IsNil) + if node.Name() == "k8master" { + masterNode = node + } } for i := 0; i < s.basicInfo.Iterations; i++ { - masterNodeIndex := rand.Int31n(int32(len(s.nodes))) - masterNode := s.nodes[masterNodeIndex] + masterNodeIndex = 0 + if s.basicInfo.Scheduler != kubeScheduler { + masterNodeIndex = rand.Int31n(int32(len(s.nodes))) + masterNode = s.nodes[masterNodeIndex] + } masterIP, err := masterNode.getIPAddr(s.hostInfo.HostMgmtInterface) c.Assert(err, IsNil) - masterDefaultPort := "9999" masterListenPort := "999" + masterIP[len(masterIP)-1:] masterCtrlPort := "888" + masterIP[len(masterIP)-1:] @@ -251,6 +260,7 @@ func (s *systemtestSuite) TestBasicNetmasterPortListen(c *C) { // Control port with non default port and wildcard IP is not valid logrus.Infof("Checking case: --listen-url A.B.C.D:XXXX --control-url :YYYY") c.Assert(masterNode.startNetmaster(fmt.Sprintf("--listen-url=%s:%s --control-url=:%s", masterIP, masterListenPort, masterCtrlPort)), IsNil) + time.Sleep(5 * time.Second) c.Assert(masterNode.exec.runCommandUntilNoNetmasterError(), NotNil) // Case: --listen-url :YYYY --control-url A.B.C.D:YYYY diff --git a/test/systemtests/cfg.json b/test/systemtests/cfg.json index 65b40d924..f22412c0f 100644 --- a/test/systemtests/cfg.json +++ b/test/systemtests/cfg.json @@ -1 +1 @@ -{"binpath": "/opt/gopath/bin", "iterations": 3, "encap": "vlan", "keyFile": "/home/admin/.ssh/id_rsa", "gateway": "10.1.1.254", "subnet": "10.1.1.0/24", "network": "TestNet", "contiv_cluster_store": "etcd://localhost:2379", "platform": "vagrant", "mgmtInterface": "eth1", "dataInterfaces": "eth2,eth3", "vxlan": "1-10000", "product": "netplugin", "swarm_variable": "", "vlan": "1120-1150", "aci_mode": "off", "scheduler": "docker", "enableDNS": false, "tenant": "TestTenant", "short": false, "hostips": "192.168.2.10,192.168.2.11,192.168.2.12", "hostusernames": "admin,admin,admin", "containers": 3, "contiv_l3": ""} \ No newline at end of file +{"scheduler": "k8s", "install_mode": "kubeadm", "swarm_variable": "", "platform": "vagrant", "product": "netplugin", "aci_mode": "off", "short": false, "containers": 3, "iterations": 3, "enableDNS": false, "contiv_l3": "", "contiv_cluster_store": "etcd://netmaster:6666", "keyFile": "/home/admin/.ssh/id_rsa", "binpath": "contiv/bin", "hostips": "192.168.2.10,192.168.2.11,192.168.2.12", "hostusernames": "admin,admin,admin", "dataInterfaces": "eth2", "mgmtInterface": "eth1", "vlan": "1120-1150", "vxlan": "1-10000", "subnet": "10.1.1.0/24", "gateway": "10.1.1.254", "network": "TestNet", "tenant": "TestTenant", "encap": "vlan"} \ No newline at end of file diff --git a/test/systemtests/hostaccess_test.go b/test/systemtests/hostaccess_test.go index 83d889e3b..6d2d1b1bc 100755 --- a/test/systemtests/hostaccess_test.go +++ b/test/systemtests/hostaccess_test.go @@ -1,9 +1,9 @@ package systemtests import ( - "time" - "github.com/contiv/contivmodel/client" . 
"github.com/contiv/check" + "github.com/contiv/contivmodel/client" + "time" ) func (s *systemtestSuite) TestBasicHostAccess(c *C) { @@ -11,16 +11,25 @@ func (s *systemtestSuite) TestBasicHostAccess(c *C) { c.Skip("Skipping basic host access test for routing mode") } - time.Sleep(30*time.Second) + time.Sleep(30 * time.Second) global, err := s.cli.GlobalGet("global") c.Assert(err, IsNil) // save the FwdMode fm := global.FwdMode global.FwdMode = "routing" + + c.Assert(s.TearDownDefaultNetwork(), IsNil) c.Assert(s.cli.GlobalPost(global), IsNil) + time.Sleep(60 * time.Second) + c.Assert(s.SetupDefaultNetwork(), IsNil) + s.hostAccTest(c) global.FwdMode = fm + + c.Assert(s.TearDownDefaultNetwork(), IsNil) c.Assert(s.cli.GlobalPost(global), IsNil) + time.Sleep(60 * time.Second) + c.Assert(s.SetupDefaultNetwork(), IsNil) } func (s *systemtestSuite) hostAccTest(c *C) { @@ -55,7 +64,7 @@ func (s *systemtestSuite) hostAccTest(c *C) { GroupName: "epg-a", }), IsNil) - time.Sleep(15*time.Second) + time.Sleep(15 * time.Second) c.Assert(s.verifyHostRoutes([]string{"17.5.4.0/22", "13.5.7.0/24"}, true), IsNil) // Create num_nodes + 1 containers numContainters := len(s.nodes) + 1 diff --git a/test/systemtests/init_test.go b/test/systemtests/init_test.go index be0178b66..90d2a4f70 100755 --- a/test/systemtests/init_test.go +++ b/test/systemtests/init_test.go @@ -2,9 +2,7 @@ package systemtests import ( "flag" - "fmt" "os" - "strings" . "testing" "github.com/Sirupsen/logrus" @@ -25,6 +23,7 @@ type systemtestSuite struct { } type BasicInfo struct { Scheduler string `json:"scheduler"` //swarm, k8s or plain docker + InstallMode string `json:"install_mode"` //legacy or kubeadm SwarmEnv string `json:"swarm_variable"` //env variables to be set with swarm Platform string `json:"platform"` //vagrant or baremetal Product string `json:"product"` //for netplugin / volplugin @@ -56,6 +55,13 @@ type GlobInfo struct { Encap string `json:"encap"` } +const ( + kubeScheduler string = "k8s" + swarmScheduler string = "swarm" + legacyInstall string = "legacy" + kubeadmInstall string = "kubeadm" +) + var sts = &systemtestSuite{} var _ = Suite(sts) @@ -117,7 +123,6 @@ func (s *systemtestSuite) SetUpTest(c *C) { case "vagrant": s.SetUpTestVagrant(c) } - } func (s *systemtestSuite) TearDownTest(c *C) { @@ -150,19 +155,6 @@ func (s *systemtestSuite) TearDownSuite(c *C) { for _, node := range s.nodes { node.exec.cleanupContainers() } - - // Print all errors and fatal messages - for _, node := range s.nodes { - logrus.Infof("Checking for errors on %v", node.Name()) - out, _ := node.runCommand(`for i in /tmp/net*; do grep "error\|fatal\|panic" $i; done`) - if strings.Contains(out, "No such file or directory") { - continue - } - if out != "" { - logrus.Errorf("Errors in logfiles on %s: \n", node.Name()) - fmt.Printf("%s\n==========================\n\n", out) - } - } } func (s *systemtestSuite) Test00SSH(c *C) { diff --git a/test/systemtests/k8setup_test.go b/test/systemtests/k8setup_test.go index 89ae78f70..6046340c9 100755 --- a/test/systemtests/k8setup_test.go +++ b/test/systemtests/k8setup_test.go @@ -780,7 +780,7 @@ func (k *kubernetes) reloadNode(n *node) error { cmd := exec.Command("vagrant", "reload", n.Name()) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, "VAGRANT_CWD="+topDir+"/src/github.com/contiv/netplugin/vagrant/k8s/") - cmd.Env = append(cmd.Env, "CONTIV_K8=1") + cmd.Env = append(cmd.Env, "CONTIV_K8S_USE_KUBEADM=1") out, err := cmd.CombinedOutput() if err != nil { diff --git a/test/systemtests/kubeadm_test.go 
b/test/systemtests/kubeadm_test.go new file mode 100755 index 000000000..09245804f --- /dev/null +++ b/test/systemtests/kubeadm_test.go @@ -0,0 +1,969 @@ +package systemtests + +import ( + "encoding/json" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "os" + "time" +) + +type kubePod struct { + node *node +} + +var k8sMaster *node + +const ( + k8sMasterNode = "k8master" + netmasterRestartFile = "/tmp/restart_netmaster" + netpluginRestartFile = "/tmp/restart_netplugin" + netmasterLogLocation = "/var/contiv/log/netmaster.log" + netpluginLogLocation = "/var/contiv/log/netplugin.log" +) + +func (s *systemtestSuite) NewK8sPodExec(n *node) *kubePod { + k8 := new(kubePod) + k8.node = n + + if n.Name() == k8sMasterNode { + k8sMaster = n + } + return k8 +} + +func (k *kubePod) isMaster() bool { + return (k.node.Name() == k8sMasterNode) +} + +func (k *kubePod) newContainer(node *node, containerID, name string, spec containerSpec) (*container, error) { + cont := &container{ + node: node, + containerID: containerID, + name: name, + } + + out, err := k8sMaster.exec.getIPAddr(cont, "eth0") + + if err != nil { + return nil, err + } + cont.eth0.ip = out + + out, err = cont.node.exec.getIPv6Addr(cont, "eth0") + if err == nil { + cont.eth0.ipv6 = out + } + + return cont, nil +} + +func (k *kubePod) runContainer(spec containerSpec) (*container, error) { + var namestr, labelstr, image string + labels := []string{} + + if len(spec.tenantName) != 0 && spec.tenantName != "default" { + labels = append(labels, "io.contiv.tenant="+spec.tenantName) + } + + if spec.serviceName != "" { + labels = append(labels, "io.contiv.net-group="+spec.serviceName) + } + if spec.networkName != "" { + labels = append(labels, "io.contiv.network="+spec.networkName) + } + + labelstr = strings.Join(labels, ",") + + if len(labelstr) != 0 { + labelstr = "--labels=" + labelstr + } + + image = "--image=contiv/alpine " + + cmdStr := " --command -- sleep 900000" + + if spec.name != "" { + namestr = spec.name + } + + if len(spec.labels) > 0 { + l := " --labels=" + for _, label := range spec.labels { + labelstr += l + label + " " + } + } + + cmd := fmt.Sprintf("kubectl run %s %s %s --restart=Never %s ", namestr, labelstr, image, cmdStr) + + logrus.Infof("Starting Pod %s on with: %s", spec.name, cmd) + out, err := k8sMaster.tbnode.RunCommandWithOutput(cmd) + if err != nil { + logrus.Errorf("cmd %q failed: output below", cmd) + logrus.Println(out) + return nil, err + } + + //find out the node where pod is deployed + + for i := 0; i < 50; i++ { + time.Sleep(5 * time.Second) + cmd = fmt.Sprintf("kubectl get pods -o wide | grep %s", spec.name) + out, err = k8sMaster.tbnode.RunCommandWithOutput(cmd) + if strings.Contains(out, "Running") { + break + } + } + + podInfoStr := strings.TrimSpace(out) + + if podInfoStr == "" { + errStr := "Error Scheduling the pod" + logrus.Errorf(errStr) + return nil, errors.New(errStr) + } + + podInfo := strings.Split(podInfoStr, " ") + + podID := podInfo[0] + nodeID := podInfo[len(podInfo)-1] + + podNode := k.node.suite.vagrant.GetNode(nodeID) + + n := &node{ + tbnode: podNode, + suite: k.node.suite, + exec: k, + } + + cont, err := k.newContainer(n, podID, spec.name, spec) + if err != nil { + logrus.Error(err) + return nil, err + } + + return cont, nil +} + +func (k *kubePod) checkPingFailure(c *container, ipaddr string) error { + logrus.Infof("Expecting ping failure from %v to %s", c, ipaddr) + if err := k.checkPing(c, ipaddr); err == nil { + return fmt.Errorf("Ping 
succeeded when expected to fail from %v to %s", c, ipaddr) + } + + return nil +} + +func (k *kubePod) checkPing(c *container, ipaddr string) error { + logrus.Infof("Checking ping from %v to %s", c, ipaddr) + out, err := k.exec(c.containerID, "ping -c 1 "+ipaddr) + + if err != nil || strings.Contains(out, "0 received, 100% packet loss") { + errStr := fmt.Sprintf("Ping from %s to %s FAILED: %q - %v", c, ipaddr, out, err) + logrus.Errorf(errStr) + return errors.New(errStr) + } + + logrus.Infof("Ping from %s to %s SUCCEEDED", c, ipaddr) + return nil +} + +func (k *kubePod) checkPing6Failure(c *container, ipaddr string) error { + logrus.Infof("Expecting ping failure from %v to %s", c, ipaddr) + if err := k.checkPing6(c, ipaddr); err == nil { + return fmt.Errorf("Ping succeeded when expected to fail from %v to %s", c, ipaddr) + } + + return nil +} + +func (k *kubePod) checkPing6(c *container, ipaddr string) error { + logrus.Infof("Checking ping6 from %v to %s", c, ipaddr) + out, err := k.exec(c.containerID, "ping6 -c 1 "+ipaddr) + + if err != nil || strings.Contains(out, "0 received, 100% packet loss") { + return fmt.Errorf("Ping failed from %v to %s: %q - %v", c, ipaddr, out, err) + } + + logrus.Infof("Ping from %v to %s SUCCEEDED", c, ipaddr) + return nil +} + +func (k *kubePod) getIPAddr(c *container, dev string) (string, error) { + out, err := k8sMaster.tbnode.RunCommandWithOutput(fmt.Sprintf("kubectl exec %s -- ip addr show dev %s | grep inet | head -1", c.containerID, dev)) + if err != nil { + logrus.Errorf("Failed to get IP for container %q", c.containerID) + logrus.Println(out) + } + + parts := regexp.MustCompile(`\s+`).Split(strings.TrimSpace(out), -1) + if len(parts) < 2 { + return "", fmt.Errorf("Invalid output from container %q: %s", c.containerID, out) + } + + parts = strings.Split(parts[1], "/") + out = strings.TrimSpace(parts[0]) + return out, err +} + +func (k *kubePod) getIPv6Addr(c *container, dev string) (string, error) { + /*FIXME: fix for k8 v6 */ + return "", nil +} + +func (k *kubePod) getMACAddr(c *container, dev string) (string, error) { + out, err := k8sMaster.tbnode.RunCommandWithOutput(fmt.Sprintf("kubectl exec %s -- ip addr show dev %s | grep ether | head -1", c.containerID, dev)) + if err != nil { + logrus.Errorf("Failed to get IP for container %q", c.containerID) + logrus.Println(out) + } + + parts := regexp.MustCompile(`\s+`).Split(strings.TrimSpace(out), -1) + if len(parts) < 2 { + return "", fmt.Errorf("Invalid output from container %q: %s", c.containerID, out) + } + + parts = strings.Split(parts[1], "/") + out = strings.TrimSpace(parts[0]) + return out, err +} + +/* +* exec is used to run a specific command using kubectl on the host + */ +func (k *kubePod) exec(podName, args string, ns ...string) (string, error) { + namespace := "default" + if len(ns) != 0 { + namespace = ns[0] + } + cmd := `kubectl -n ` + namespace + ` exec ` + podName + ` -- ` + args + logrus.Debugf("Exec: Running command -- %s", cmd) + out, err := k8sMaster.runCommand(cmd) + if err != nil { + return out, err + } + + return out, nil +} + +/* +* execBG executes a background process on the node + */ +func (k *kubePod) execBG(podName, args string, ns ...string) { + namespace := "default" + if len(ns) != 0 { + namespace = ns[0] + } + cmd := `kubectl -n ` + namespace + ` exec ` + podName + ` -- ` + args + logrus.Debugf("ExecBG: Running command -- %s", cmd) + k8sMaster.tbnode.RunCommandBackground(cmd) +} + +/* +* podExec function is used to run a command typically multiple commands +* with pipes 
and redirections within the pod rather than the node + */ +func (k *kubePod) podExec(podName, args string, ns ...string) (string, error) { + // NOTE: + // Remotessh library wraps this command as follows: + // ssh bash -lc '' + // + // Backticks and quotes here ensure that the command + // is properly wrapped to execute on pod rather than on the node + podCmd := `sh -c '\''` + args + `'\''` + return k.exec(podName, podCmd, ns...) +} + +/* +* podExecBG function is used to run a background command +* within the pod rather than the node + */ +func (k *kubePod) podExecBG(podName, args string, ns ...string) error { + namespace := "default" + if len(ns) != 0 { + namespace = ns[0] + } + + // NOTE: + // Remotessh library wraps this command as follows: + // ssh bash -lc '' + // + // - Backticks and quotes here ensure that the command + // is properly wrapped to execute on pod rather than on the node + // - nohup along with & ensures that the process continues to live + // after the shell is terminated + // Since the command is to run in BG in the pod and not on the node, + // RunCommandBackground is not required here + + podCmd := `kubectl -n ` + namespace + ` exec ` + podName + ` -- nohup sh -c '\''` + args + ` &'\''` + logrus.Debugf("Pod Exec BG: Running command -- %s", podCmd) + return k8sMaster.tbnode.RunCommand(podCmd) +} + +func (k *kubePod) start(c *container) error { + //Kubernetes does not support start/stop + return nil +} + +func (k *kubePod) stop(c *container) error { + //Kubernetes does not support start/stop + return nil +} + +func (k *kubePod) rm(c *container) error { + logrus.Infof("Removing Pod: %s on %s", c.containerID, c.node.Name()) + k8sMaster.tbnode.RunCommand(fmt.Sprintf("kubectl delete pod %s", c.name)) + for i := 0; i < 80; i++ { + out, _ := k8sMaster.tbnode.RunCommandWithOutput(fmt.Sprintf("kubectl get pod %s", c.containerID)) + if strings.Contains(out, "not found") { + return nil + } + time.Sleep(5 * time.Second) + } + return fmt.Errorf("Error Termininating pod %s on node %s", c.name, c.node.Name()) +} + +func (k *kubePod) startListener(c *container, port int, protocol string) error { + var protoStr string + + if protocol == "udp" { + protoStr = "-u" + } + + k.execBG(c.containerID, fmt.Sprintf("nc -lk %s -p %v -e /bin/true", protoStr, port)) + return nil + +} + +func (k *kubePod) startIperfServer(c *container) error { + k.execBG(c.containerID, "iperf -s") + return nil +} + +func (k *kubePod) startIperfClient(c *container, ip, limit string, isErr bool) error { + var ( + bwLimit int64 + bwInt64 int64 + ) + bw, err := k.exec(c.containerID, fmt.Sprintf("iperf -c %s ", ip)) + logrus.Infof("starting iperf client on: %v for server ip: %s", c, ip) + if err != nil { + logrus.Errorf("Error starting the iperf client") + } + if strings.Contains(bw, "bits/sec") { + bwString := strings.Split(bw, "Bytes ") + bwInt64, err = BwConvertInt64(bwString[1]) + if err != nil { + return err + } + if limit != "" { + bwLimit, err = BwConvertInt64(limit) + if err != nil { + return err + } + bwLimit = bwLimit + (bwLimit / 10) + if bwLimit > bwInt64 { + logrus.Infof("Obtained bandwidth: %dkbits is less than the limit: %dkbits", bwInt64, bwLimit) + } else if bwLimit < bwInt64 { + if isErr { + logrus.Errorf("Obtained Bandwidth: %s is more than the limit: %s", strings.TrimSpace(bwString[1]), limit) + } else { + logrus.Errorf("Obtained bandwidth: %s is more than the limit %s", bwString[1], limit) + return errors.New("Applied bandwidth is more than bandwidth rate!") + } + } else { + errStr := 
fmt.Sprintf("Bandwidth rate: %s not applied", limit) + logrus.Errorf(errStr) + return errors.New(errStr) + } + } else { + logrus.Infof("Obtained bandwidth: %s", bwString[1]) + } + } else { + logrus.Errorf("Bandwidth string invalid: %s", bw) + } + return err +} + +func (k *kubePod) tcFilterShow(bw string) error { + if k.isMaster() { + return nil + } + + qdiscShow, err := k.node.tbnode.RunCommandWithOutput("tc qdisc show") + if err != nil { + return err + } + qdiscoutput := strings.Split(qdiscShow, "ingress") + vvport := strings.Split(qdiscoutput[1], "parent") + vvPort := strings.Split(vvport[0], "dev ") + cmd := fmt.Sprintf("tc -s filter show dev %s parent ffff:", vvPort[1]) + str, err := k.node.runCommand(cmd) + if err != nil { + return err + } + output := strings.Split(str, "rate ") + rate := strings.Split(output[1], "burst") + regex := regexp.MustCompile("[0-9]+") + outputStr := regex.FindAllString(rate[0], -1) + outputInt, err := strconv.ParseInt(outputStr[0], 10, 64) + bwInt, err := BwConvertInt64(bw) + if err != nil { + return err + } + if bwInt == outputInt { + logrus.Infof("Applied bandwidth: %dkbits equals tc qdisc rate: %dkbits", bwInt, outputInt) + } else { + errStr := fmt.Sprintf("Applied bandwidth: %dkbits does not match the tc rate: %d", bwInt, outputInt) + logrus.Errorf(errStr) + return errors.New(errStr) + } + return nil +} + +func (k *kubePod) checkConnection(c *container, ipaddr, protocol string, port int) error { + var protoStr string + + if protocol == "udp" { + protoStr = "-u" + } + + logrus.Infof("Checking connection from %s to ip %s on port %d", c, ipaddr, port) + + out, err := k.exec(c.containerID, fmt.Sprintf("nc -z -n -v -w 1 %s %s %v", protoStr, ipaddr, port)) + if err != nil && !strings.Contains(out, "open") { + logrus.Errorf("Connection from %v to ip %s on port %d FAILED", *c, ipaddr, port) + } else { + logrus.Infof("Connection from %v to ip %s on port %d SUCCEEDED", *c, ipaddr, port) + } + + return err +} + +func (k *kubePod) checkNoConnection(c *container, ipaddr, protocol string, port int) error { + logrus.Infof("Expecting connection to fail from %v to %s on port %d", c, ipaddr, port) + + if err := k.checkConnection(c, ipaddr, protocol, port); err != nil { + return nil + } + return fmt.Errorf("Connection SUCCEEDED on port %d from %s from %v when it should have FAILED.", port, ipaddr, c) +} + +func (k *kubePod) cleanupContainers() error { + if k.isMaster() { + logrus.Infof("Cleaning up containers on %s", k.node.Name()) + k8sMaster.tbnode.RunCommand(fmt.Sprintf("kubectl delete pod --all")) + } + return nil +} + +func (k *kubePod) startNetplugin(args string) error { + if k.isMaster() { + return nil + } + podName, err := getPodName("netplugin", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + logrus.Infof("Starting netplugin on %s", k.node.Name()) + startNetpluginCmd := k.node.suite.basicInfo.BinPath + `/netplugin -plugin-mode=kubernetes -vlan-if=` + k.node.suite.hostInfo.HostDataInterfaces + ` -cluster-store=` + k.node.suite.basicInfo.ClusterStore + ` ` + args + ` > ` + netpluginLogLocation + ` 2>&1` + + return k.podExecBG(podName, startNetpluginCmd, "kube-system") +} + +func (k *kubePod) stopNetplugin() error { + podName, err := getPodName("netplugin", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + stopRestartCmd := `rm ` + netpluginRestartFile + k.exec(podName, stopRestartCmd, "kube-system") + + logrus.Infof("Stopping netplugin on %s", k.node.Name()) + 
killNetpluginCmd := `pkill netplugin` + _, err = k.exec(podName, killNetpluginCmd, "kube-system") + return err +} + +func (k *kubePod) stopNetmaster() error { + if !k.isMaster() { + return nil + } + podName, err := getPodName("netmaster", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + stopRestartCmd := `rm ` + netmasterRestartFile + k.exec(podName, stopRestartCmd, "kube-system") + + logrus.Infof("Stopping netmaster on %s", k.node.Name()) + killNetmasterCmd := `pkill netmaster` + _, err = k.exec(podName, killNetmasterCmd, "kube-system") + return err +} + +func (k *kubePod) startNetmaster(args string) error { + if !k.isMaster() { + return nil + } + logrus.Infof("Starting netmaster on %s", k.node.Name()) + podName, err := getPodName("netmaster", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + netmasterStartCmd := k.node.suite.basicInfo.BinPath + `/netmaster` + ` -cluster-store=` + k.node.suite.basicInfo.ClusterStore + ` -cluster-mode=kubernetes ` + args + ` > ` + netmasterLogLocation + ` 2>&1` + + return k.podExecBG(podName, netmasterStartCmd, "kube-system") +} + +func (k *kubePod) cleanupMaster() { + if !k.isMaster() { + return + } + logrus.Infof("Cleaning up master on %s", k8sMaster.Name()) + podName, err := getPodName("contiv-etcd", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return + } + clusterStoreInfo := strings.Split(k.node.suite.basicInfo.ClusterStore, "//") + etcdClient := "http://" + clusterStoreInfo[len(clusterStoreInfo)-1] + logrus.Infof("Cleaning out etcd info on %s", etcdClient) + + k.podExec(podName, `etcdctl -C `+etcdClient+` rm --recursive /contiv`, "kube-system") + k.podExec(podName, `etcdctl -C `+etcdClient+` rm --recursive /contiv.io`, "kube-system") + k.podExec(podName, `etcdctl -C `+etcdClient+` rm --recursive /docker`, "kube-system") +} + +func getPodName(podRegex, nodeName string) (string, error) { + podNameCmd := `kubectl -n kube-system get pods -o wide | grep ` + podRegex + ` | grep ` + nodeName + ` | cut -d " " -f 1` + podName, err := k8sMaster.tbnode.RunCommandWithOutput(podNameCmd) + if err != nil { + logrus.Errorf("Couldn't fetch pod info on %s", nodeName) + return "", err + } + podName = strings.TrimSpace(podName) + return podName, nil +} + +func (k *kubePod) cleanupSlave() { + if k.isMaster() { + return + } + logrus.Infof("Cleaning up slave on %s", k.node.Name()) + podName, err := getPodName("netplugin", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return + } + + ovsCleanupCmd := `ovs-vsctl list-br | grep contiv | xargs -rt -I % ovs-vsctl del-br %` + _, err = k.podExec(podName, ovsCleanupCmd, "kube-system") + if err != nil { + logrus.Errorf("ovs cleanup failed with err: %+v", err) + } + + linkCleanupCmd := `ifconfig | grep vport | cut -d " " -f 1 | xargs -rt -I % ip link delete %` + _, err = k.podExec(podName, linkCleanupCmd, "kube-system") + if err != nil { + logrus.Errorf("link cleanup failed with err: %+v", err) + } +} + +func (k *kubePod) runCommandUntilNoNetmasterError() error { + if !k.isMaster() { + return nil + } + logrus.Infof("Checking for netmaster status on node: %s", k.node.Name()) + podName, err := getPodName("netmaster", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + processCheckCmd := `kubectl -n kube-system exec ` + podName + ` -- pgrep netmaster` + return k8sMaster.runCommandUntilNoError(processCheckCmd) +} + +func (k *kubePod) 
runCommandUntilNoNetpluginError() error { + if k.isMaster() { + return nil + } + logrus.Infof("Checking for netplugin status on: %s", k.node.Name()) + podName, err := getPodName("netplugin", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + processCheckCmd := `kubectl -n kube-system exec ` + podName + ` -- pgrep netplugin` + return k8sMaster.runCommandUntilNoError(processCheckCmd) +} + +func (k *kubePod) rotateNetmasterLog() error { + if k.isMaster() { + return k.rotateLog("netmaster") + } + return nil +} + +func (k *kubePod) rotateNetpluginLog() error { + if !k.isMaster() { + return k.rotateLog("netplugin") + } + return nil +} + +func (k *kubePod) checkForNetpluginErrors() error { + if k.isMaster() { + return nil + } + + podName, err := getPodName("netplugin", k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + // NOTE: Checking for error here could result in Error code: 123 + // Err code 123 might be the case when grep results in no output + fatalCheckCmd := `ls /var/contiv/log/net* | xargs -r -I % grep --text -A 5 "panic\|fatal" %` + out, _ := k.podExec(podName, fatalCheckCmd, "kube-system") + if out != "" { + errStr := fmt.Sprintf("fatal error in netplugin logs on %s\n", k.node.Name()) + logrus.Errorf(errStr) + fmt.Printf("%s\n==========================================\n", out) + return errors.New(errStr) + } + + errCheckCmd := `ls /var/contiv/log/net* | xargs -r -I {} grep --text "error" {}` + out, _ = k.podExec(podName, errCheckCmd, "kube-system") + if out != "" { + logrus.Errorf("error output in netplugin logs on %s: \n", k.node.Name()) + fmt.Printf("%s==========================================\n\n", out) + // FIXME: We still have some tests that are failing error check + // return fmt.Errorf("error output in netplugin logs") + } + + return nil +} + +func (k *kubePod) rotateLog(processName string) error { + podName, err := getPodName(processName, k.node.Name()) + if err != nil { + logrus.Errorf("pod not found: %+v", err) + return err + } + + oldLogFile := fmt.Sprintf("/var/contiv/log/%s.log", processName) + newLogFilePrefix := fmt.Sprintf("/var/contiv/log/_%s", processName) + rotateLogCmd := `echo` + " `date +%s` " + `| xargs -I {} mv ` + oldLogFile + ` ` + newLogFilePrefix + `-{}.log` + _, err = k.podExec(podName, rotateLogCmd, "kube-system") + return err +} + +func (k *kubePod) checkConnectionRetry(c *container, ipaddr, protocol string, port, delay, retries int) error { + var protoStr string + var err error + + err = nil + + if protocol == "udp" { + protoStr = "-u" + } + + logrus.Infof("Checking connection from %s to ip %s on port %d, delay: %d, retries: %d", + c, ipaddr, port, delay, retries) + + for i := 0; i < retries; i++ { + + _, err = k.exec(c.containerID, fmt.Sprintf("nc -z -n -v -w 1 %s %s %v", protoStr, ipaddr, port)) + if err == nil { + logrus.Infof("Connection to ip %s on port %d SUCCEEDED, tries: %d", ipaddr, port, i+1) + return nil + } + time.Sleep(2 * time.Second) + } + + logrus.Errorf("Connection to ip %s on port %d FAILED %v", ipaddr, port, err) + return err +} + +func (k *kubePod) checkNoConnectionRetry(c *container, ipaddr, protocol string, port, delay, retries int) error { + logrus.Infof("Expecting connection to fail from %v to %s on port %d", c, ipaddr, port) + + if err := k.checkConnectionRetry(c, ipaddr, protocol, port, delay, retries); err != nil { + return nil + } + + return fmt.Errorf("Connection SUCCEEDED on port %d from %s from %s when it should have FAILED.", 
port, ipaddr, c) +} + +func (k *kubePod) checkPing6WithCount(c *container, ipaddr string, count int) error { + logrus.Infof("Checking ping6 from %v to %s", c, ipaddr) + cmd := fmt.Sprintf("ping6 -c %d %s", count, ipaddr) + out, err := k.exec(c.containerID, cmd) + + if err != nil || strings.Contains(out, "0 received, 100% packet loss") { + errStr := fmt.Sprintf("Ping6 from %s to %s FAILED: %q - %v", c, ipaddr, out, err) + logrus.Errorf(errStr) + return errors.New(errStr) + } + + logrus.Infof("Ping6 from %s to %s SUCCEEDED", c, ipaddr) + return nil +} + +func (k *kubePod) checkPingWithCount(c *container, ipaddr string, count int) error { + logrus.Infof("Checking ping from %s to %s", c, ipaddr) + cmd := fmt.Sprintf("ping -c %d %s", count, ipaddr) + out, err := k.exec(c.containerID, cmd) + + if err != nil || strings.Contains(out, "0 received, 100% packet loss") { + errStr := fmt.Sprintf("Ping from %s to %s FAILED: %q - %v", c, ipaddr, out, err) + logrus.Errorf(errStr) + return errors.New(errStr) + } + + logrus.Infof("Ping from %s to %s SUCCEEDED", c, ipaddr) + return nil +} +func (k *kubePod) checkSchedulerNetworkCreated(nwName string, expectedOp bool) error { + return nil +} + +func (k *kubePod) checkSchedulerNetworkOnNodeCreated(nwName []string, n *node) error { + return nil +} + +func (k *kubePod) waitForListeners() error { + if k.isMaster() { + return nil + } + return k.node.runCommandWithTimeOut("netstat -tlpn | grep 9090 | grep LISTEN", 500*time.Millisecond, 50*time.Second) +} + +func (k *kubePod) verifyAgents(agentIPs map[string]bool) (string, error) { + if !k.isMaster() { + return "", nil + } + + var data interface{} + actAgents := make(map[string]uint32) + + // read vtep information from inspect + cmd := "curl -s localhost:9999/debug/ofnet | python -mjson.tool" + str, err := k.node.tbnode.RunCommandWithOutput(cmd) + if err != nil { + return "", err + } + + err = json.Unmarshal([]byte(str), &data) + if err != nil { + logrus.Errorf("Unmarshal error: %v", err) + return str, err + } + + dd := data.(map[string]interface{}) + adb := dd["AgentDb"].(map[string]interface{}) + for key := range adb { + actAgents[key] = 1 + } + + // build expected agentRpc + rpcSet := []string{":9002", ":9003"} + expAgents := make(map[string]uint32) + for agent := range agentIPs { + for _, rpc := range rpcSet { + k := agent + rpc + expAgents[k] = 1 + } + } + + for agent := range expAgents { + _, found := actAgents[agent] + if !found { + return str, errors.New("Agent " + agent + " not found") + } + } + + // verify there are no extraneous Agents + for agent := range actAgents { + _, found := expAgents[agent] + if !found { + return str, errors.New("Unexpected Agent " + agent + " found on " + k.node.Name()) + } + } + + return "", nil +} + +func (k *kubePod) verifyVTEPs(expVTEPS map[string]bool) (string, error) { + var data interface{} + actVTEPs := make(map[string]uint32) + if k.isMaster() { + return "", nil + } + // read vtep information from inspect + cmd := "curl -s localhost:9090/inspect/driver | python -mjson.tool" + str, err := k.node.tbnode.RunCommandWithOutput(cmd) + if err != nil { + return "", err + } + + err = json.Unmarshal([]byte(str), &data) + if err != nil { + logrus.Errorf("Unmarshal error: %v", err) + return str, err + } + + drvInfo := data.(map[string]interface{}) + vx, found := drvInfo["vxlan"] + if !found { + errStr := fmt.Sprintf("vxlan not found in driver info") + logrus.Errorf(errStr) + return str, errors.New(errStr) + } + + vt := vx.(map[string]interface{}) + v, found := vt["VtepTable"] + if 
!found { + errStr := fmt.Sprintf("VtepTable not found in driver info") + logrus.Errorf(errStr) + return str, errors.New(errStr) + } + + vteps := v.(map[string]interface{}) + for key := range vteps { + actVTEPs[key] = 1 + } + + // read local ip + localVtep := "" + l, found := vt["LocalIp"] + if found { + switch l.(type) { + case string: + localVtep = l.(string) + actVTEPs[localVtep] = 1 + } + } + + for vtep := range expVTEPS { + _, found := actVTEPs[vtep] + if !found { + return str, errors.New("VTEP " + vtep + " not found") + } + } + + // verify there are no extraneous VTEPs + for vtep := range actVTEPs { + _, found := expVTEPS[vtep] + if !found { + return str, errors.New("Unexpected VTEP " + vtep + " found on " + localVtep) + } + } + + return "", nil +} + +func (k *kubePod) verifyEPs(epList []string) (string, error) { + // read ep information from inspect + if k.isMaster() { + return "", nil + } + cmd := "curl -s localhost:9090/inspect/driver | python -mjson.tool" + str, err := k.node.tbnode.RunCommandWithOutput(cmd) + if err != nil { + return "", err + } + + for _, ep := range epList { + if !strings.Contains(str, ep) { + return str, errors.New(ep + " not found on " + k.node.Name()) + } + } + + return "", nil +} + +//FIXME: This needs to be moved to node abstraction implmentation Once +//that change is in. +func (k *kubePod) reloadNode(n *node) error { + + if n.Name() == k8sMasterNode { + return nil + } + + logrus.Infof("Reloading node %s", n.Name()) + + topDir := os.Getenv("GOPATH") + topDir = strings.Split(topDir, ":")[1] + cmd := exec.Command("vagrant", "reload", n.Name()) + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "VAGRANT_CWD="+topDir+"/src/github.com/contiv/netplugin/vagrant/k8s/") + cmd.Env = append(cmd.Env, "CONTIV_K8S_USE_KUBEADM=1") + out, err := cmd.CombinedOutput() + + if err != nil { + logrus.Errorf("Error reloading node %s. Err: %v\n Output: %s", n.Name(), err, string(out)) + return err + } + + logrus.Infof("Reloaded node %s. Output:\n%s", n.Name(), string(out)) + return nil +} + +func (k *kubePod) getMasterIP() (string, error) { + return k8sMaster.getIPAddr("eth1") +} + +func (k *kubePod) verifyUplinkState(n *node, uplinks []string) error { + var err error + var portName string + var cmd, output string + + if n.Name() == k8sMasterNode { + return nil + } + + if len(uplinks) > 1 { + portName = "uplinkPort" + } else { + portName = uplinks[0] + } + + // Verify port state + cmd = fmt.Sprintf("sudo ovs-vsctl find Port name=%s", portName) + output, err = n.runCommand(cmd) + if err != nil || !(strings.Contains(string(output), portName)) { + err = fmt.Errorf("Lookup failed for uplink Port %s. Err: %+v", portName, err) + return err + } + + // Verify Interface state + for _, uplink := range uplinks { + cmd = fmt.Sprintf("sudo ovs-vsctl find Interface name=%s", uplink) + output, err = n.runCommand(cmd) + if err != nil || !(strings.Contains(string(output), uplink)) { + err = fmt.Errorf("Lookup failed for uplink interface %s for uplink cfg:%+v. Err: %+v", uplink, uplinks, err) + return err + } + } + + return err +} diff --git a/test/systemtests/netprofile_test.go b/test/systemtests/netprofile_test.go index b3d19da08..f1a371308 100644 --- a/test/systemtests/netprofile_test.go +++ b/test/systemtests/netprofile_test.go @@ -565,7 +565,7 @@ func (s *systemtestSuite) testNetprofileTriggerNetpluginRestart(c *C, encap stri //the netmaster is back up and running. 
func (s *systemtestSuite) TestNetprofileUpdateNetmasterSwitchover(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == kubeScheduler { return } diff --git a/test/systemtests/network_test.go b/test/systemtests/network_test.go index ae62f6370..9701ab382 100755 --- a/test/systemtests/network_test.go +++ b/test/systemtests/network_test.go @@ -12,14 +12,14 @@ import ( ) func (s *systemtestSuite) TestInfraNetworkAddDeleteVXLAN(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == kubeScheduler { return } s.testInfraNetworkAddDelete(c, "vxlan") } func (s *systemtestSuite) TestInfraNetworkAddDeleteVLAN(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == kubeScheduler { return } s.testInfraNetworkAddDelete(c, "vlan") @@ -200,6 +200,7 @@ func (s *systemtestSuite) TestNetworkAddDeleteNoGatewayVXLAN(c *C) { func (s *systemtestSuite) TestNetworkAddDeleteNoGatewayVLAN(c *C) { s.testNetworkAddDeleteNoGateway(c, "vlan") } + func (s *systemtestSuite) testNetworkAddDeleteNoGateway(c *C, encap string) { if s.fwdMode == "routing" && encap == "vlan" { @@ -380,6 +381,7 @@ func (s *systemtestSuite) TestNetworkAddDeleteTenantFwdModeChangeVXLAN(c *C) { for i := 0; i < s.basicInfo.Iterations; i++ { s.testNetworkAddDeleteTenant(c, "vxlan", fwdMode) if fwdMode == "routing" { + c.Assert(s.TearDownDefaultNetwork(), IsNil) c.Assert(s.cli.GlobalPost(&client.Global{FwdMode: "bridge", Name: "global", NetworkInfraType: "default", @@ -389,9 +391,12 @@ func (s *systemtestSuite) TestNetworkAddDeleteTenantFwdModeChangeVXLAN(c *C) { PvtSubnet: "172.19.0.0/16", }), IsNil) time.Sleep(60 * time.Second) + c.Assert(s.SetupDefaultNetwork(), IsNil) + s.testNetworkAddDeleteTenant(c, "vxlan", "bridge") fwdMode = "bridge" } else { + c.Assert(s.TearDownDefaultNetwork(), IsNil) c.Assert(s.cli.GlobalPost(&client.Global{FwdMode: "routing", Name: "global", NetworkInfraType: "default", @@ -401,6 +406,8 @@ func (s *systemtestSuite) TestNetworkAddDeleteTenantFwdModeChangeVXLAN(c *C) { PvtSubnet: "172.19.0.0/16", }), IsNil) time.Sleep(60 * time.Second) + c.Assert(s.SetupDefaultNetwork(), IsNil) + s.testNetworkAddDeleteTenant(c, "vxlan", "routing") fwdMode = "routing" } diff --git a/test/systemtests/node_test.go b/test/systemtests/node_test.go index 032afe419..208ace8ab 100755 --- a/test/systemtests/node_test.go +++ b/test/systemtests/node_test.go @@ -138,6 +138,7 @@ func (n *node) checkForNetpluginErrors() error { func (n *node) runCommandWithTimeOut(cmd string, tick, timeout time.Duration) error { runCmd := func() (string, bool) { + logrus.Debugf("Running cmd: %s", cmd) if err := n.tbnode.RunCommand(cmd); err != nil { return "", false } diff --git a/test/systemtests/policy_test.go b/test/systemtests/policy_test.go index 329c413f0..5092f3656 100755 --- a/test/systemtests/policy_test.go +++ b/test/systemtests/policy_test.go @@ -389,7 +389,7 @@ func (s *systemtestSuite) testPolicyFeatures(c *C, encap string) { NetworkName: "private", Subnet: "10.1.0.0/16", Gateway: "10.1.1.254", - PktTag: 1, + PktTag: 10, Encap: encap, } c.Assert(s.cli.NetworkPost(network), IsNil) @@ -398,7 +398,7 @@ func (s *systemtestSuite) testPolicyFeatures(c *C, encap string) { NetworkName: "dummy", Subnet: "20.1.0.0/16", Gateway: "20.1.1.254", - PktTag: 2, + PktTag: 20, Encap: encap, } c.Assert(s.cli.NetworkPost(dummyNet), IsNil) diff --git a/test/systemtests/trigger_test.go b/test/systemtests/trigger_test.go index 43926f721..f0c9606d4 100755 --- a/test/systemtests/trigger_test.go +++ 
b/test/systemtests/trigger_test.go @@ -68,7 +68,7 @@ func (s *systemtestSuite) TestTriggerNetpluginUplinkUpgrade(c *C) { func (s *systemtestSuite) TestTriggerNetmasterSwitchover(c *C) { - if s.basicInfo.Scheduler == "k8" { + if s.basicInfo.Scheduler == kubeScheduler { return } @@ -246,8 +246,8 @@ func (s *systemtestSuite) TestTriggerNetpluginDisconnect(c *C) { } func (s *systemtestSuite) TestTriggerNodeReload(c *C) { - // can not run this test on docker 1.10 & k8s - if s.basicInfo.Scheduler == "k8" { + // can not run this test on k8s + if s.basicInfo.Scheduler == kubeScheduler { c.Skip("Skipping node reload test for k8s") } @@ -288,7 +288,7 @@ func (s *systemtestSuite) TestTriggerNodeReload(c *C) { // reload VMs one at a time for _, node := range s.nodes { - if s.basicInfo.Scheduler == "k8" && node.Name() == "k8master" { + if s.basicInfo.Scheduler == kubeScheduler && node.Name() == "k8master" { continue } c.Assert(node.reloadNode(), IsNil) @@ -390,7 +390,7 @@ func (s *systemtestSuite) TestTriggerNetPartition(c *C) { // reload VMs one at a time for _, node := range s.nodes { - if s.basicInfo.Scheduler == "k8" && node.Name() == "k8master" { + if s.basicInfo.Scheduler == kubeScheduler && node.Name() == "k8master" { continue } nodeIP, err := node.getIPAddr("eth1") diff --git a/test/systemtests/util_test.go b/test/systemtests/util_test.go index dfbd64d15..8a7bb09be 100755 --- a/test/systemtests/util_test.go +++ b/test/systemtests/util_test.go @@ -956,7 +956,7 @@ func (s *systemtestSuite) startListenersOnProviders(containers []*container, por func (s *systemtestSuite) getVTEPList() (map[string]bool, error) { vtepMap := make(map[string]bool) for _, n := range s.nodes { - if s.basicInfo.Scheduler == "k8" && n.Name() == "k8master" { + if s.basicInfo.Scheduler == kubeScheduler && n.Name() == "k8master" { continue } vtep, err := n.getIPAddr(s.hostInfo.HostMgmtInterface) @@ -1054,9 +1054,9 @@ func (s *systemtestSuite) verifyVTEPs() error { time.Sleep(1 * time.Second) } - logrus.Errorf("Node %s failed to verify all VTEPs", failNode) + logrus.Errorf("Node %s failed to verify all VTEPs. ERR: %v", failNode, err) logrus.Infof("Debug output:\n %s", dbgOut) - return errors.New("Failed to verify VTEPs after 20 sec") + return errors.New("Failed to verify VTEPs after 60 sec") } func (s *systemtestSuite) verifyEPs(containers []*container) error { @@ -1197,9 +1197,9 @@ func (s *systemtestSuite) SetUpSuiteBaremetal(c *C) { node.suite = s switch s.basicInfo.Scheduler { - case "k8": + case kubeScheduler: node.exec = s.NewK8sExec(node) - case "swarm": + case swarmScheduler: node.exec = s.NewSwarmExec(node) default: node.exec = s.NewDockerExec(node) @@ -1240,7 +1240,7 @@ func (s *systemtestSuite) SetUpSuiteVagrant(c *C) { if s.fwdMode == "routing" { contivL3Nodes := 2 switch s.basicInfo.Scheduler { - case "k8": + case kubeScheduler: topDir := os.Getenv("GOPATH") //topDir contains the godeps path. 
hence purging the gopath topDir = strings.Split(topDir, ":")[1] @@ -1248,7 +1248,11 @@ func (s *systemtestSuite) SetUpSuiteVagrant(c *C) { contivNodes = 4 // 3 contiv nodes + 1 k8master c.Assert(s.vagrant.Setup(false, []string{"CONTIV_L3=1 VAGRANT_CWD=" + topDir + "/src/github.com/contiv/netplugin/vagrant/k8s/"}, contivNodes), IsNil) - case "swarm": + // Sleep to give enough time for the netplugin pods to come up + logrus.Infof("Sleeping for 1 minute for pods to come up") + time.Sleep(time.Minute) + + case swarmScheduler: c.Assert(s.vagrant.Setup(false, append([]string{"CONTIV_NODES=3 CONTIV_L3=1"}, s.basicInfo.SwarmEnv), contivNodes+contivL3Nodes), IsNil) default: c.Assert(s.vagrant.Setup(false, []string{"CONTIV_NODES=3 CONTIV_L3=1"}, contivNodes+contivL3Nodes), IsNil) @@ -1257,7 +1261,7 @@ func (s *systemtestSuite) SetUpSuiteVagrant(c *C) { } else { switch s.basicInfo.Scheduler { - case "k8": + case kubeScheduler: contivNodes = contivNodes + 1 //k8master topDir := os.Getenv("GOPATH") @@ -1271,7 +1275,11 @@ func (s *systemtestSuite) SetUpSuiteVagrant(c *C) { c.Assert(s.vagrant.Setup(false, []string{"VAGRANT_CWD=" + topDir + "/src/github.com/contiv/netplugin/vagrant/k8s/"}, contivNodes), IsNil) - case "swarm": + // Sleep to give enough time for the netplugin pods to come up + logrus.Infof("Sleeping for 1 minute for pods to come up") + time.Sleep(time.Minute) + + case swarmScheduler: c.Assert(s.vagrant.Setup(false, append([]string{}, s.basicInfo.SwarmEnv), contivNodes), IsNil) default: c.Assert(s.vagrant.Setup(false, []string{}, contivNodes), IsNil) @@ -1288,9 +1296,13 @@ func (s *systemtestSuite) SetUpSuiteVagrant(c *C) { node.tbnode = nodeObj node.suite = s switch s.basicInfo.Scheduler { - case "k8": - node.exec = s.NewK8sExec(node) - case "swarm": + case kubeScheduler: + if s.basicInfo.InstallMode == kubeadmInstall { + node.exec = s.NewK8sPodExec(node) + } else { + node.exec = s.NewK8sExec(node) + } + case swarmScheduler: node.exec = s.NewSwarmExec(node) default: node.exec = s.NewDockerExec(node) @@ -1342,7 +1354,7 @@ func (s *systemtestSuite) SetUpTestBaremetal(c *C) { } time.Sleep(5 * time.Second) - if s.basicInfo.Scheduler != "k8" { + if s.basicInfo.Scheduler != kubeScheduler { for i := 0; i < 11; i++ { _, err := s.cli.TenantGet("default") if err == nil { @@ -1412,7 +1424,8 @@ func (s *systemtestSuite) SetUpTestVagrant(c *C) { } time.Sleep(5 * time.Second) - if s.basicInfo.Scheduler != "k8" { + + if s.basicInfo.Scheduler != kubeScheduler { for i := 0; i < 21; i++ { _, err := s.cli.TenantGet("default") @@ -1425,6 +1438,10 @@ func (s *systemtestSuite) SetUpTestVagrant(c *C) { } } + if s.basicInfo.Scheduler == kubeScheduler { + c.Assert(s.SetupDefaultNetwork(), IsNil) + } + if s.fwdMode == "routing" { c.Assert(s.cli.GlobalPost(&client.Global{FwdMode: "routing", Name: "global", @@ -1441,6 +1458,9 @@ func (s *systemtestSuite) SetUpTestVagrant(c *C) { func (s *systemtestSuite) verifyHostRoutes(routes []string, expect bool) error { for _, n := range s.nodes { + if s.basicInfo.Scheduler == kubeScheduler && n.Name() == "k8master" { + continue + } out, err := n.runCommand("ip route") if err != nil { logrus.Errorf("Error getting routes: %v", err) @@ -1459,6 +1479,7 @@ func (s *systemtestSuite) verifyHostRoutes(routes []string, expect bool) error { return nil } + func (s *systemtestSuite) verifyHostPing(containers []*container) error { for _, c := range containers { @@ -1470,6 +1491,7 @@ func (s *systemtestSuite) verifyHostPing(containers []*container) error { return nil } + func (s 
*systemtestSuite) IsolationTest(containers []*container) error { for _, c := range containers { err := c.node.exec.checkPingFailure(c, "172.19.255.254") @@ -1480,3 +1502,37 @@ func (s *systemtestSuite) IsolationTest(containers []*container) error { return nil } + +func (s *systemtestSuite) TearDownDefaultNetwork() error { + if s.basicInfo.Scheduler != kubeScheduler { + return nil + } + + err := s.cli.NetworkDelete("default", "default-net") + if err != nil { + logrus.Errorf("default-net not deleted. Err: %+v", err) + return err + } + time.Sleep(time.Second) + return nil +} + +func (s *systemtestSuite) SetupDefaultNetwork() error { + if s.basicInfo.Scheduler != kubeScheduler { + return nil + } + + err := s.cli.NetworkPost(&client.Network{ + TenantName: "default", + NetworkName: "default-net", + Subnet: "100.10.1.0/24", + Gateway: "100.10.1.254", + Encap: "vxlan", + }) + if err != nil { + logrus.Errorf("default-net not created. Err: %+v", err) + return err + } + time.Sleep(time.Second) + return nil +} diff --git a/vagrant/k8s/Vagrantfile b/vagrant/k8s/Vagrantfile index 767d4a41f..0ab000d78 100755 --- a/vagrant/k8s/Vagrantfile +++ b/vagrant/k8s/Vagrantfile @@ -7,15 +7,14 @@ require "rubygems" require "json" require 'fileutils' -# netplugin_synced_gopath="/opt/golang" gopath_folder="/opt/gopath" +master_ip = "192.168.2.10" http_proxy = ENV['HTTP_PROXY'] || ENV['http_proxy'] || '' https_proxy = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || '' # python -c 'import random; print "%0x.%0x" % (random.SystemRandom().getrandbits(3*8), random.SystemRandom().getrandbits(8*8)) token = "d900e1.8a392798f13b33a4" -master_ip = "192.168.2.10" # method to create an etc_hosts file based on the cluster info def create_etc_hosts(cluster) @@ -56,8 +55,8 @@ tar xf netplugin-$contiv_version.tar.bz2 netctl rm -f netplugin-$contiv_version.tar.bz2 chmod +x netctl mv netctl /usr/bin/ -kubectl get deployment/kube-dns -n kube-system -o json > kube-dns.yaml -kubectl delete deployment/kube-dns -n kube-system +kubectl get deployment/kube-dns -n kube-system -o json > /shared/kube-dns.yaml +kubectl delete deployment -n kube-system kube-dns SCRIPT # method to read the cluster config file @@ -81,26 +80,18 @@ echo 'export GOSRC=$GOPATH/src' >> /etc/profile.d/envvar.sh echo 'export PATH=$PATH:/usr/local/go/bin:$GOBIN' >> /etc/profile.d/envvar.sh echo "export http_proxy='$1'" >> /etc/profile.d/envvar.sh echo "export https_proxy='$1'" >> /etc/profile.d/envvar.sh -echo "export no_proxy='k8master,192.168.2.10,192.168.2.11,192.168.2.12,netmaster,localhost,127.0.0.1'" >> /etc/profile.d/envvar.sh -echo "export no_proxy='k8master,192.168.2.10,192.168.2.11,192.168.2.12,netmaster,localhost,127.0.0.1'" >> ~/.profile +echo "export no_proxy='k8master,192.168.2.10,192.168.2.11,192.168.2.12,192.168.2.13,netmaster,localhost,127.0.0.1'" >> /etc/profile.d/envvar.sh +echo "export no_proxy='k8master,192.168.2.10,192.168.2.11,192.168.2.12,192.168.2.13,netmaster,localhost,127.0.0.1'" >> ~/.profile source /etc/profile.d/envvar.sh # Change ownership for gopath folder -chown vagrant #{gopath_folder} +chown -R vagrant #{gopath_folder} sudo yum install -y net-tools -SCRIPT - -provision_master = <