diff --git a/.gitignore b/.gitignore index 839823718..eb4fb23d8 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,6 @@ env.sh *.log test/system/venv/ test/system/run-test-image.yaml* +test/system/templates/*.yaml +*.pyc +.vscode/launch.json diff --git a/Dockerfile.test b/Dockerfile.test index a0783c848..ed4fcf639 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -1,4 +1,4 @@ -FROM iad.ocir.io/oracle/oci-volume-provisioner-system-test:1.0.0 +FROM iad.ocir.io/oracle/oci-volume-provisioner-system-test:1.0.2 COPY dist /dist COPY manifests /manifests diff --git a/Gopkg.toml b/Gopkg.toml index 9efa44d40..0083b064e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -48,7 +48,7 @@ ignored = ["k8s.io/client-go/pkg/api/v1"] [[override]] name = "github.com/docker/distribution" -revision = "f0cc927784781fa395c06317c58dea2841ece3a9" # Lock in to version 2.6.3 when it is released +revision = "f0cc927784781fa395c06317c58dea2841ece3a9" # Lock in to version 2.6.3 when it is released [[override]] name = "k8s.io/api" diff --git a/Makefile b/Makefile index b0d695c18..6638c6f0f 100644 --- a/Makefile +++ b/Makefile @@ -59,6 +59,8 @@ test: build: ${DIR}/${BIN} sed 's#@VERSION@#${VERSION}#g; s#@IMAGE@#${IMAGE}#g' \ manifests/oci-volume-provisioner.yaml > $(DIR)/oci-volume-provisioner.yaml + sed 's#@VERSION@#${VERSION}#g; s#@IMAGE@#${IMAGE}#g' \ + manifests/oci-volume-provisioner-fss.yaml > $(DIR)/oci-volume-provisioner-fss.yaml cp manifests/storage-class.yaml $(DIR)/storage-class.yaml cp manifests/storage-class-ext3.yaml $(DIR)/storage-class-ext3.yaml cp manifests/oci-volume-provisioner-rbac.yaml $(DIR)/oci-volume-provisioner-rbac.yaml diff --git a/cmd/main.go b/cmd/main.go index e9e63e0d3..3bb180faf 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -26,17 +26,15 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/provisioner/core" "github.com/oracle/oci-volume-provisioner/pkg/signals" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - - "k8s.io/apimachinery/pkg/api/resource" ) const ( resyncPeriod = 15 * time.Second minResyncPeriod = 12 * time.Hour - provisionerName = "oracle.com/oci" exponentialBackOffOnError = false failedRetryThreshold = 5 leasePeriod = controller.DefaultLeaseDuration @@ -56,7 +54,7 @@ func informerResyncPeriod(minResyncPeriod time.Duration) func() time.Duration { func main() { syscall.Umask(0) - + rand.Seed(time.Now().Unix()) kubeconfig := flag.String("kubeconfig", "", "Path to Kubeconfig file with authorization and master location information.") volumeRoundingEnabled := flag.Bool("rounding-enabled", true, "When enabled volumes will be rounded up if less than 'minVolumeSizeMB'") minVolumeSize := flag.String("min-volume-size", "50Gi", "The minimum size for a block volume. 
By default OCI only supports block volumes > 50GB") @@ -92,6 +90,14 @@ func main() { glog.Fatal("env variable NODE_NAME must be set so that this provisioner can identify itself") } + // Decides which type of provisioner to deploy, either block or FSS + provisionerType := os.Getenv("PROVISIONER_TYPE") + if provisionerType == "" { + provisionerType = core.ProvisionerNameBlock + } + + glog.Infof("Starting volume provisioner in %s mode", provisionerType) + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, informerResyncPeriod(minResyncPeriod)()) volumeSizeLowerBound, err := resource.ParseQuantity(*minVolumeSize) @@ -101,13 +107,15 @@ func main() { // Create the provisioner: it implements the Provisioner interface expected by // the controller - ociProvisioner := core.NewOCIProvisioner(clientset, sharedInformerFactory.Core().V1().Nodes(), nodeName, *volumeRoundingEnabled, volumeSizeLowerBound) - + ociProvisioner, err := core.NewOCIProvisioner(clientset, sharedInformerFactory.Core().V1().Nodes(), provisionerType, nodeName, *volumeRoundingEnabled, volumeSizeLowerBound) + if err != nil { + glog.Fatalf("Cannot create volume provisioner %v", err) + } // Start the provision controller which will dynamically provision oci // PVs pc := controller.NewProvisionController( clientset, - provisionerName, + provisionerType, ociProvisioner, serverVersion.GitVersion, controller.ResyncPeriod(resyncPeriod), diff --git a/examples/example-claim-ffsw.template b/examples/example-claim-ffsw.template deleted file mode 100644 index 5cb0b9907..000000000 --- a/examples/example-claim-ffsw.template +++ /dev/null @@ -1,11 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: demooci-ffsw-{{TEST_ID}} -spec: - storageClassName: "ffsw" - accessModes: - - ReadWriteMany - resources: - requests: - storage: 50Gi diff --git a/examples/example-claim-fss.yaml b/examples/example-claim-fss.yaml new file mode 100644 index 000000000..f0e513abd --- /dev/null +++ b/examples/example-claim-fss.yaml @@ -0,0 +1,14 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demooci +spec: + storageClassName: "oci-fss" + selector: + matchLabels: + failure-domain.beta.kubernetes.io/zone: "PHX-AD-1" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi diff --git a/examples/example-pod-fss.yaml b/examples/example-pod-fss.yaml new file mode 100644 index 000000000..72515a2a1 --- /dev/null +++ b/examples/example-pod-fss.yaml @@ -0,0 +1,18 @@ +kind: Pod +apiVersion: v1 +metadata: + name: ocidemo-fss +spec: + volumes: + - name: nginx + persistentVolumeClaim: + claimName: demooci + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: nginx diff --git a/hack/check-golint.sh b/hack/check-golint.sh index 6d6ec6cba..17b40516d 100755 --- a/hack/check-golint.sh +++ b/hack/check-golint.sh @@ -21,9 +21,8 @@ set -o nounset set -o pipefail TARGETS=$(for d in "$@"; do echo ./$d/...; done) - echo -n "Checking golint: " -ERRS=$(golint ${TARGETS} 2>&1 || true) +ERRS=$(golint ${TARGETS} 2>&1 | grep -v mock_interfaces.go || true) if [ -n "${ERRS}" ]; then echo "FAIL" echo "${ERRS}" diff --git a/manifests/oci-volume-provisioner-fss.yaml b/manifests/oci-volume-provisioner-fss.yaml new file mode 100644 index 000000000..a4fd4f032 --- /dev/null +++ b/manifests/oci-volume-provisioner-fss.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name:
oci-volume-provisioner-fss + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + app: oci-volume-provisioner + spec: + serviceAccountName: oci-volume-provisioner + containers: + - name: oci-volume-provisioner + image: @IMAGE@:@VERSION@ + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: PROVISIONER_TYPE + value: oracle.com/oci-fss + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + volumes: + - name: config + secret: + secretName: oci-volume-provisioner diff --git a/manifests/oci-volume-provisioner.yaml b/manifests/oci-volume-provisioner.yaml index a42359f11..052169fa8 100644 --- a/manifests/oci-volume-provisioner.yaml +++ b/manifests/oci-volume-provisioner.yaml @@ -19,6 +19,8 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: PROVISIONER_TYPE + value: oracle.com/oci volumeMounts: - name: config mountPath: /etc/oci/ diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 50b310a60..0907c79d4 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -28,23 +28,26 @@ import ( "time" "github.com/golang/glog" - "github.com/pkg/errors" - "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/common/auth" "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" "github.com/oracle/oci-go-sdk/identity" + "github.com/pkg/errors" + "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" ) // ProvisionerClient wraps the OCI sub-clients required for volume provisioning. type provisionerClient struct { - cfg *Config - blockStorage *core.BlockstorageClient - identity *identity.IdentityClient - context context.Context - timeout time.Duration - metadata *instancemeta.InstanceMetadata + cfg *Config + blockStorage *core.BlockstorageClient + identity *identity.IdentityClient + fileStorage *filestorage.FileStorageClient + virtualNetwork *core.VirtualNetworkClient + context context.Context + timeout time.Duration + metadata *instancemeta.InstanceMetadata } // BlockStorage specifies the subset of the OCI core API utilised by the provisioner. @@ -59,10 +62,28 @@ type Identity interface { ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) } +// FSS specifies the subset of the OCI core API utilised by the provisioner. +type FSS interface { + CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) + DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) + CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) + CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) + DeleteExport(ctx context.Context, request filestorage.DeleteExportRequest) (response filestorage.DeleteExportResponse, err error) + GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) + ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) +} + +//VCN specifies the subset of the OCI core API utilised by the provisioner. 
+type VCN interface { + GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) +} + // ProvisionerClient is passed to all sub clients to provision a volume type ProvisionerClient interface { BlockStorage() BlockStorage Identity() Identity + FSS() FSS + VCN() VCN Context() context.Context Timeout() time.Duration CompartmentOCID() string @@ -77,6 +98,14 @@ func (p *provisionerClient) Identity() Identity { return p.identity } +func (p *provisionerClient) FSS() FSS { + return p.fileStorage +} + +func (p *provisionerClient) VCN() VCN { + return p.virtualNetwork +} + func (p *provisionerClient) Context() context.Context { return p.context } @@ -119,6 +148,16 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { return nil, err } + fileStorage, err := filestorage.NewFileStorageClientWithConfigurationProvider(config) + if err != nil { + return nil, err + } + + virtualNetwork, err := core.NewVirtualNetworkClientWithConfigurationProvider(config) + if err != nil { + return nil, err + } + identity, err := identity.NewIdentityClientWithConfigurationProvider(config) if err != nil { return nil, err @@ -134,12 +173,14 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { } return &provisionerClient{ - cfg: cfg, - blockStorage: &blockStorage, - identity: &identity, - timeout: 3 * time.Minute, - context: context.Background(), - metadata: metadata, + cfg: cfg, + blockStorage: &blockStorage, + identity: &identity, + fileStorage: &fileStorage, + virtualNetwork: &virtualNetwork, + timeout: 3 * time.Minute, + context: context.Background(), + metadata: metadata, }, nil } diff --git a/pkg/provisioner/block/block_test.go b/pkg/provisioner/block/block_test.go index 5575ea629..d26a5e4af 100644 --- a/pkg/provisioner/block/block_test.go +++ b/pkg/provisioner/block/block_test.go @@ -15,18 +15,18 @@ package block import ( - "context" "fmt" "testing" "time" - "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/core" "github.com/oracle/oci-go-sdk/identity" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,6 +35,10 @@ import ( var ( volumeBackupID = "dummyVolumeBackupId" defaultAD = identity.AvailabilityDomain{Name: common.String("PHX-AD-1"), CompartmentId: common.String("ocid1.compartment.oc1")} + fileSystemID = "dummyFileSystemId" + exportID = "dummyExportID" + serverIPs = []string{"dummyServerIP"} + privateIP = "127.0.0.1" ) func TestResolveFSTypeWhenNotConfigured(t *testing.T) { @@ -55,65 +59,6 @@ func TestResolveFSTypeWhenConfigured(t *testing.T) { } } -type mockBlockStorageClient struct { - volumeState core.VolumeLifecycleStateEnum -} - -func (c *mockBlockStorageClient) CreateVolume(ctx context.Context, request core.CreateVolumeRequest) (response core.CreateVolumeResponse, err error) { - return core.CreateVolumeResponse{Volume: core.Volume{Id: common.String(volumeBackupID)}}, nil -} - -func (c *mockBlockStorageClient) DeleteVolume(ctx context.Context, request core.DeleteVolumeRequest) (response core.DeleteVolumeResponse, err error) { - return core.DeleteVolumeResponse{}, nil -} - -func (c *mockBlockStorageClient) GetVolume(ctx context.Context, request core.GetVolumeRequest) (response core.GetVolumeResponse, err error) { 
- return core.GetVolumeResponse{Volume: core.Volume{LifecycleState: c.volumeState}}, nil -} - -type mockIdentityClient struct { - common.BaseClient -} - -func (client mockIdentityClient) ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) { - return -} - -type mockProvisionerClient struct { - storage *mockBlockStorageClient -} - -func (p *mockProvisionerClient) BlockStorage() client.BlockStorage { - return p.storage -} - -func (p *mockProvisionerClient) Identity() client.Identity { - return &mockIdentityClient{} -} - -func (p *mockProvisionerClient) Context() context.Context { - return context.Background() -} - -func (p *mockProvisionerClient) Timeout() time.Duration { - return 30 * time.Second -} - -func (p *mockProvisionerClient) CompartmentOCID() (compartmentOCID string) { - return "" -} - -func (p *mockProvisionerClient) TenancyOCID() string { - return "ocid1.tenancy.oc1..aaaaaaaatyn7scrtwtqedvgrxgr2xunzeo6uanvyhzxqblctwkrpisvke4kq" -} - -// NewClientProvisioner creates an OCI client from the given configuration. -func NewClientProvisioner(pcData client.ProvisionerClient, - storage *mockBlockStorageClient, -) client.ProvisionerClient { - return &mockProvisionerClient{storage: storage} -} - func TestCreateVolumeFromBackup(t *testing.T) { // test creating a volume from an existing backup options := controller.VolumeOptions{ @@ -135,7 +80,7 @@ func TestCreateVolumeFromBackup(t *testing.T) { }} block := NewBlockProvisioner( - NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: core.VolumeLifecycleStateAvailable}), + provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: core.VolumeLifecycleStateAvailable}), instancemeta.NewMock(&instancemeta.InstanceMetadata{ CompartmentOCID: "", Region: "phx", @@ -181,7 +126,7 @@ func TestCreateVolumeFailure(t *testing.T) { }, }} - block := NewBlockProvisioner(NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: tt.state}), + block := NewBlockProvisioner(provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: tt.state}), instancemeta.NewMock(&instancemeta.InstanceMetadata{ CompartmentOCID: "", Region: "phx", @@ -221,7 +166,7 @@ func TestVolumeRoundingLogic(t *testing.T) { CompartmentOCID: "", Region: "phx", }) - block := NewBlockProvisioner(NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: core.VolumeLifecycleStateAvailable}), + block := NewBlockProvisioner(provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: core.VolumeLifecycleStateAvailable}), metadata, tt.enabled, tt.minVolumeSize, diff --git a/pkg/provisioner/core/provisioner.go b/pkg/provisioner/core/provisioner.go index 6f32b8d31..70f54e5f6 100644 --- a/pkg/provisioner/core/provisioner.go +++ b/pkg/provisioner/core/provisioner.go @@ -15,14 +15,14 @@ package core import ( - "errors" "os" "strings" "time" "github.com/golang/glog" - "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/pkg/errors" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" informersv1 "k8s.io/client-go/informers/core/v1" @@ -34,10 +34,17 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/block" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner/fss" 
"github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" ) const ( + // ProvisionerNameDefault is the name of the default OCI volume provisioner (block) + ProvisionerNameDefault = "oracle.com/oci" + // ProvisionerNameBlock is the name of the OCI block volume provisioner + ProvisionerNameBlock = "oracle.com/oci-block" + // ProvisionerNameFss is the name of the OCI FSS dedicated storage provisioner + ProvisionerNameFss = "oracle.com/oci-fss" ociProvisionerIdentity = "ociProvisionerIdentity" ociAvailabilityDomain = "ociAvailabilityDomain" ociCompartment = "ociCompartment" @@ -56,7 +63,7 @@ type OCIProvisioner struct { } // NewOCIProvisioner creates a new OCI provisioner. -func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1.NodeInformer, nodeName string, volumeRoundingEnabled bool, minVolumeSize resource.Quantity) *OCIProvisioner { +func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1.NodeInformer, provisionerType string, nodeName string, volumeRoundingEnabled bool, minVolumeSize resource.Quantity) (*OCIProvisioner, error) { configPath, ok := os.LookupEnv("CONFIG_YAML_FILENAME") if !ok { configPath = configFilePath @@ -77,19 +84,24 @@ func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1 if err != nil { glog.Fatalf("Unable to create volume provisioner client: %v", err) } - - blockProvisioner := block.NewBlockProvisioner(client, instancemeta.New(), - volumeRoundingEnabled, - minVolumeSize, - time.Minute*3) - + var provisioner plugin.ProvisionerPlugin + switch provisionerType { + case ProvisionerNameDefault: + provisioner = block.NewBlockProvisioner(client, instancemeta.New(), volumeRoundingEnabled, minVolumeSize, time.Minute*3) + case ProvisionerNameBlock: + provisioner = block.NewBlockProvisioner(client, instancemeta.New(), volumeRoundingEnabled, minVolumeSize, time.Minute*3) + case ProvisionerNameFss: + provisioner = fss.NewFilesystemProvisioner(client) + default: + return nil, errors.Errorf("invalid provisioner type %q", provisionerType) + } return &OCIProvisioner{ client: client, kubeClient: kubeClient, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - provisioner: blockProvisioner, - } + provisioner: provisioner, + }, nil } var _ controller.Provisioner = &OCIProvisioner{} diff --git a/pkg/provisioner/core/utils.go b/pkg/provisioner/core/utils.go index 96fae286d..504f4f122 100644 --- a/pkg/provisioner/core/utils.go +++ b/pkg/provisioner/core/utils.go @@ -19,17 +19,15 @@ import ( "fmt" "strings" - "github.com/golang/glog" - - "github.com/oracle/oci-go-sdk/common" - "github.com/oracle/oci-go-sdk/identity" - "k8s.io/api/core/v1" - metav1 "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" + metav1 "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume/util" + + "github.com/golang/glog" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/identity" ) func (p *OCIProvisioner) findADByName(name string) (*identity.AvailabilityDomain, error) { diff --git a/pkg/provisioner/fss/fss.go b/pkg/provisioner/fss/fss.go new file mode 100644 index 000000000..8781b2d9d --- /dev/null +++ b/pkg/provisioner/fss/fss.go @@ -0,0 +1,271 @@ +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fss + +import ( + "context" + "fmt" + "math/rand" + "os" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" + "github.com/oracle/oci-go-sdk/identity" + "github.com/pkg/errors" + + "github.com/oracle/oci-volume-provisioner/pkg/oci/client" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ociVolumeID = "volume.beta.kubernetes.io/oci-volume-id" + ociExportID = "volume.beta.kubernetes.io/oci-export-id" + volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX" + fsType = "fsType" + subnetID = "subnetId" + mntTargetID = "mntTargetId" +) + +// filesystemProvisioner is the internal provisioner for OCI filesystem volumes +type filesystemProvisioner struct { + client client.ProvisionerClient +} + +var _ plugin.ProvisionerPlugin = &filesystemProvisioner{} + +// NewFilesystemProvisioner creates a new file system provisioner that creates +// filesystems using OCI File System Service. +func NewFilesystemProvisioner(client client.ProvisionerClient) plugin.ProvisionerPlugin { + return &filesystemProvisioner{ + client: client, + } +} + +// getMountTargetFromID retrieves mountTarget from given mountTargetID +func (fsp *filesystemProvisioner) getMountTargetFromID(mountTargetID string) (*filestorage.MountTarget, error) { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + + resp, err := fsp.client.FSS().GetMountTarget(ctx, filestorage.GetMountTargetRequest{ + MountTargetId: &mountTargetID, + }) + if err != nil { + glog.Errorf("Failed to retrieve mount point mountTargetId=%q: %v", mountTargetID, err) + return nil, err + } + return &resp.MountTarget, nil +} + +// listAllMountTargets retrieves all available mount targets +func (fsp *filesystemProvisioner) listAllMountTargets(ad string) ([]filestorage.MountTargetSummary, error) { + var ( + page *string + mountTargets []filestorage.MountTargetSummary + ) + // Check if there already is a mount target in the existing compartment + for { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().ListMountTargets(ctx, filestorage.ListMountTargetsRequest{ + AvailabilityDomain: &ad, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + Page: page, + }) + if err != nil { + return nil, err + } + mountTargets = append(mountTargets, resp.Items...) 
+ if page = resp.OpcNextPage; resp.OpcNextPage == nil { + break + } + } + return mountTargets, nil +} + +func (fsp *filesystemProvisioner) getOrCreateMountTarget(mtID string, ad string, subnetID string) (*filestorage.MountTarget, error) { + if mtID != "" { + // Mount target already specified in the configuration file, find it in the list of mount targets + return fsp.getMountTargetFromID(mtID) + } + mountTargets, err := fsp.listAllMountTargets(ad) + if err != nil { + return nil, err + } + if len(mountTargets) != 0 { + glog.V(4).Infof("Found mount targets to use") + mntTargetSummary := mountTargets[rand.Int()%len(mountTargets)] + target, err := fsp.getMountTargetFromID(*mntTargetSummary.Id) + return target, err + } + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + // Mount target not created, create a new one + resp, err := fsp.client.FSS().CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ + CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ + AvailabilityDomain: &ad, + SubnetId: &subnetID, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), + }, + }) + if err != nil { + return nil, err + } + return &resp.MountTarget, nil +} + +func (fsp *filesystemProvisioner) Provision(options controller.VolumeOptions, ad *identity.AvailabilityDomain) (*v1.PersistentVolume, error) { + // Create the FileSystem. + var fsID string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{ + CreateFileSystemDetails: filestorage.CreateFileSystemDetails{ + AvailabilityDomain: ad.Name, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), options.PVC.Name)), + }, + }) + if err != nil { + glog.Errorf("Failed to create a file system options=%#v: %v", options, err) + return nil, err + } + fsID = *resp.FileSystem.Id + } + + target, err := fsp.getOrCreateMountTarget(options.Parameters[mntTargetID], *ad.Name, options.Parameters[subnetID]) + if err != nil { + glog.Errorf("Failed to retrieve mount target: %s", err) + return nil, err + } + + glog.V(6).Infof("Creating export set") + // Create the ExportSet. + var exportSetID string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().CreateExport(ctx, filestorage.CreateExportRequest{ + CreateExportDetails: filestorage.CreateExportDetails{ + ExportSetId: target.ExportSetId, + FileSystemId: &fsID, + Path: common.String("/" + fsID), + }, + }) + if err != nil { + glog.Errorf("Failed to create export: %v", err) + return nil, err + } + exportSetID = *resp.Export.Id + } + + if len(target.PrivateIpIds) == 0 { + glog.Errorf("Failed to find server IDs associated with the Mount Target (OCID %s) to provision a persistent volume", target.Id) + return nil, errors.Errorf("failed to find server IDs associated with the Mount Target with OCID %q", target.Id) + } + + // Get PrivateIP. 
+ var serverIP string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + id := target.PrivateIpIds[rand.Int()%len(target.PrivateIpIds)] + getPrivateIPResponse, err := fsp.client.VCN().GetPrivateIp(ctx, core.GetPrivateIpRequest{ + PrivateIpId: &id, + }) + if err != nil { + glog.Errorf("Failed to retrieve IP address for mount target privateIpID=%q: %v", id, err) + return nil, err + } + serverIP = *getPrivateIPResponse.PrivateIp.IpAddress + } + + glog.Infof("Creating persistent volume on mount target with private IP address %s", serverIP) + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: fsID, + Annotations: map[string]string{ + ociVolumeID: fsID, + ociExportID: exportSetID, + }, + Labels: map[string]string{}, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, + AccessModes: options.PVC.Spec.AccessModes, + //FIXME: fs storage doesn't enforce quota, capacity is meaningless here. + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + // Randomnly select IP address associated with the mount target to use for attachment + Server: serverIP, + Path: "/" + fsID, + ReadOnly: false, + }, + }, + }, + }, nil +} + +// Delete destroys a OCI volume created by Provision +func (fsp *filesystemProvisioner) Delete(volume *v1.PersistentVolume) error { + exportID, ok := volume.Annotations[ociExportID] + if !ok { + return errors.Errorf("%q annotation not found on PV", ociExportID) + } + + filesystemID, ok := volume.Annotations[ociVolumeID] + if !ok { + return errors.Errorf("%q annotation not found on PV", ociVolumeID) + } + + glog.Infof("Deleting export for filesystemID %v", filesystemID) + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + if _, err := fsp.client.FSS().DeleteExport(ctx, filestorage.DeleteExportRequest{ + ExportId: &exportID, + }); err != nil { + if !provisioner.IsNotFound(err) { + glog.Errorf("Failed to delete export exportID=%q: %v", exportID, err) + return err + } + glog.Infof("ExportID %q was not found. Unable to delete it: %v", exportID, err) + } + + ctx, cancel = context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + + glog.Infof("Deleting volume %v with FileSystemID %v", volume, filesystemID) + _, err := fsp.client.FSS().DeleteFileSystem(ctx, filestorage.DeleteFileSystemRequest{ + FileSystemId: &filesystemID, + }) + if err != nil { + if !provisioner.IsNotFound(err) { + return err + } + glog.Infof("FileSystemID %q was not found. Unable to delete it: %v", filesystemID, err) + } + return nil +} diff --git a/pkg/provisioner/fss/fss_test.go b/pkg/provisioner/fss/fss_test.go new file mode 100644 index 000000000..cf27113ce --- /dev/null +++ b/pkg/provisioner/fss/fss_test.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package fss + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/identity" + + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" +) + +func TestGetMountTargetFromID(t *testing.T) { + // test retrieving a mount target from given ID + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.getMountTargetFromID("mtOCID") + if err != nil { + t.Fatalf("Failed to retrieve mount target from ID: %v", err) + } + if !reflect.DeepEqual(resp.PrivateIpIds, provisioner.ServerIPs) { + t.Fatalf("Incorrect response for retrieving mount target from ID") + } +} + +func TestListAllMountTargets(t *testing.T) { + // test listing all mount targets + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.listAllMountTargets("adOCID") + if err != nil { + t.Fatalf("Failed to retrieve list mount targets: %v", err) + } + if !reflect.DeepEqual(resp, provisioner.MountTargetItems) { + t.Fatalf("Incorrect response for listing mount targets") + } +} + +func TestGetOrCreateMountTarget(t *testing.T) { + // test get or create mount target + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.getOrCreateMountTarget("", provisioner.NilListMountTargetsADID, "subnetID") + if err != nil { + t.Fatalf("Failed to retrieve or create mount target: %v", err) + } + if *resp.Id != provisioner.CreatedMountTargetID { + t.Fatalf("Failed to create mount target") + } + +} +func TestCreateVolumeWithFSS(t *testing.T) { + // test creating a volume on a file system storage + options := controller.VolumeOptions{ + PVName: "dummyVolumeOptions", + PVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{}, + }} + ad := identity.AvailabilityDomain{Name: common.String("dummyAdName"), CompartmentId: common.String("dummyCompartmentId")} + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + _, err := fss.Provision(options, &ad) + if err != nil { + t.Fatalf("Failed to provision volume from fss storage: %v", err) + } +} diff --git a/pkg/provisioner/mock_interfaces.go b/pkg/provisioner/mock_interfaces.go new file mode 100644 index 000000000..a108b823e --- /dev/null +++ b/pkg/provisioner/mock_interfaces.go @@ -0,0 +1,176 @@ +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package provisioner + +import ( + "context" + "time" + + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" + "github.com/oracle/oci-go-sdk/identity" + "github.com/oracle/oci-volume-provisioner/pkg/oci/client" +) + +var ( + // VolumeBackupID of backup volume + VolumeBackupID = "dummyVolumeBackupId" + fileSystemID = "dummyFileSystemId" + exportID = "dummyExportID" + // NilListMountTargetsADID lists no mount targets for the given AD + NilListMountTargetsADID = "dummyNilListMountTargetsForADID" + mountTargetID = "dummyMountTargetID" + // CreatedMountTargetID for dynamically created mount target + CreatedMountTargetID = "dummyCreatedMountTargetID" + // ServerIPs address for mount target + ServerIPs = []string{"dummyServerIP"} + // MountTargetItems retrieving during listing + MountTargetItems = []filestorage.MountTargetSummary{filestorage.MountTargetSummary{Id: &mountTargetID}} + // EmptyMountTargetItems retrieving during listing + EmptyMountTargetItems = []filestorage.MountTargetSummary{} + privateIP = "127.0.0.1" +) + +// MockBlockStorageClient mocks BlockStorage client implementation +type MockBlockStorageClient struct { + VolumeState core.VolumeLifecycleStateEnum +} + +// CreateVolume mocks the BlockStorage CreateVolume implementation +func (c *MockBlockStorageClient) CreateVolume(ctx context.Context, request core.CreateVolumeRequest) (response core.CreateVolumeResponse, err error) { + return core.CreateVolumeResponse{Volume: core.Volume{Id: common.String(VolumeBackupID)}}, nil +} + +// DeleteVolume mocks the BlockStorage DeleteVolume implementation +func (c *MockBlockStorageClient) DeleteVolume(ctx context.Context, request core.DeleteVolumeRequest) (response core.DeleteVolumeResponse, err error) { + return core.DeleteVolumeResponse{}, nil +} + +// GetVolume mocks the BlockStorage GetVolume implementation +func (c *MockBlockStorageClient) GetVolume(ctx context.Context, request core.GetVolumeRequest) (response core.GetVolumeResponse, err error) { + return core.GetVolumeResponse{Volume: core.Volume{LifecycleState: c.VolumeState}}, nil +} + +// MockFileStorageClient mocks FileStorage client implementation +type MockFileStorageClient struct { +} + +// CreateFileSystem mocks the FileStorage CreateFileSystem implementation +func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) { + return filestorage.CreateFileSystemResponse{FileSystem: filestorage.FileSystem{Id: common.String(fileSystemID)}}, nil +} + +// DeleteFileSystem mocks the FileStorage DeleteFileSystem implementation +func (c *MockFileStorageClient) DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) { + return filestorage.DeleteFileSystemResponse{}, nil +} + +// CreateExport mocks the FileStorage CreateExport implementation +func (c *MockFileStorageClient) CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) { + return filestorage.CreateExportResponse{Export: filestorage.Export{Id: common.String(exportID)}}, nil +} + +// DeleteExport mocks the FileStorage DeleteExport implementation +func (c *MockFileStorageClient) DeleteExport(ctx context.Context, request filestorage.DeleteExportRequest) (response filestorage.DeleteExportResponse, err error) { + return 
filestorage.DeleteExportResponse{}, nil +} + +// CreateMountTarget mocks the FileStorage CreateMountTarget implementation +func (c *MockFileStorageClient) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) { + return filestorage.CreateMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: ServerIPs, Id: &CreatedMountTargetID}}, nil +} + +// GetMountTarget mocks the FileStorage GetMountTarget implementation +func (c *MockFileStorageClient) GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) { + return filestorage.GetMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: ServerIPs}}, nil +} + +// ListMountTargets mocks the FileStorage ListMountTargets implementation +func (c *MockFileStorageClient) ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) { + if *request.AvailabilityDomain == NilListMountTargetsADID { + return filestorage.ListMountTargetsResponse{Items: EmptyMountTargetItems}, nil + } + return filestorage.ListMountTargetsResponse{Items: MountTargetItems}, nil +} + +// MockVirtualNetworkClient mocks VirtualNetwork client implementation +type MockVirtualNetworkClient struct { +} + +// GetPrivateIp mocks the VirtualNetwork GetPrivateIp implementation +func (c *MockVirtualNetworkClient) GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) { + return core.GetPrivateIpResponse{PrivateIp: core.PrivateIp{IpAddress: common.String(privateIP)}}, nil +} + +// MockIdentityClient mocks identity client structure +type MockIdentityClient struct { + common.BaseClient +} + +// ListAvailabilityDomains mocks the client ListAvailabilityDomains implementation +func (client MockIdentityClient) ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) { + return +} + +// MockProvisionerClient mocks client structure +type MockProvisionerClient struct { + Storage *MockBlockStorageClient +} + +// BlockStorage mocks client BlockStorage implementation +func (p *MockProvisionerClient) BlockStorage() client.BlockStorage { + return p.Storage +} + +// FSS mocks client FileStorage implementation +func (p *MockProvisionerClient) FSS() client.FSS { + return &MockFileStorageClient{} +} + +// VCN mocks client VirtualNetwork implementation +func (p *MockProvisionerClient) VCN() client.VCN { + return &MockVirtualNetworkClient{} +} + +// Identity mocks client Identity implementation +func (p *MockProvisionerClient) Identity() client.Identity { + return &MockIdentityClient{} +} + +// Context mocks client Context implementation +func (p *MockProvisionerClient) Context() context.Context { + return context.Background() +} + +// Timeout mocks client Timeout implementation +func (p *MockProvisionerClient) Timeout() time.Duration { + return 30 * time.Second +} + +// CompartmentOCID mocks client CompartmentOCID implementation +func (p *MockProvisionerClient) CompartmentOCID() (compartmentOCID string) { + return "" +} + +// TenancyOCID mocks client TenancyOCID implementation +func (p *MockProvisionerClient) TenancyOCID() string { + return "ocid1.tenancy.oc1..aaaaaaaatyn7scrtwtqedvgrxgr2xunzeo6uanvyhzxqblctwkrpisvke4kq" +} + +// NewClientProvisioner creates an OCI client from 
the given configuration. +func NewClientProvisioner(pcData client.ProvisionerClient, storage *MockBlockStorageClient) client.ProvisionerClient { + return &MockProvisionerClient{Storage: storage} +} diff --git a/pkg/provisioner/plugin/plugin.go b/pkg/provisioner/plugin/plugin.go index 77d67d042..01a34520c 100644 --- a/pkg/provisioner/plugin/plugin.go +++ b/pkg/provisioner/plugin/plugin.go @@ -17,13 +17,13 @@ package plugin import ( "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/oracle/oci-go-sdk/identity" + "k8s.io/api/core/v1" ) const ( // OCIProvisionerName is the name of the provisioner defined in the storage class definitions OCIProvisionerName = "oracle/oci" - // LabelZoneFailureDomain the availability domain in which the PD resides. LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" // LabelZoneRegion the region in which the PD resides. diff --git a/test/system/README.md b/test/system/README.md index 2297a6138..35b23e58e 100644 --- a/test/system/README.md +++ b/test/system/README.md @@ -9,15 +9,21 @@ We first need to setup the environment. The following must be defined: * $KUBECONFIG or $KUBECONFIG_VAR If the --check-oci argument is going to be set, then the following will also -need to be defined: +need to be defined: * $OCICONFIG or $OCICONFIG_VAR Note: If set, OCICONFIG/KUBECONFIG must contain the path to the required files. Alternatively, OCICONFIG_VAR/KUBECONFIG_VAR must contain the content -of the required files (base64 encoded). If both are set, the former will +of the required files (base64 encoded). If both are set, the former will take precedence. +* $MNT_TARGET_OCID + +The OCID of the mount target to which file systems are attached when using the oci-fss provisioner type. +If no mount target OCID is specified, the volume provisioner will randomly select one of +the existing mount targets. If no mount targets exist, a new one will be created. + We can then run the system test as follows: ``` diff --git a/test/system/backup_vol_system_test.py b/test/system/backup_vol_system_test.py new file mode 100644 index 000000000..cb8d2b0af --- /dev/null +++ b/test/system/backup_vol_system_test.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface +import oci +import utils +import atexit +from yaml_utils import PopulateYaml + +class BackupVolumeSystemTest(VolumeProvisionerSystemTestInterface): + + KUBERNETES_RESOURCES = ["../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml", + "../../dist/oci-volume-provisioner.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + BACKUP_CLAIM_TEMPLATE = "templates/example-claim-from-backup.template" + CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" + + def __init__(self, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(BackupVolumeSystemTest, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + + def run(self): + super(BackupVolumeSystemTest, self).run() + if self._check_oci: # Do not run tests in the validate-test-image stage (oci_config not propagated to image) + utils.log("Running system test: Create volume from backup", as_banner=True) + _backup_ocid, _availability_domain = self._setup_create_volume_from_backup() + _claim_target = PopulateYaml(self.BACKUP_CLAIM_TEMPLATE, self._test_id, + region=_availability_domain.split(':')[1], backup_id=_backup_ocid).generateFile() + _res = self._test_create_volume(_claim_target, "demooci-from-backup-" + self._test_id, + availability_domain=_availability_domain, + verify_func=self._volume_from_backup_check, canaryMetricName=self.CM_VOLUME_FROM_BACKUP) + self._tear_down_create_volume_from_backup(_backup_ocid) + self._checkTestSuccess() + + def _create_backup(self, volume_ocid): + '''Create volume backup on OCI from existing volume + @param volume_ocid: Ocid of course volume + @type volume_ocid: C{Str} + @return: Tuple containing the backup id, compartment id and display name + @rtype: C{Tuple}''' + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid, + display_name="backup_volume_system_test" + self._test_id) + _response = client.create_volume_backup(_backup_details) + utils.log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data)) + _res = self._get_json_doc(str(_response.data)) + return _res['id'], _res['compartment_id'], _res['display_name'] + + def _delete_backup(self, backup_ocid): + '''Delete volume backup from OCI + @param backup_ocid: Ocid of backup volume to delete + @type backup_ocid: C{Str}''' + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _response = client.delete_volume_backup(backup_ocid) + utils.log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) + + def _create_volume_from_backup(self, backup_ocid, test_id, availability_domain, compartment_id): + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, + display_name="restored_volume_system_test" + test_id, + availability_domain=availability_domain, + compartment_id=compartment_id) + try: + _response = client.create_volume(_volume_details) + utils.log("Response for creating volume from backup %s: %s %s" % (_response.data, self._get_json_doc(str(_response.data))['id'], compartment_id)) + return self._get_json_doc(str(_response.data))['id'] + except Exception as exc: + utils.log("Failed to create volume from backup %s" % exc) + + def _setup_create_volume_from_backup(self, 
storageType=VolumeProvisionerSystemTestInterface.BLOCK_STORAGE, availability_domain=None): + '''Setup environment for creating a volume from a backup device + @return: OCID of generated backup + @rtype: C{Str}''' + utils.log("Creating test volume (using terraform)", as_banner=True) + self._terraform("init", self.TERRAFORM_DIR) + self._terraform("apply", self.TERRAFORM_DIR) + _availability_domain = self._get_terraform_output_var(self.TERRAFORM_AVAILABILITY_DOMAIN) + utils.log(self._terraform("output -json", self.TERRAFORM_DIR)) + # Create replication controller and write data to the generated volume + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller-with-volume-claim.template", + _availability_domain, volume_name=self._get_volume_name()) + self._create_file_via_replication_controller(_rc_name) + self._verify_file_existance_via_replication_controller(_rc_name) + # Create backup from generated volume + _backup_ocid, compartment_id, _volume_name = self._create_backup(self._get_terraform_output_var(self.TERRAFORM_VOLUME_OCID)) + if not self._wait_for_volume_to_create(_backup_ocid, compartment_id=compartment_id, backup=True, storageType=storageType, + availability_domain=availability_domain): + utils.log("Failed to find backup with name: " + _volume_name) + return _backup_ocid, _availability_domain + + def _tear_down_create_volume_from_backup(self, backup_ocid): + '''Tear down create volume from backup + @param test_id: Test id used to append to component names + @type test_id: C{Str} + @param backup_ocid: OCID of backup from which the test volume was created + @type backup_ocid: C{Str}''' + def _destroy_test_volume_atexit(): + utils.log("Destroying test volume (using terraform)", as_banner=True) + self._terraform("destroy -force", self.TERRAFORM_DIR) + atexit.register(_destroy_test_volume_atexit) + self._delete_backup(backup_ocid) + + def _volume_from_backup_check(self, test_id, availability_domain, volume, file_name='hello.txt'): + '''Verify whether the volume created from the backup is in a healthy state + @param test_id: Test id to use for creating components + @type test_id: C{Str} + @param availability_domain: Availability domain to create resource in + @type availability_domain: C{Str} + @param volume: Name of volume to verify + @type volume: C{Str} + @param file_name: Name of file to do checks for + @type file_name: C{Str}''' + _ocid = volume.split('.') + _ocid = _ocid[-1] + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller.template", availability_domain, _ocid) + utils.log("Does the file from the previous backup exist?") + stdout = utils.kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html") + if file_name not in stdout.split("\n"): + utils.log("Error: Failed to find file %s in mounted volume" % file_name) + utils.log("Deleting the replication controller (deletes the single nginx pod).") + utils.kubectl("delete -f " + _rc_config) diff --git a/test/system/block_system_test.py b/test/system/block_system_test.py new file mode 100644 index 000000000..60483cb87 --- /dev/null +++ b/test/system/block_system_test.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface + +class BlockSystemTests(VolumeProvisionerSystemTestInterface): + + KUBERNETES_RESOURCES = ["../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml", + "../../dist/oci-volume-provisioner.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + SIMPLE_CLAIM_TEMPLATE = "templates/example-claim.template" + EXT_CLAIM_TEMPLATE = "templates/example-claim-ext3.template" + NO_AD_CLAIM_TEMPLATE = "templates/example-claim-no-AD.template" + CM_SIMPLE = "volume_provisioner_simple" + CM_EXT3 = "volume_provisioner_ext3" + CM_NO_AD = "volume_provisioner_no_ad" + + def __init__(self, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(BlockSystemTests, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + + def run(self): + super(BlockSystemTests, self).run() + utils.log("Running system test: Simple", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.SIMPLE_CLAIM_TEMPLATE, region=self._region, + test_id=self._test_id).generateFile(), "demooci-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_SIMPLE) + self._checkTestSuccess() + utils.log("Running system test: Ext3 file system", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.EXT_CLAIM_TEMPLATE, + self._test_id).generateFile(), "demooci-ext3-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_EXT3) + self._checkTestSuccess() + utils.log("Running system test: No AD specified", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.NO_AD_CLAIM_TEMPLATE, + self._test_id).generateFile(), "demooci-no-ad-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_NO_AD) + self._checkTestSuccess() diff --git a/test/system/canary_metrics.py b/test/system/canary_metrics.py new file mode 100644 index 000000000..fa43af7c3 --- /dev/null +++ b/test/system/canary_metrics.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import utils +import os +import json +import datetime +from collections import MutableMapping + +class CanaryMetrics(object): + + CM_SIMPLE = "volume_provisioner_simple" + CM_EXT3 = "volume_provisioner_ext3" + CM_NO_AD = "volume_provisioner_no_ad" + CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" + START_TIME = "start_time" + END_TIME = "end_time" + + def __init__(self, metrics_file=None, *args, **kwargs): + self._canaryMetrics = dict(*args, **kwargs) + self._metrics_file = metrics_file + self._canaryMetrics[self.START_TIME] = self.canary_metric_date() + + @staticmethod + def canary_metric_date(): + return datetime.datetime.today().strftime('%Y-%m-%d-%H%M%S') + + def update_canary_metric(self, name, result): + self._canaryMetrics[name] = result + + def finish_canary_metrics(self): + self.update_canary_metric(self.END_TIME, self.canary_metric_date()) + if self._metrics_file: + with open(self._metrics_file, 'w') as metrics_file: + json.dump(self._canaryMetrics, metrics_file, sort_keys=True, indent=4) diff --git a/test/system/fss_system_test.py b/test/system/fss_system_test.py new file mode 100644 index 000000000..1c299cd2a --- /dev/null +++ b/test/system/fss_system_test.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import os +import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface + +class FSSSystemTests(VolumeProvisionerSystemTestInterface): + + STORAGE_CLAIM_WITH_SUBNET_ID = "templates/example-storage-class-fss-subnet.template" + STORAGE_CLAIM_WITH_MNT_ID = "templates/example-storage-class-fss-mnt.template" + STORAGE_CLAIM_EMPTY = "templates/example-storage-class-fss-empty.template" + FSS_CLAIM = "templates/example-claim-fss.template" + MNT_TARGET_OCID = "MNT_TARGET_OCID" + SUBNET_OCID = "SUBNET_OCID" + KUBERNETES_RESOURCES = ["../../dist/oci-volume-provisioner-fss.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + TEST_SC_FILES = [STORAGE_CLAIM_WITH_MNT_ID, STORAGE_CLAIM_EMPTY] + CM_FSS = "" + + def __init__(self, subnet_ocid=None, mnt_target_ocid=None, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(FSSSystemTests, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + self._subnet_ocid = subnet_ocid + self._mnt_target_ocid = mnt_target_ocid + self._scFile = self.STORAGE_CLAIM_WITH_SUBNET_ID + + def run(self): + super(FSSSystemTests, self).run() + if self._check_oci: # Do not run tests in the validate-test-image stage + utils.log("Running system test: Create volume with FSS", as_banner=True) + for _testSc in self.TEST_SC_FILES: + # Not testing the creation of a mount target, as all mount targets on the system will have + # to be removed + self._testCreateVolumeFromStorageClass(_testSc) + + def _testCreateVolumeFromStorageClass(self, scFile): + '''Test creating a volume based on provided storage class + @type scFile: Path for storage class config file + @param scFile: C{Str}''' + utils.log("Testing for sc: %s" % scFile, as_banner=True) + _storageClassFile = PopulateYaml(self._scFile, self._test_id, mount_target_ocid=self._mnt_target_ocid, + subnet_ocid=self._subnet_ocid).generateFile() + # Delete any previously existing storage classes with the same name + utils.kubectl("delete -f " + _storageClassFile, exit_on_error=False) + # Create storage class yaml file + _storageClassFile = PopulateYaml(self._scFile, self._test_id, mount_target_ocid=self._mnt_target_ocid, + subnet_ocid=self._subnet_ocid).generateFile() + utils.kubectl("create -f " + _storageClassFile, exit_on_error=False) + self._testSuccess = self._test_create_volume(PopulateYaml(self.FSS_CLAIM, self._test_id, region=self._region).generateFile(), + "demooci-fss-" + self._test_id, availability_domain=self.DEFAULT_AVAILABILITY_DOMAIN, + storageType=self.FS_STORAGE, verify_func=self._volume_from_fss_dynamic_check, canaryMetricName=self.CM_FSS) + self._checkTestSuccess() + + def _volume_from_fss_dynamic_check(self, availability_domain, volume, file_name='hello.txt'): + '''Verify whether the file system is attached to the pod and can be written to + @param test_id: Test id to use for creating components + @type test_id: C{Str} + @param availability_domain: Availability domain to create resource in + @type availability_domain: C{Str} + @param volume: Name of volume to verify + @type volume: C{Str} + @param file_name: Name of file to do checks for + @type file_name: C{Str}''' + _ocid = volume.split('.') + _ocid = _ocid[-1] + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-pod-fss.template", + availability_domain, _ocid) + utils.log("Does the file from the previous backup exist?") + stdout = utils.kubectl("exec " + _rc_name + " -- ls 
/usr/share/nginx/html") + if file_name not in stdout.split("\n"): + utils.log("Error: Failed to find file %s in mounted volume" % file_name) + utils.log("Deleting the replication controller (deletes the single nginx pod).") + utils.kubectl("delete -f " + _rc_config) diff --git a/test/system/runner.py b/test/system/runner.py index 958cd6aeb..9429e2ad9 100755 --- a/test/system/runner.py +++ b/test/system/runner.py @@ -20,350 +20,70 @@ import json import os import re -import select -from shutil import copyfile -import subprocess import sys import time import uuid import oci import yaml +import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface +from block_system_test import BlockSystemTests +from fss_system_test import FSSSystemTests +from backup_vol_system_test import BackupVolumeSystemTest +from canary_metrics import CanaryMetrics -DEBUG_FILE = "runner.log" -TERRAFORM_CLUSTER = "terraform/cluster" -TERRAFORM_DIR = "terraform" -# Variable name correspond to the ones found in the terraform config file -TERRAFORM_AVAILABILITY_DOMAIN = "availability_domain" -TERRAFORM_VOLUME_OCID = "volume_ocid" TEST_NAME = "volumeprovisionersystemtest" -TMP_OCICONFIG = "/tmp/ociconfig" -TMP_KUBECONFIG = "/tmp/kubeconfig.conf" -TMP_OCI_API_KEY_FILE = "/tmp/oci_api_key.pem" REGION = "us-ashburn-1" -TIMEOUT = 600 WRITE_REPORT=True -REPORT_DIR_PATH="/tmp/results" -REPORT_FILE="done" -POD_CONTROLLER = "controller" -POD_VOLUME = "volume" - -# On exit return 0 for success or any other integer for a failure. -# If write_report is true then write a completion file to the Sonabuoy plugin result file. -# The default location is: /tmp/results/done -def _finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_DIR_PATH, report_file=REPORT_FILE): - print "finishing with exit code: " + str(exit_code) - if write_report: - if not os.path.exists(report_dir_path): - os.makedirs(report_dir_path) - if exit_code == 0: - _debug_file("\nTest Suite Success\n") - else: - _debug_file("\nTest Suite Failed\n") - time.sleep(3) - copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE) - with open(report_dir_path + "/" + report_file, "w+") as file: - file.write(str(report_dir_path + "/" + DEBUG_FILE)) - finish_canary_metrics() - sys.exit(exit_code) +OCICONFIG = "OCICONFIG" +OCICONFIG_VAR = "OCICONFIG_VAR" +KUBECONFIG_VAR = "KUBECONFIG_VAR" +SUBNET_OCID = "SUBNET_OCID" +METRICS_FILE = "METRICS_FILE" +MNT_TARGET_OCID = "MNT_TARGET_OCID" def _check_env(check_oci): if check_oci: - if "OCICONFIG" not in os.environ and "OCICONFIG_VAR" not in os.environ: - _log("Error. Can't find either OCICONFIG or OCICONFIG_VAR in the environment.") - _finish_with_exit_code(1) - + if OCICONFIG not in os.environ and OCICONFIG_VAR not in os.environ: + utils.log("Error. 
Can't find either OCICONFIG or OCICONFIG_VAR in the environment.") + utils.finish_with_exit_code(1) def _create_key_files(check_oci): - _log("Setting environment variables") - if "OCICONFIG_VAR" in os.environ: - _run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_OCICONFIG, ".") - _run_command("chmod 600 " + TMP_OCICONFIG, ".") - if "KUBECONFIG_VAR" in os.environ: - _run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_KUBECONFIG, ".") + utils.log("Setting environment variables") + if OCICONFIG_VAR in os.environ: + utils.run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + VolumeProvisionerSystemTestInterface.TMP_OCICONFIG, ".") + utils.run_command("chmod 600 " + VolumeProvisionerSystemTestInterface.TMP_OCICONFIG, ".") + if KUBECONFIG_VAR in os.environ: + utils.run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + utils.TMP_KUBECONFIG, ".") if check_oci: - oci_config_file = _get_oci_config_file() + oci_config_file = VolumeProvisionerSystemTestInterface()._get_oci_config_file() with open(oci_config_file, 'r') as stream: try: cnf = yaml.load(stream) - with open(TMP_OCI_API_KEY_FILE, 'w') as stream: + with open(VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE, 'w') as stream: stream.write(cnf['auth']['key']) except yaml.YAMLError as err: - _log("Error. Failed to parse oci config file %s. Error: %s " % (oci_config_file, err)) - _finish_with_exit_code(1) + utils.log("Error. Failed to parse oci config file %s. Error: %s " % (oci_config_file, err)) + utils.finish_with_exit_code(1) def _destroy_key_files(check_oci): - if "OCICONFIG_VAR" in os.environ: - os.remove(TMP_OCICONFIG) - if "KUBECONFIG_VAR" in os.environ: - os.remove(TMP_KUBECONFIG) + if OCICONFIG_VAR in os.environ: + os.remove(VolumeProvisionerSystemTestInterface.TMP_OCICONFIG) + if KUBECONFIG_VAR in os.environ: + os.remove(utils.TMP_KUBECONFIG) if check_oci: - os.remove(TMP_OCI_API_KEY_FILE) - - -def _get_kubeconfig(): - return os.environ['KUBECONFIG'] if "KUBECONFIG" in os.environ else TMP_KUBECONFIG - - -def _get_oci_config_file(): - return os.environ['OCICONFIG'] if "OCICONFIG" in os.environ else TMP_OCICONFIG - + os.remove(VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE) def _get_oci_api_key_file(): - return TMP_OCI_API_KEY_FILE - - -def _banner(as_banner, bold): - if as_banner: - if bold: - print "********************************************************" - else: - print "--------------------------------------------------------" - - -def _reset_debug_file(): - if os.path.exists(DEBUG_FILE): - os.remove(DEBUG_FILE) - - -def _debug_file(string): - with open(DEBUG_FILE, "a") as debug_file: - debug_file.write(string) - - -def _log(string, as_banner=False, bold=False): - _banner(as_banner, bold) - print string - _banner(as_banner, bold) - - -def _process_stream(stream, read_fds, global_buf, line_buf): - char = stream.read(1) - if char == '': - read_fds.remove(stream) - global_buf.append(char) - line_buf.append(char) - if char == '\n': - _debug_file(''.join(line_buf)) - line_buf = [] - return line_buf - -def _poll(stdout, stderr): - stdoutbuf = [] - stdoutbuf_line = [] - stderrbuf = [] - stderrbuf_line = [] - read_fds = [stdout, stderr] - x_fds = [stdout, stderr] - while read_fds: - rlist, _, _ = select.select(read_fds, [], x_fds) - if rlist: - for stream in rlist: - if stream == stdout: - stdoutbuf_line = _process_stream(stream, read_fds, stdoutbuf, stdoutbuf_line) - if stream == stderr: - stderrbuf_line = _process_stream(stream, 
read_fds, stderrbuf, stderrbuf_line) - return (''.join(stdoutbuf), ''.join(stderrbuf)) - -def _run_command(cmd, cwd, display_errors=True): - _log(cwd + ": " + cmd) - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True, cwd=cwd) - (stdout, stderr) = _poll(process.stdout, process.stderr) - returncode = process.wait() - if returncode != 0 and display_errors: - _log(" stdout: " + stdout) - _log(" stderr: " + stderr) - _log(" result: " + str(returncode)) - return (stdout, stderr, returncode) + return VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE def _get_timestamp(test_id): return test_id if test_id is not None else datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') -def _get_terraform_env(): - timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') - return "TF_VAR_test_id=" + timestamp - -def _terraform(action, cwd, terraform_env): - (stdout, _, returncode) = _run_command(terraform_env + " terraform " + action, cwd) - if returncode != 0: - _log("Error running terraform") - sys.exit(1) - return stdout - -def _kubectl(action, exit_on_error=True, display_errors=True, log_stdout=True): - if "KUBECONFIG" not in os.environ and "KUBECONFIG_VAR" not in os.environ: - (stdout, _, returncode) = _run_command("kubectl " + action, ".", display_errors) - else: - (stdout, _, returncode) = _run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors) - if exit_on_error and returncode != 0: - _log("Error running kubectl") - _finish_with_exit_code(1) - if log_stdout: - _log(stdout) - return stdout - -def _get_pod_infos(test_id, pod_type): - '''Retrieve pod information from kube-system - @param test_id: Test id to use to search for the pod to get infor for - @type test_id: C{Str} - @param pod_type: Pod type to search for - @type pod_type: C{Str} - @return: Tuple containing the name of the resource, its status and the - node it's running on - @rtype: C{Tuple}''' - _namespace = "-n kube-system" if pod_type == POD_VOLUME else "" - stdout = _kubectl(_namespace + " get pods -o wide") - infos = [] - for line in stdout.split("\n"): - line_array = line.split() - if len(line_array) > 0: - name = line_array[0] - if name.startswith('oci-volume-provisioner')and pod_type == POD_VOLUME: - status = line_array[2] - node = line_array[6] - infos.append((name, status, node)) - if re.match(r"nginx-controller-" + test_id + ".*", line) and pod_type == POD_CONTROLLER: - name = line_array[0] - status = line_array[2] - node = line_array[6] - infos.append((name, status, node)) - return infos - -def _get_volume(volume_name): - stdout = _kubectl("get PersistentVolumeClaim -o wide") - for line in stdout.split("\n"): - line_array = line.split() - if len(line_array) >= 3: - name = line_array[0] - status = line_array[1] - if name == volume_name and status == "Bound": - return line_array[2] - return None - -def _get_volume_and_wait(volume_name): - num_polls = 0 - volume = _get_volume(volume_name) - while not volume: - _log(" waiting...") - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - return False - volume = _get_volume(volume_name) - return volume - - -def _get_json_doc(response): - decoder = json.JSONDecoder() - try: - doc = decoder.decode(response) - except (ValueError, UnicodeError) as _: - _log('Invalid JSON in response: %s' % str(response)) - _finish_with_exit_code(1) - return doc - - -def _oci_config(): - config = dict(oci.config.DEFAULT_CONFIG) - oci_config_file = _get_oci_config_file() - with open(oci_config_file, 'r') as stream: 
- try: - cnf = yaml.load(stream) - config["user"] = cnf['auth']['user'] - config["tenancy"] = cnf['auth']['tenancy'] - config["fingerprint"] = cnf['auth']['fingerprint'] - config["key_file"] = TMP_OCI_API_KEY_FILE - config["region"] = cnf['auth']['region'] - return config - except yaml.YAMLError: - _log("Error. Failed to parse oci config file " + oci_config_file) - _finish_with_exit_code(1) - - -def _volume_exists(compartment_id, volume, state, backup=False): - '''Verify whether the volume is available or not''' - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - if backup: - volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id) - else: - volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id) - _log("Getting status for volume %s" % volume) - for vol in _get_json_doc(str(volumes.data)): - if vol['id'].endswith(volume) and vol['lifecycle_state'] == state: - return True - return False - -def _create_backup(volume_ocid, test_id): - '''Create volume backup on OCI from existing volume - @param volume_ocid: Ocid of course volume - @type volume_ocid: C{Str} - @param test_id: Test id used to append to component name - @type test_id: C{Str} - @return: Tuple containing the backup id, compartment id and display name - @rtype: C{Tuple}''' - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid, - display_name="backup_volume_system_test" + test_id) - _response = client.create_volume_backup(_backup_details) - _log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data)) - _res = _get_json_doc(str(_response.data)) - return _res['id'], _res['compartment_id'], _res['display_name'] - -def _delete_backup(backup_ocid): - '''Delete volume backup from OCI - @param backup_ocid: Ocid of backup volume to delete - @type backup_ocid: C{Str}''' - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _response = client.delete_volume_backup(backup_ocid) - _log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) - - -def _create_volume_from_backup(backup_ocid, test_id, availability_domain, compartment_id): - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, - display_name="restored_volume_system_test" + test_id, - availability_domain=availability_domain, - compartment_id=compartment_id) - try: - _response = client.create_volume(_volume_details) - _log("Response for creating volume from backup %s: %s %s" % (_response.data, _get_json_doc(str(_response.data))['id'], compartment_id)) - return _get_json_doc(str(_response.data))['id'] - except Exception as exc: - _log("Failed to create volume from backup %s" % exc) - -def _wait_for_volume(compartment_id, volume, state, backup=False): - num_polls = 0 - while not _volume_exists(compartment_id, volume, state, backup): - _log(" waiting...") - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - return False - return True - -def _wait_for_volume_to_create(compartment_id, volume, backup=False): - return _wait_for_volume(compartment_id, volume, 'AVAILABLE', backup) - - -def _wait_for_volume_to_delete(compartment_id, volume, backup=False): - return _wait_for_volume(compartment_id, volume, 'TERMINATED', backup) - - -def _get_compartment_id(pod_name): - '''Gets the oci compartment_id from the 
oci-volume-provisioner pod host. - This is where oci volume resources will be created.''' - result = _kubectl("-n kube-system exec %s -- curl -s http://169.254.169.254/opc/v1/instance/" % pod_name, - exit_on_error=False, log_stdout=False) - result_json = _get_json_doc(str(result)) - compartment_id = result_json["compartmentId"] - return compartment_id - - def _handle_args(): parser = argparse.ArgumentParser(description='Description of your program') parser.add_argument('--setup', @@ -385,316 +105,13 @@ def _handle_args(): args = vars(parser.parse_args()) if args['check_oci'] and not args['setup']: - _log("If --check-oci is specified, then --setup also needs to be set.") - _finish_with_exit_code(1) + utils.log("If --check-oci is specified, then --setup also needs to be set.") + utils.finish_with_exit_code(1) return args - -def _cleanup(exit_on_error=False, display_errors=True): - _kubectl("delete -f ../../dist/oci-volume-provisioner.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/oci-volume-provisioner-rbac.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/storage-class.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/storage-class-ext3.yaml", - exit_on_error, display_errors) - _kubectl("-n kube-system delete secret oci-volume-provisioner", - exit_on_error, display_errors) - - -def _get_region(): - nodes_json = _kubectl("get nodes -o json", log_stdout=False) - nodes = json.loads(nodes_json) - for node in nodes['items']: - return node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone'] - _log("Region lookup failed") - _finish_with_exit_code(1) - - -def _create_yaml(template, test_id, region=None, backup_id=None): - '''Generate yaml based on the given template and fill in additional details - @param template: Name of file to use as template - @type template: C{Str} - @param test_id: Used for tagging resources with test id - @type test_id: C{Str} - @param region: Used for selecting resources from specified region - @type region: C{Str} - @param backup_id: Backup id to create PVC from - @type backup_id: C{Str} - @return: Name of generated config file - @rtype: C{Str}''' - yaml_file = template + ".yaml" - with open(template, "r") as sources: - lines = sources.readlines() - with open(yaml_file, "w") as sources: - for line in lines: - patched_line = line - patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) - if region is not None: - patched_line = re.sub('{{REGION}}', region, patched_line) - if backup_id is not None: - patched_line = re.sub('{{BACKUP_ID}}', backup_id, patched_line) - sources.write(patched_line) - return yaml_file - - -def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_oci, test_id=None, - availability_domain=None, verify_func=None): - '''Test making a volume claim from a configuration file - @param backup_ocid: Verify whether the volume created from a backup contains backup info - @type backup_ocid: C{Str}''' - _kubectl("create -f " + claim_target, exit_on_error=False) - - volume = _get_volume_and_wait(claim_volume_name) - _log("Created volume with name: " + volume) - - if check_oci: - _log("Querying the OCI api to make sure a volume with this name exists...") - if not _wait_for_volume_to_create(compartment_id, volume): - _log("Failed to find volume with name: " + volume) - return False - _log("Volume: " + volume + " is present and available") - - if verify_func: - verify_func(test_id, availability_domain, volume) - - _log("Delete the volume claim") - 
_kubectl("delete -f " + claim_target, exit_on_error=False) - - if check_oci: - _log("Querying the OCI api to make sure a volume with this name now doesnt exist...") - _wait_for_volume_to_delete(compartment_id, volume) - if not _volume_exists(compartment_id, volume, 'TERMINATED'): - _log("Volume with name: " + volume + " still exists") - return False - _log("Volume: " + volume + " has now been terminated") - - return True - -def _patch_template_file(infile, outfile, volume_name, test_id, availability_domain): - '''Generate yaml based on the given template and fill in additional details - @param template: Name of file to use as template - @type template: C{Str} - @param test_id: Used for tagging resources with test id - @type test_id: C{Str} - @param availability_domain: Used for selecting resources from specified AD - @type availability_domain: C{Str} - @return: Name of generated config file - @rtype: C{Str}''' - with open(infile, "r") as sources: - lines = sources.readlines() - with open(outfile + "." + test_id, "w") as sources: - for line in lines: - patched_line = line - if volume_name is not None: - patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line) - patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) - if availability_domain: - availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':' - patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line) - sources.write(patched_line) - return outfile + "." + test_id - -def _create_rc_yaml(using_oci, volume_name, test_id, availability_domain): - '''Generate replication controller yaml file from provided templates''' - if using_oci: - return _patch_template_file( "replication-controller.yaml.template", - "replication-controller.yaml", - volume_name, test_id, availability_domain) - else: - return _patch_template_file( "replication-controller-with-volume-claim.yaml.template", - "replication-controller-with-volume-claim.yaml", - volume_name, test_id, availability_domain) - -def _get_terraform_output_var(terraform_env, var_name): - '''Retrieve variable value from terraform output from state file - @param terraform_env: Terraform test id - @type terraform_env: C{Str} - @param var_name: Name of variable to retrieve from output - @type var_name: C{Str} - @return: Value of variable - @rtype: C{Str}''' - output = _terraform("output -json", TERRAFORM_DIR, terraform_env) - jsn = json.loads(output) - return jsn[var_name]["value"] - -def _get_volume_name(terraform_env): - '''Retrieve volume name from terraform status output - @param terraform_env: Terraform test id - @type terraform_env: C{Str} - @return: Volume OCID - @rtype: C{Str}''' - _ocid = _get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID).split('.') - return _ocid[len(_ocid)-1] - -def _wait_for_pod_status(desired_status, test_id, pod_type): - '''Wait until the pod gets to the desired status - @param desired_status: Status to wait for - @type desired_status: C{Str} - @param test_id: Test_id used to retrieve components generated by this test - @type test_id: C{Str} - @param pod_type: Pod type to query - @type pod_type: C{Str} - @return: Tuple containing the name of the resource, its status and the - node it's running on - @rtype: C{Tuple}''' - infos = _get_pod_infos(test_id, pod_type) - num_polls = 0 - while not any(i[1] == desired_status for i in infos): - for i in infos: - _log(" - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2]) - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - for i 
in infos: - _log("Error: Pod: " + i[0] + " " + - "failed to achieve status: " + desired_status + "." + - "Final status was: " + i[1]) - sys.exit(1) - infos = _get_pod_infos(test_id, pod_type) - for i in infos: - if i[1] == desired_status: - return (i[0], i[1], i[2]) - # Should never get here. - return (None, None, None) - -def _create_replication_controller(test_id, availability_domain, volume_name="default_volume"): - '''Create replication controller and wait for it to start - @param test_id: Test id used to append to component names - @type test_id : C{Str} - @param availability_domain: Availability domain to start rc in - @type availability_domain: C{Str} - @param volume_name: Volume name used by the replication controller - @type volume_name: C{Str} - @return: Tuple containing the name of the created rc and its config file - @rtype: C{Tuple}''' - _rc_config = _create_rc_yaml(True, volume_name, test_id, availability_domain) - _log("Starting the replication controller (creates a single nginx pod).") - _kubectl("delete -f " + _rc_config, exit_on_error=False, display_errors=False) - _kubectl("create -f " + _rc_config) - _log("Waiting for the pod to start.") - _rc_name, _, _ = _wait_for_pod_status("Running", test_id, POD_CONTROLLER) - return _rc_name, _rc_config - -def _create_file_via_replication_controller(rc_name, file_name="hello.txt"): - '''Create file via the replication controller - @param rcName: Name of the replication controller to write data to - @type rcName: C{Str} - @param fileName: Name of file to create - @type fileName: C{Str}''' - _kubectl("exec " + rc_name + " -- touch /usr/share/nginx/html/" + file_name) - -def _verify_file_existance_via_replication_controller(rc_name, file_name="hello.txt"): - '''Verify whether file exists via the replication controller - @param rcName: Name of the replication controller to verify - @type rcName: C{Str} - @param fileName: Name of file to create - @type fileName: C{Str}''' - _log("Does the new file exist?") - stdout = _kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html") - if file_name not in stdout.split("\n"): - _log("Error: Failed to find file %s in mounted volume" % file_name) - sys.exit(1) - _log("Yes it does!") - -def _setup_create_volume_from_backup(terraform_env, test_id): - '''Setup environment for creating a volume from a backup device - @param test_id: Test id used to append to component names - @type test_id : C{Str} - @return: OCID of generated backup - @rtype: C{Str}''' - _log("Creating test volume (using terraform)", as_banner=True) - _terraform("init", TERRAFORM_DIR, terraform_env) - _terraform("apply", TERRAFORM_DIR, terraform_env) - _availability_domain = _get_terraform_output_var(terraform_env, TERRAFORM_AVAILABILITY_DOMAIN) - _log(_terraform("output -json", TERRAFORM_DIR, terraform_env)) - # Create replication controller and write data to the generated volume - _rc_name, _rc_config = _create_replication_controller(test_id, _availability_domain, volume_name=_get_volume_name(terraform_env)) - _create_file_via_replication_controller(_rc_name) - _verify_file_existance_via_replication_controller(_rc_name) - # Create backup from generated volume - _backup_ocid, compartment_id, _volume_name = _create_backup(_get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID), test_id) - if not _wait_for_volume_to_create(compartment_id, _backup_ocid, backup=True): - _log("Failed to find backup with name: " + _volume_name) - return _backup_ocid, _availability_domain - -def _tear_down_create_volume_from_backup(terraform_env, 
backup_ocid): - '''Tear down create volume from backup - @param test_id: Test id used to append to component names - @type test_id: C{Str} - @param backup_ocid: OCID of backup from which the test volume was created - @type backup_ocid: C{Str}''' - def _destroy_test_volume_atexit(): - _log("Destroying test volume (using terraform)", as_banner=True) - _terraform("destroy -force", TERRAFORM_DIR, terraform_env) - atexit.register(_destroy_test_volume_atexit) - _delete_backup(backup_ocid) - -def _volume_from_backup_check(test_id, availability_domain, volume, file_name='hello.txt'): - '''Verify whether the volume created from the backup is in a healthy state - @param test_id: Test id to use for creating components - @type test_id: C{Str} - @param availability_domain: Availability domain to create resource in - @type availability_domain: C{Str} - @param volume: Name of volume to verify - @type volume: C{Str} - @param file_name: Name of file to do checks for - @type file_name: C{Str}''' - _ocid = volume.split('.') - _ocid = _ocid[-1] - _rc_name, _rc_config = _create_replication_controller(test_id, availability_domain, _ocid) - _log("Does the file from the previous backup exist?") - stdout = _kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html") - if file_name not in stdout.split("\n"): - _log("Error: Failed to find file %s in mounted volume" % file_name) - _log("Deleting the replication controller (deletes the single nginx pod).") - _kubectl("delete -f " + _rc_config) - - -# Canary Metrics ************************************************************** -# - -CM_SIMPLE = "volume_provisioner_simple" -CM_EXT3 = "volume_provisioner_ext3" -CM_NO_AD = "volume_provisioner_no_ad" -CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" - -def canary_metric_date(): - return datetime.datetime.today().strftime('%Y-%m-%d-%H%m%S') - -def init_canary_metrics(check_oci): - if "METRICS_FILE" in os.environ: - _log("generating metrics file...") - canary_metrics = {} - canary_metrics["start_time"] = canary_metric_date() - canary_metrics[CM_SIMPLE] = 0 - canary_metrics[CM_EXT3] = 0 - canary_metrics[CM_NO_AD] = 0 - if check_oci: - canary_metrics[CM_VOLUME_FROM_BACKUP] = 0 - with open(os.environ.get("METRICS_FILE"), 'w') as metrics_file: - json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4) - -def update_canary_metric(name, result): - if "METRICS_FILE" in os.environ: - _log("updating metrics fle...") - with open(os.environ.get("METRICS_FILE"), 'r') as metrics_file: - canary_metrics = json.load(metrics_file) - canary_metrics[name] = result - with open(os.environ.get("METRICS_FILE"), 'w') as metrics_file: - json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4) - -def finish_canary_metrics(): - update_canary_metric("end_time", canary_metric_date()) - - -# Main ************************************************************************ -# - def _main(): - _reset_debug_file() + utils.reset_debug_file() args = _handle_args() _check_env(args['check_oci']) @@ -704,75 +121,21 @@ def _destroy_key_files_atexit(): atexit.register(_destroy_key_files_atexit) test_id = str(uuid.uuid4())[:8] - - success = True - - if args['setup']: - # Cleanup in case any existing state exists in the cluster - _cleanup(display_errors=False) - _log("Setting up the volume provisioner", as_banner=True) - _kubectl("-n kube-system create secret generic oci-volume-provisioner " + \ - "--from-file=config.yaml=" + _get_oci_config_file(), - exit_on_error=False) - _kubectl("create -f ../../dist/storage-class.yaml", 
exit_on_error=False) - _kubectl("create -f ../../dist/storage-class-ext3.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/oci-volume-provisioner-rbac.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/oci-volume-provisioner.yaml", exit_on_error=False) - pod_name, _, _ = _wait_for_pod_status("Running", test_id, POD_VOLUME) - compartment_id = _get_compartment_id(pod_name) - else: - compartment_id = None - + canaryMetrics = CanaryMetrics(metrics_file=os.environ.get(METRICS_FILE)) if args['teardown']: def _teardown_atexit(): - _log("Tearing down the volume provisioner", as_banner=True) - _cleanup() + utils.log("Tearing down the volume provisioner", as_banner=True) + # BlockSystemTests(test_id, args['setup']).cleanup() + FSSSystemTests(test_id, args['setup']).cleanup() + # BackupVolumeSystemTest(test_id, args['setup']).cleanup() atexit.register(_teardown_atexit) if not args['no_test']: - _log("Running system test: Simple", as_banner=True) - init_canary_metrics(args['check_oci']) - res = _test_create_volume(compartment_id, - _create_yaml("../../examples/example-claim.template", test_id, _get_region()), - "demooci-" + test_id, args['check_oci']) - update_canary_metric(CM_SIMPLE, int(res)) - success = False if res == False else success - - _log("Running system test: Ext3 file system", as_banner=True) - res = _test_create_volume(compartment_id, - _create_yaml("../../examples/example-claim-ext3.template", test_id, None), - "demooci-ext3-" + test_id, args['check_oci']) - update_canary_metric(CM_EXT3, int(res)) - success = False if res == False else success - - _log("Running system test: No AD specified", as_banner=True) - res = _test_create_volume(compartment_id, - _create_yaml("../../examples/example-claim-no-AD.template", test_id, None), - "demooci-no-ad-" + test_id, args['check_oci']) - update_canary_metric(CM_NO_AD, int(res)) - success = False if res == False else success - - if args['check_oci']: - _log("Running system test: Create volume from backup", as_banner=True) - terraform_env = _get_terraform_env() - _backup_ocid, _availability_domain = _setup_create_volume_from_backup(terraform_env, test_id) - _claim_target = _create_yaml("../../examples/example-claim-from-backup.template", test_id, - region=_availability_domain.split(':')[1], backup_id=_backup_ocid) - res = _test_create_volume(compartment_id, _claim_target, - "demooci-from-backup-" + test_id, args['check_oci'], - test_id=test_id, availability_domain=_availability_domain, - verify_func=_volume_from_backup_check) - update_canary_metric(CM_VOLUME_FROM_BACKUP, int(res)) - success = False if res == False else success - _tear_down_create_volume_from_backup(terraform_env, _backup_ocid) - - if not success: - _finish_with_exit_code(1) - else: - _finish_with_exit_code(0) + BlockSystemTests(test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + FSSSystemTests(subnet_ocid=os.environ.get(SUBNET_OCID), test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + BackupVolumeSystemTest(test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + canaryMetrics.finish_canary_metrics() + utils.finish_with_exit_code(0) if __name__ == "__main__": _main() - - - diff --git a/examples/example-claim-ext3.template b/test/system/templates/example-claim-ext3.template similarity index 100% rename from examples/example-claim-ext3.template rename to test/system/templates/example-claim-ext3.template diff --git 
a/examples/example-claim-from-backup.template b/test/system/templates/example-claim-from-backup.template similarity index 100% rename from examples/example-claim-from-backup.template rename to test/system/templates/example-claim-from-backup.template diff --git a/test/system/templates/example-claim-fss.template b/test/system/templates/example-claim-fss.template new file mode 100644 index 000000000..15f4e77fe --- /dev/null +++ b/test/system/templates/example-claim-fss.template @@ -0,0 +1,14 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: demooci-fss-{{TEST_ID}} +spec: + storageClassName: "oci-fss" + selector: + matchLabels: + oci-availability-domain: "{{REGION}}" + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Gi # Required by Kubernetes but not used by FSS diff --git a/examples/example-claim-no-AD.template b/test/system/templates/example-claim-no-AD.template similarity index 100% rename from examples/example-claim-no-AD.template rename to test/system/templates/example-claim-no-AD.template diff --git a/examples/example-claim.template b/test/system/templates/example-claim.template similarity index 100% rename from examples/example-claim.template rename to test/system/templates/example-claim.template diff --git a/test/system/templates/example-pod-fss.template b/test/system/templates/example-pod-fss.template new file mode 100644 index 000000000..2f5e14e97 --- /dev/null +++ b/test/system/templates/example-pod-fss.template @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: demooci-fss-pod-{{TEST_ID}} + labels: + name: demooci-fss-pod-{{TEST_ID}} +spec: + containers: + - name: demooci-fss-pod-{{TEST_ID}} + image: fedora/nginx + ports: + - name: web + containerPort: 80 + volumeMounts: + - name: nginx + mountPath: /usr/share/nginx/html + volumes: + - name: nginx + persistentVolumeClaim: + claimName: demooci-fss-{{TEST_ID}} diff --git a/test/system/replication-controller-with-volume-claim.yaml.template b/test/system/templates/example-replication-controller-with-volume-claim.template similarity index 100% rename from test/system/replication-controller-with-volume-claim.yaml.template rename to test/system/templates/example-replication-controller-with-volume-claim.template diff --git a/test/system/replication-controller.yaml.template b/test/system/templates/example-replication-controller.template similarity index 100% rename from test/system/replication-controller.yaml.template rename to test/system/templates/example-replication-controller.template diff --git a/test/system/templates/example-storage-class-fss-empty.template b/test/system/templates/example-storage-class-fss-empty.template new file mode 100644 index 000000000..34a3447ed --- /dev/null +++ b/test/system/templates/example-storage-class-fss-empty.template @@ -0,0 +1,7 @@ +# Storage class to create mount target on without any additional information +# Assumes a mount target has already been created and a random one shall be chosen +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: oci-fss +provisioner: oracle.com/oci-fss diff --git a/test/system/templates/example-storage-class-fss-mnt.template b/test/system/templates/example-storage-class-fss-mnt.template new file mode 100644 index 000000000..0954ca269 --- /dev/null +++ b/test/system/templates/example-storage-class-fss-mnt.template @@ -0,0 +1,8 @@ +# Storage class with subnet OCID to create mount target on +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: oci-fss +provisioner: 
oracle.com/oci-fss +parameters: + mntTargetId: {{MNT_TARGET_OCID}} diff --git a/test/system/templates/example-storage-class-fss-subnet.template b/test/system/templates/example-storage-class-fss-subnet.template new file mode 100644 index 000000000..743e216fa --- /dev/null +++ b/test/system/templates/example-storage-class-fss-subnet.template @@ -0,0 +1,9 @@ + +# Storage class with subnet OCID to create mount target on +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: oci-fss +provisioner: oracle.com/oci-fss +parameters: + subnetId: {{SUBNET_OCID}} diff --git a/test/system/utils.py b/test/system/utils.py new file mode 100644 index 000000000..3d46d02aa --- /dev/null +++ b/test/system/utils.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time +import sys +import select +import subprocess +from shutil import copyfile + +DEBUG_FILE = "runner.log" +REPORT_DIR_PATH="/tmp/results" +REPORT_FILE="done" +TMP_KUBECONFIG = "/tmp/kubeconfig.conf" + +def _banner(as_banner, bold): + if as_banner: + if bold: + print "********************************************************" + else: + print "--------------------------------------------------------" + +def _process_stream(stream, read_fds, global_buf, line_buf): + char = stream.read(1) + if char == '': + read_fds.remove(stream) + global_buf.append(char) + line_buf.append(char) + if char == '\n': + _debug_file(''.join(line_buf)) + line_buf = [] + return line_buf + +def _poll(stdout, stderr): + stdoutbuf = [] + stdoutbuf_line = [] + stderrbuf = [] + stderrbuf_line = [] + read_fds = [stdout, stderr] + x_fds = [stdout, stderr] + while read_fds: + rlist, _, _ = select.select(read_fds, [], x_fds) + if rlist: + for stream in rlist: + if stream == stdout: + stdoutbuf_line = _process_stream(stream, read_fds, stdoutbuf, stdoutbuf_line) + if stream == stderr: + stderrbuf_line = _process_stream(stream, read_fds, stderrbuf, stderrbuf_line) + return (''.join(stdoutbuf), ''.join(stderrbuf)) + +# On exit return 0 for success or any other integer for a failure. +# If write_report is true then write a completion file to the Sonabuoy plugin result file. 
+# The default location is: /tmp/results/done +def finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_DIR_PATH, report_file=REPORT_FILE): + print "finishing with exit code: " + str(exit_code) + if write_report: + if not os.path.exists(report_dir_path): + os.makedirs(report_dir_path) + if exit_code == 0: + _debug_file("\nTest Suite Success\n") + else: + _debug_file("\nTest Suite Failed\n") + time.sleep(3) + copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE) + with open(report_dir_path + "/" + report_file, "w+") as file: + file.write(str(report_dir_path + "/" + DEBUG_FILE)) + sys.exit(exit_code) + +def reset_debug_file(): + if os.path.exists(DEBUG_FILE): + os.remove(DEBUG_FILE) + +def _debug_file(string): + with open(DEBUG_FILE, "a") as debug_file: + debug_file.write(string) + + +def log(string, as_banner=False, bold=False): + _banner(as_banner, bold) + print string + _banner(as_banner, bold) + +def run_command(cmd, cwd, display_errors=True): + log(cwd + ": " + cmd) + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, cwd=cwd) + (stdout, stderr) = _poll(process.stdout, process.stderr) + returncode = process.wait() + if returncode != 0 and display_errors: + log(" stdout: " + stdout) + log(" stderr: " + stderr) + log(" result: " + str(returncode)) + return (stdout, stderr, returncode) + +def _get_kubeconfig(): + return os.environ['KUBECONFIG'] if "KUBECONFIG" in os.environ else TMP_KUBECONFIG + +def kubectl(action, exit_on_error=True, display_errors=True, log_stdout=True): + if "KUBECONFIG" not in os.environ and "KUBECONFIG_VAR" not in os.environ: + (stdout, _, returncode) = run_command("kubectl " + action, ".", display_errors) + else: + (stdout, _, returncode) = run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors) + if exit_on_error and returncode != 0: + log("Error running kubectl") + finish_with_exit_code(1) + if log_stdout: + log(stdout) + return stdout diff --git a/test/system/vol_provisioner_system_test.py b/test/system/vol_provisioner_system_test.py new file mode 100644 index 000000000..3710058f1 --- /dev/null +++ b/test/system/vol_provisioner_system_test.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import utils +import time +import oci +import yaml +import os +import json +import datetime +import sys +import re +from yaml_utils import PopulateYaml + + +def _retrieve_metrics(test_func): + def wrapper(self, *args, **kwargs): + _canaryMetricName = kwargs.pop(self.CANARY_METRIC_NAME) + _res = test_func(self, *args, **kwargs) + if self._canaryMetrics: + self._canaryMetrics.update_canary_metric(_canaryMetricName, int(_res)) + return _res + return wrapper + +class VolumeProvisionerSystemTestInterface(object): + + TERRAFORM_CLUSTER = "terraform/cluster" + TERRAFORM_DIR = "terraform" + # Variable name correspond to the ones found in the terraform config file + TERRAFORM_AVAILABILITY_DOMAIN = "availability_domain" + TERRAFORM_VOLUME_OCID = "volume_ocid" + POD_CONTROLLER = "controller" + POD_VOLUME = "volume" + BLOCK_STORAGE = "block" + FS_STORAGE = "fileSystem" + TIMEOUT = 600 + BOUND = "Bound" + TMP_OCI_API_KEY_FILE = "/tmp/oci_api_key.pem" + TMP_OCICONFIG = "/tmp/ociconfig" + LIFECYCLE_STATE_ON = {BLOCK_STORAGE: 'AVAILABLE', + FS_STORAGE: 'ACTIVE'} + LIFECYCLE_STATE_OFF = {BLOCK_STORAGE: 'TERMINATED', + FS_STORAGE:'DELETED'} + DEFAULT_AVAILABILITY_DOMAIN="NWuj:PHX-AD-2" + CANARY_METRIC_NAME="canaryMetricName" + + def __init__(self, test_id=None, setup=False, compartment_id=None, check_oci=False, k8Resources=None, canaryMetrics=None): + '''@param test_id: Id of currently running test + @type test_id: C{Str} + @param setup: Flag that indicated whether the provisioner needs to be setup on the cluster + @type setup: C{Bool} + @param compartment_id: Compartment Id to use to creaate/delete resources + @type compartment_id: C{Str} + @param check_oci: 'Check with OCI that the volumes have been created/destroyed (requires --setup)' + @type check_oci: C{Bool} + ''' + self._test_id = test_id if test_id else "demotest" + self._setup = setup + self._compartment_id = compartment_id + self._region = self._get_region() + self._check_oci = check_oci + self._oci_config = self._get_oci_config() if check_oci else None + self._terraform_env = self._get_terraform_env() + self._k8sResources = k8Resources if k8Resources else [] + self._canaryMetrics = canaryMetrics + self._testSuccess=True + + def run(self): + if self._setup: + # Cleanup in case any existing state exists in the cluster + self.cleanup(display_errors=False) + utils.log("Setting up the volume provisioner", as_banner=True) + utils.kubectl("-n kube-system create secret generic oci-volume-provisioner " + \ + "--from-file=config.yaml=" + self._get_oci_config_file(), + exit_on_error=False) + for _res in self._k8sResources: + utils.kubectl("create -f " + _res, exit_on_error=False) + pod_name, _, _ = self._wait_for_pod_status("Running", self.POD_VOLUME) + self._compartment_id = self._get_compartment_id(pod_name) + def cleanup(self, exit_on_error=False, display_errors=True): + for _res in self._k8sResources: + utils.kubectl("delete -f " + _res, exit_on_error, display_errors) + + def _checkTestSuccess(self): + '''Check whether any tests failed or not''' + if not self._testSuccess: + utils.finish_with_exit_code(1) + + @staticmethod + def _get_region(): + nodes_json = utils.kubectl("get nodes -o json", log_stdout=False) + nodes = json.loads(nodes_json) + for node in nodes['items']: + return node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone'] + utils.log("Region lookup failed") + utils.finish_with_exit_code(1) + + def _get_oci_config_file(self): + return os.environ['OCICONFIG'] if "OCICONFIG" in os.environ else self.TMP_OCICONFIG + + def 
_get_oci_config(self): + config = dict(oci.config.DEFAULT_CONFIG) + oci_config_file = self._get_oci_config_file() + with open(oci_config_file, 'r') as stream: + try: + cnf = yaml.load(stream) + config["user"] = cnf['auth']['user'] + config["tenancy"] = cnf['auth']['tenancy'] + config["fingerprint"] = cnf['auth']['fingerprint'] + config["key_file"] = self.TMP_OCI_API_KEY_FILE + config["region"] = cnf['auth']['region'] + return config + except yaml.YAMLError: + utils.log("Error. Failed to parse oci config file " + oci_config_file) + utils.finish_with_exit_code(1) + + def _get_terraform_env(self): + timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') + return "TF_VAR_test_id=" + timestamp + + def _terraform(self, action, cwd): + '''Execute terraform command''' + (stdout, _, returncode) = utils.run_command(self._terraform_env + " terraform " + action, cwd) + if returncode != 0: + utils.log("Error running terraform") + sys.exit(1) + return stdout + + def _get_terraform_output_var(self, var_name): + '''Retrieve variable value from terraform output from state file + @param var_name: Name of variable to retrieve from output + @type var_name: C{Str} + @return: Value of variable + @rtype: C{Str}''' + output = self._terraform("output -json", self.TERRAFORM_DIR,) + jsn = json.loads(output) + return jsn[var_name]["value"] + + def _get_volume_name(self): + '''Retrieve volume name from terraform status output + @param terraform_env: Terraform test id + @type terraform_env: C{Str} + @return: Volume OCID + @rtype: C{Str}''' + _ocid = self._get_terraform_output_var(self.TERRAFORM_VOLUME_OCID).split('.') + return _ocid[len(_ocid)-1] + + def _get_volume(self, volume_name): + stdout = utils.kubectl("get PersistentVolumeClaim -o wide") + for line in stdout.split("\n"): + line_array = line.split() + if len(line_array) >= 3: + name = line_array[0] + status = line_array[1] + if name == volume_name and status == self.BOUND: + return line_array[2] + + def _get_volume_and_wait(self, volume_name): + num_polls = 0 + volume = self._get_volume(volume_name) + while not volume: + utils.log(" waiting...") + time.sleep(1) + num_polls += 1 + if num_polls == self.TIMEOUT: + return False + volume = self._get_volume(volume_name) + return volume + + @staticmethod + def _get_json_doc(response): + decoder = json.JSONDecoder() + try: + doc = decoder.decode(response) + except (ValueError, UnicodeError) as _: + utils.log('Invalid JSON in response: %s' % str(response)) + utils.finish_with_exit_code(1) + return doc + + def _volume_exists(self, volume, state, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + '''Verify whether the volume is available or not + @param storageType: Storage type to search for volumes in + @type storageType: C{Str} + @param availability_domain: Availability domain to look in for + @type availability_domain: C{Str}''' + if compartment_id is None: + compartment_id = self._compartment_id + if storageType == self.BLOCK_STORAGE: + utils.log("Retrieving block volumes for compartmentID %s" % compartment_id) + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + if backup: + volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id) + else: + volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id) + else: + utils.log("Retrieving file systems") + client = oci.file_storage.FileStorageClient(self._oci_config) + volumes = 
oci.pagination.list_call_get_all_results(client.list_file_systems, compartment_id, + availability_domain) + utils.log("Getting status for volume %s" % volume) + for vol in self._get_json_doc(str(volumes.data)): + if vol['id'].endswith(volume) and vol['lifecycle_state'] == state: + return True + return False + + def _wait_for_volume(self, volume, state, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + num_polls = 0 + while not self._volume_exists(volume, state, compartment_id=compartment_id, backup=backup, storageType=storageType, + availability_domain=availability_domain,): + utils.log(" waiting...") + time.sleep(1) + num_polls += 1 + if num_polls == self.TIMEOUT: + return False + return True + + def _wait_for_volume_to_create(self, volume, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + return self._wait_for_volume(volume, self.LIFECYCLE_STATE_ON[storageType], compartment_id=compartment_id, backup=backup, storageType=storageType, + availability_domain=availability_domain) + + + def _wait_for_volume_to_delete(self, volume, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + return self._wait_for_volume(volume, self.LIFECYCLE_STATE_OFF[storageType], compartment_id=compartment_id, backup=backup, storageType=storageType, + availability_domain=availability_domain) + + @_retrieve_metrics + def _test_create_volume(self, claim_target, claim_volume_name, availability_domain=None, verify_func=None, + storageType=BLOCK_STORAGE, canaryMetricName=None): + '''Test making a volume claim from a configuration file + @param backup_ocid: Verify whether the volume created from a backup contains backup info + @type backup_ocid: C{Str}''' + utils.kubectl("create -f " + claim_target, exit_on_error=False) + + volume = self._get_volume_and_wait(claim_volume_name) + utils.log("Created volume with name: %s" % str(volume)) + + if self._check_oci: + utils.log("Querying the OCI api to make sure a volume with this name exists...") + if not self._wait_for_volume_to_create(volume, storageType=storageType, + availability_domain=availability_domain): + utils.log("Failed to find volume with name: " + volume) + return False + utils.log("Volume: " + volume + " is present and available") + + if verify_func: + verify_func(self._test_id, availability_domain, volume) + utils.log("Delete the volume claim") + utils.kubectl("delete -f " + claim_target, exit_on_error=False) + + if self._check_oci: + utils.log("Querying the OCI api to make sure a volume with this name now doesnt exist...") + self._wait_for_volume_to_delete(volume, storageType=storageType, + availability_domain=availability_domain) + if not self._volume_exists(volume, self.LIFECYCLE_STATE_OFF[storageType], + compartment_id=self._compartment_id, storageType=storageType, + availability_domain=availability_domain): + utils.log("Volume with name: " + volume + " still exists") + return False + utils.log("Volume: " + volume + " has now been terminated") + return True + + def _create_rc_or_pod(self, config, availability_domain, volume_name="default_volume"): + '''Create replication controller or pod and wait for it to start + @param rc_config: Replication controller configuration file to patch + @type rc_config: C{Str} + @param availability_domain: Availability domain to start rc in + @type availability_domain: C{Str} + @param volume_name: Volume name used by the replication controller + @type volume_name: C{Str} + @return: Tuple containing the name of 
the created rc and its config file + @rtype: C{Tuple}''' + _config = PopulateYaml(config, self._test_id, volume_name=volume_name, availability_domain=availability_domain).generateFile() + utils.log("Starting the replication controller (creates a single nginx pod).") + utils.kubectl("delete -f " + _config, exit_on_error=False, display_errors=False) + utils.kubectl("create -f " + _config) + utils.log("Waiting for the pod to start.") + _name, _, _ = self._wait_for_pod_status("Running", self.POD_CONTROLLER) + return _name, _config + + def _wait_for_pod_status(self, desired_status, pod_type): + '''Wait until the pod gets to the desired status + @param desired_status: Status to wait for + @type desired_status: C{Str} + @param pod_type: Pod type to query + @type pod_type: C{Str} + @return: Tuple containing the name of the resource, its status and the + node it's running on + @rtype: C{Tuple}''' + infos = self._get_pod_infos(pod_type) + num_polls = 0 + while not any(i[1] == desired_status for i in infos): + for i in infos: + utils.log(" - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2]) + time.sleep(1) + num_polls += 1 + if num_polls == self.TIMEOUT: + for i in infos: + utils.log("Error: Pod: " + i[0] + " " + + "failed to achieve status: " + desired_status + "." + + "Final status was: " + i[1]) + sys.exit(1) + infos = self._get_pod_infos(pod_type) + for i in infos: + if i[1] == desired_status: + return (i[0], i[1], i[2]) + # Should never get here. + return (None, None, None) + + def _get_pod_infos(self, pod_type): + '''Retrieve pod information from kube-system + @param pod_type: Pod type to search for + @type pod_type: C{Str} + @return: Tuple containing the name of the resource, its status and the + node it's running on + @rtype: C{Tuple}''' + _namespace = "-n kube-system" if pod_type == self.POD_VOLUME else "" + stdout = utils.kubectl(_namespace + " get pods -o wide") + infos = [] + for line in stdout.split("\n"): + line_array = line.split() + if len(line_array) > 0: + name = line_array[0] + if name.startswith('oci-volume-provisioner') and pod_type == self.POD_VOLUME: + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + if re.match(r"nginx-controller-" + self._test_id + ".*", line) and pod_type == self.POD_CONTROLLER: + name = line_array[0] + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + if re.match(r"demooci-fss-pod-" + self._test_id + ".*", line) and pod_type == self.POD_CONTROLLER: + name = line_array[0] + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + return infos + + def _get_compartment_id(self, pod_name): + '''Gets the oci compartment_id from the oci-volume-provisioner pod host. 
+ This is where oci volume resources will be created.''' + result = utils.kubectl("-n kube-system exec %s -- curl -s http://169.254.169.254/opc/v1/instance/" % pod_name, + exit_on_error=False, log_stdout=False) + result_json = self._get_json_doc(str(result)) + compartment_id = result_json["compartmentId"] + return compartment_id + + @staticmethod + def _create_file_via_replication_controller(rc_name, file_name="hello.txt"): + '''Create file via the replication controller + @param rcName: Name of the replication controller to write data to + @type rcName: C{Str} + @param fileName: Name of file to create + @type fileName: C{Str}''' + utils.kubectl("exec " + rc_name + " -- touch /usr/share/nginx/html/" + file_name) + + @staticmethod + def _verify_file_existance_via_replication_controller(rc_name, file_name="hello.txt"): + '''Verify whether file exists via the replication controller + @param rcName: Name of the replication controller to verify + @type rcName: C{Str} + @param fileName: Name of file to create + @type fileName: C{Str}''' + utils.log("Does the new file exist?") + stdout = utils.kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html") + if file_name not in stdout.split("\n"): + utils.log("Error: Failed to find file %s in mounted volume" % file_name) + sys.exit(1) + utils.log("Yes it does!") diff --git a/test/system/yaml_utils.py b/test/system/yaml_utils.py new file mode 100644 index 000000000..e701d5015 --- /dev/null +++ b/test/system/yaml_utils.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re +import utils + +class PopulateYaml(): + + TEST_ID = "{{TEST_ID}}" + REGION = "{{REGION}}" + BACKUP_ID = "{{BACKUP_ID}}" + MNT_TARGET_OCID = "{{MNT_TARGET_OCID}}" + SUBNET_OCID = "{{SUBNET_OCID}}" + VOLUME_NAME = "{{VOLUME_NAME}}" + AVAILABILITY_DOMAIN = "{{AVAILABILITY_DOMAIN}}" + TEMPLATE_ELEMENTS = {'_test_id': TEST_ID, '_region': REGION, + '_backup_id': BACKUP_ID, '_mount_target_ocid': MNT_TARGET_OCID, + '_subnet_ocid': SUBNET_OCID, '_volume_name': VOLUME_NAME, + '_availability_domain': AVAILABILITY_DOMAIN} + + def __init__(self, template_file, test_id, region=None, backup_id=None, + mount_target_ocid=None, subnet_ocid=None, volume_name=None, availability_domain=None): + '''@param template: Name of file to use as template + @type template: C{Str} + @param test_id: Used for tagging resources with test id + @type test_id: C{Str} + @param region: Used for selecting resources from specified region + @type region: C{Str} + @param backup_id: Backup id to create PVC from + @type backup_id: C{Str} + @param mount_target_ocid: Mount target OCID to populate config with + @type mount_target_ocid: C{Str} + @param volume_name: Name used to create volume + @type volume_name: C{Str} + @param availability_domain: Availability domain (used for pvc) + @type availability_domain: C{Str}''' + self._template_file = template_file + self._test_id = test_id + self._region = region + self._backup_id = backup_id + self._mount_target_ocid = mount_target_ocid + self._subnet_ocid = subnet_ocid + self._volume_name = volume_name + # yaml config does not allow ':' + self._availability_domain = availability_domain.replace(':', '-') if availability_domain else None + + def generateFile(self): + '''Generate yaml based on the given template and fill in additional details + @return: Name of generated config file + @rtype: C{Str}''' + yaml_file = self._template_file + ".yaml" + with open(self._template_file, "r") as sources: + lines = sources.readlines() + with open(yaml_file, "w") as sources: + for line in lines: + patched_line = line + for _elem, _elemName in self.TEMPLATE_ELEMENTS.iteritems(): + if getattr(self, _elem) is not None: + patched_line = re.sub(_elemName, getattr(self, _elem), patched_line) + elif _elemName in [self.MNT_TARGET_OCID, self.SUBNET_OCID] and _elemName in patched_line: + # Remove lines from config files if attribute is not specified + utils.log("%s not specified. Removing reference from config" % _elemName) + patched_line = "" + sources.write(patched_line) + return yaml_file + + + diff --git a/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go b/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go new file mode 100644 index 000000000..5f0293083 --- /dev/null +++ b/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package main
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/kubernetes-incubator/external-storage/lib/controller"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+const (
+	provisionerNameKey = "PROVISIONER_NAME"
+)
+
+type nfsProvisioner struct {
+	client kubernetes.Interface
+	server string
+	path   string
+}
+
+const (
+	mountPath = "/persistentvolumes"
+)
+
+var _ controller.Provisioner = &nfsProvisioner{}
+
+func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) {
+	if options.PVC.Spec.Selector != nil {
+		return nil, fmt.Errorf("claim Selector is not supported")
+	}
+	glog.V(4).Infof("nfs provisioner: VolumeOptions %v", options)
+
+	pvcNamespace := options.PVC.Namespace
+	pvcName := options.PVC.Name
+
+	pvName := strings.Join([]string{pvcNamespace, pvcName, options.PVName}, "-")
+
+	fullPath := filepath.Join(mountPath, pvName)
+	glog.V(4).Infof("creating path %s", fullPath)
+	if err := os.MkdirAll(fullPath, 0777); err != nil {
+		return nil, errors.New("unable to create directory to provision new pv: " + err.Error())
+	}
+	os.Chmod(fullPath, 0777)
+
+	path := filepath.Join(p.path, pvName)
+
+	pv := &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: options.PVName,
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
+			AccessModes:                   options.PVC.Spec.AccessModes,
+			MountOptions:                  options.MountOptions,
+			Capacity: v1.ResourceList{
+				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
+			},
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				NFS: &v1.NFSVolumeSource{
+					Server:   p.server,
+					Path:     path,
+					ReadOnly: false,
+				},
+			},
+		},
+	}
+	return pv, nil
+}
+
+func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error {
+	path := volume.Spec.PersistentVolumeSource.NFS.Path
+	pvName := filepath.Base(path)
+	oldPath := filepath.Join(mountPath, pvName)
+	archivePath := filepath.Join(mountPath, "archived-"+pvName)
+	glog.V(4).Infof("archiving path %s to %s", oldPath, archivePath)
+	return os.Rename(oldPath, archivePath)
+}
+
+func main() {
+	flag.Parse()
+	flag.Set("logtostderr", "true")
+
+	server := os.Getenv("NFS_SERVER")
+	if server == "" {
+		glog.Fatal("NFS_SERVER not set")
+	}
+	path := os.Getenv("NFS_PATH")
+	if path == "" {
+		glog.Fatal("NFS_PATH not set")
+	}
+	provisionerName := os.Getenv(provisionerNameKey)
+	if provisionerName == "" {
+		glog.Fatalf("environment variable %s is not set! Please set it.", provisionerNameKey)
+	}
+
+	// Create an InClusterConfig and use it to create a client for the controller
+	// to use to communicate with Kubernetes
+	config, err := rest.InClusterConfig()
+	if err != nil {
+		glog.Fatalf("Failed to create config: %v", err)
+	}
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		glog.Fatalf("Failed to create client: %v", err)
+	}
+
+	// The controller needs to know what the server version is because out-of-tree
+	// provisioners aren't officially supported until 1.5
+	serverVersion, err := clientset.Discovery().ServerVersion()
+	if err != nil {
+		glog.Fatalf("Error getting server version: %v", err)
+	}
+
+	clientNFSProvisioner := &nfsProvisioner{
+		server: server,
+		path:   path,
+	}
+	// Start the provision controller which will dynamically provision efs NFS
+	// PVs
+	pc := controller.NewProvisionController(clientset, provisionerName, clientNFSProvisioner, serverVersion.GitVersion)
+	pc.Run(wait.NeverStop)
+}