From c533b119fd430aa410255592e709e2695cbbd40b Mon Sep 17 00:00:00 2001
From: MadalinaPatrichi
Date: Thu, 31 May 2018 12:14:06 +0100
Subject: [PATCH 1/5] Specify kubeconfig when running terraform actions

---
 Makefile                                      |   1 +
 ...sw.template => example-claim-fss.template} |   7 +-
 examples/example-claim-fss.yaml               |  14 +++
 manifests/storage-class-fss.yaml              |   5 +
 pkg/helpers/mock_interfaces.go                | 119 ++++++++++++++++++
 pkg/oci/client/client.go                      |  19 +++
 pkg/provisioner/block/block_test.go           |   8 +-
 pkg/provisioner/core/provisioner.go           |   9 ++
 pkg/provisioner/filestorage/filestorage.go    | 114 +++++++++++++++++
 .../filestorage/filestorage_test.go           |  41 ++++++
 test/system/runner.py                         |  79 ++++++++----
 11 files changed, 390 insertions(+), 26 deletions(-)
 rename examples/{example-claim-ffsw.template => example-claim-fss.template} (51%)
 create mode 100644 examples/example-claim-fss.yaml
 create mode 100644 manifests/storage-class-fss.yaml
 create mode 100644 pkg/helpers/mock_interfaces.go
 create mode 100644 pkg/provisioner/filestorage/filestorage.go
 create mode 100644 pkg/provisioner/filestorage/filestorage_test.go

diff --git a/Makefile b/Makefile
index b0d695c18..d948e8b33 100644
--- a/Makefile
+++ b/Makefile
@@ -61,6 +61,7 @@ build: ${DIR}/${BIN}
 		manifests/oci-volume-provisioner.yaml > $(DIR)/oci-volume-provisioner.yaml
 	cp manifests/storage-class.yaml $(DIR)/storage-class.yaml
 	cp manifests/storage-class-ext3.yaml $(DIR)/storage-class-ext3.yaml
+	cp manifests/storage-class-fss.yaml $(DIR)/storage-class-fss.yaml
 	cp manifests/oci-volume-provisioner-rbac.yaml $(DIR)/oci-volume-provisioner-rbac.yaml
diff --git a/examples/example-claim-ffsw.template b/examples/example-claim-fss.template
similarity index 51%
rename from examples/example-claim-ffsw.template
rename to examples/example-claim-fss.template
index 5cb0b9907..a50c4af55 100644
--- a/examples/example-claim-ffsw.template
+++ b/examples/example-claim-fss.template
@@ -1,9 +1,12 @@
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
-  name: demooci-ffsw-{{TEST_ID}}
+  name: demooci-fss-{{TEST_ID}}
 spec:
-  storageClassName: "ffsw"
+  storageClassName: "oci-fss"
+  selector:
+    matchLabels:
+      oci-availability-domain: "{{REGION}}"
   accessModes:
   - ReadWriteMany
   resources:
diff --git a/examples/example-claim-fss.yaml b/examples/example-claim-fss.yaml
new file mode 100644
index 000000000..48b4c7f60
--- /dev/null
+++ b/examples/example-claim-fss.yaml
@@ -0,0 +1,14 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: demooci
+spec:
+  storageClassName: "fss"
+  selector:
+    matchLabels:
+      oci-availability-domain: "PHX-AD-1"
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 50Gi
\ No newline at end of file
diff --git a/manifests/storage-class-fss.yaml b/manifests/storage-class-fss.yaml
new file mode 100644
index 000000000..d0bda0578
--- /dev/null
+++ b/manifests/storage-class-fss.yaml
@@ -0,0 +1,5 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: oci-fss
+provisioner: oracle.com/oci
\ No newline at end of file
diff --git a/pkg/helpers/mock_interfaces.go b/pkg/helpers/mock_interfaces.go
new file mode 100644
index 000000000..ce4b6d44d
--- /dev/null
+++ b/pkg/helpers/mock_interfaces.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package helpers + +import ( + "context" + "time" + + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" + "github.com/oracle/oci-go-sdk/identity" + "github.com/oracle/oci-volume-provisioner/pkg/oci/client" +) + +var ( + // VolumeBackupID of backup volume + VolumeBackupID = "dummyVolumeBackupId" + fileSystemID = "dummyFileSystemId" +) + +// MockBlockStorageClient mocks BlockStorage client implementation +type MockBlockStorageClient struct { +} + +// CreateVolume mocks the BlockStorage CreateVolume implementation +func (c *MockBlockStorageClient) CreateVolume(ctx context.Context, request core.CreateVolumeRequest) (response core.CreateVolumeResponse, err error) { + return core.CreateVolumeResponse{Volume: core.Volume{Id: common.String(VolumeBackupID)}}, nil +} + +// DeleteVolume mocks the BlockStorage DeleteVolume implementation +func (c *MockBlockStorageClient) DeleteVolume(ctx context.Context, request core.DeleteVolumeRequest) (response core.DeleteVolumeResponse, err error) { + return core.DeleteVolumeResponse{}, nil +} + +// GetVolume mocks the BlockStorage GetVolume implementation +func (c *MockBlockStorageClient) GetVolume(ctx context.Context, request core.GetVolumeRequest) (response core.GetVolumeResponse, err error) { + return core.GetVolumeResponse{Volume: core.Volume{Id: common.String(VolumeBackupID)}}, nil +} + +// MockFileStorageClient mocks FileStorage client implementation +type MockFileStorageClient struct { +} + +// CreateFileSystem mocks the FileStorage CreateFileSystem implementation +func (c *MockFileStorageClient) CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) { + return filestorage.CreateFileSystemResponse{FileSystem: filestorage.FileSystem{Id: common.String(fileSystemID)}}, nil +} + +// DeleteFileSystem mocks the FileStorage DeleteFileSystem implementation +func (c *MockFileStorageClient) DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) { + return filestorage.DeleteFileSystemResponse{}, nil +} + +// MockIdentityClient mocks identity client structure +type MockIdentityClient struct { + common.BaseClient +} + +// ListAvailabilityDomains mocks the client ListAvailabilityDomains implementation +func (client MockIdentityClient) ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) { + return +} + +// MockProvisionerClient mocks client structure +type MockProvisionerClient struct { +} + +// BlockStorage mocks client BlockStorage implementation +func (p *MockProvisionerClient) BlockStorage() client.BlockStorage { + return &MockBlockStorageClient{} +} + +// FileStorage mocks client FileStorage implementation +func (p *MockProvisionerClient) FileStorage() client.FileStorage { + return &MockFileStorageClient{} +} + +// Identity mocks client Identity implementation +func (p *MockProvisionerClient) Identity() 
client.Identity { + return &MockIdentityClient{} +} + +// Context mocks client Context implementation +func (p *MockProvisionerClient) Context() context.Context { + return context.Background() +} + +// Timeout mocks client Timeout implementation +func (p *MockProvisionerClient) Timeout() time.Duration { + return 30 * time.Second +} + +// CompartmentOCID mocks client CompartmentOCID implementation +func (p *MockProvisionerClient) CompartmentOCID() (compartmentOCID string) { + return "" +} + +// TenancyOCID mocks client TenancyOCID implementation +func (p *MockProvisionerClient) TenancyOCID() string { + return "ocid1.tenancy.oc1..aaaaaaaatyn7scrtwtqedvgrxgr2xunzeo6uanvyhzxqblctwkrpisvke4kq" +} + +// NewClientProvisioner creates an OCI client from the given configuration. +func NewClientProvisioner(pcData client.ProvisionerClient) client.ProvisionerClient { + return &MockProvisionerClient{} +} diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 50b310a60..6ea32eb73 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -33,6 +33,7 @@ import ( "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/common/auth" "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" "github.com/oracle/oci-go-sdk/identity" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" ) @@ -42,6 +43,7 @@ type provisionerClient struct { cfg *Config blockStorage *core.BlockstorageClient identity *identity.IdentityClient + fileStorage *filestorage.FileStorageClient context context.Context timeout time.Duration metadata *instancemeta.InstanceMetadata @@ -59,10 +61,17 @@ type Identity interface { ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) } +// FileStorage specifies the subset of the OCI core API utilised by the provisioner. 
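Because the mocks above and the client interfaces they stand in for are both maintained by hand, they can drift apart silently as methods are added. A small guard worth considering — a sketch, not part of the patch — is a set of compile-time interface assertions in pkg/helpers, mirroring the `var _ plugin.ProvisionerPlugin = &filesystemProvisioner{}` check used later in this series:

```go
package helpers

import "github.com/oracle/oci-volume-provisioner/pkg/oci/client"

// Compile-time assertions: if a method is added to the interfaces in
// pkg/oci/client without updating these mocks, the build fails here
// rather than at test runtime. Illustrative only; the mock types are
// the ones defined above.
var (
	_ client.BlockStorage      = (*MockBlockStorageClient)(nil)
	_ client.FileStorage       = (*MockFileStorageClient)(nil)
	_ client.ProvisionerClient = (*MockProvisionerClient)(nil)
)
```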
+type FileStorage interface { + CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) + DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) +} + // ProvisionerClient is passed to all sub clients to provision a volume type ProvisionerClient interface { BlockStorage() BlockStorage Identity() Identity + FileStorage() FileStorage Context() context.Context Timeout() time.Duration CompartmentOCID() string @@ -77,6 +86,10 @@ func (p *provisionerClient) Identity() Identity { return p.identity } +func (p *provisionerClient) FileStorage() FileStorage { + return p.fileStorage +} + func (p *provisionerClient) Context() context.Context { return p.context } @@ -119,6 +132,11 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { return nil, err } + fileStorage, err := filestorage.NewFileStorageClientWithConfigurationProvider(config) + if err != nil { + return nil, err + } + identity, err := identity.NewIdentityClientWithConfigurationProvider(config) if err != nil { return nil, err @@ -137,6 +155,7 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { cfg: cfg, blockStorage: &blockStorage, identity: &identity, + fileStorage: &fileStorage, timeout: 3 * time.Minute, context: context.Background(), metadata: metadata, diff --git a/pkg/provisioner/block/block_test.go b/pkg/provisioner/block/block_test.go index 5575ea629..47c1b2761 100644 --- a/pkg/provisioner/block/block_test.go +++ b/pkg/provisioner/block/block_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "github.com/oracle/oci-volume-provisioner/pkg/helpers" "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" @@ -35,6 +36,7 @@ import ( var ( volumeBackupID = "dummyVolumeBackupId" defaultAD = identity.AvailabilityDomain{Name: common.String("PHX-AD-1"), CompartmentId: common.String("ocid1.compartment.oc1")} + fileSystemID = "dummyFileSystemId" ) func TestResolveFSTypeWhenNotConfigured(t *testing.T) { @@ -121,7 +123,7 @@ func TestCreateVolumeFromBackup(t *testing.T) { PVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - ociVolumeBackupID: volumeBackupID, + ociVolumeBackupID: helpers.VolumeBackupID, }, }, Spec: v1.PersistentVolumeClaimSpec{ @@ -148,8 +150,8 @@ func TestCreateVolumeFromBackup(t *testing.T) { if err != nil { t.Fatalf("Failed to provision volume from block storage: %v", err) } - if provisionedVolume.Annotations[ociVolumeID] != volumeBackupID { - t.Fatalf("Failed to assign the id of the blockID: %s, assigned %s instead", volumeBackupID, + if provisionedVolume.Annotations[ociVolumeID] != helpers.VolumeBackupID { + t.Fatalf("Failed to assign the id of the blockID: %s, assigned %s instead", helpers.VolumeBackupID, provisionedVolume.Annotations[ociVolumeID]) } } diff --git a/pkg/provisioner/core/provisioner.go b/pkg/provisioner/core/provisioner.go index 6f32b8d31..33abb8537 100644 --- a/pkg/provisioner/core/provisioner.go +++ b/pkg/provisioner/core/provisioner.go @@ -34,6 +34,7 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/block" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner/filestorage" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" ) @@ -88,7 +89,15 @@ func 
NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1
 		kubeClient:       kubeClient,
 		nodeLister:       nodeInformer.Lister(),
 		nodeListerSynced: nodeInformer.Informer().HasSynced,
+<<<<<<< HEAD
 		provisioner:      blockProvisioner,
+=======
+		storageClassProvisioners: map[string]plugin.ProvisionerPlugin{
+			"oci":      blockProvisioner,
+			"oci-ext3": blockProvisioner,
+			"oci-fss":  filestorage.NewFilesystemProvisioner(client),
+		},
+>>>>>>> File system storage
 	}
 }
diff --git a/pkg/provisioner/filestorage/filestorage.go b/pkg/provisioner/filestorage/filestorage.go
new file mode 100644
index 000000000..282036666
--- /dev/null
+++ b/pkg/provisioner/filestorage/filestorage.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filestorage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/golang/glog"
+	"github.com/kubernetes-incubator/external-storage/lib/controller"
+	"github.com/oracle/oci-go-sdk/common"
+	"github.com/oracle/oci-go-sdk/filestorage"
+	"github.com/oracle/oci-go-sdk/identity"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/pkg/api/v1"
+
+	"github.com/oracle/oci-volume-provisioner/pkg/oci/client"
+	"github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin"
+)
+
+const (
+	ociVolumeID            = "ociVolumeID"
+	volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX"
+	fsType                 = "fsType"
+)
+
+// filesystemProvisioner is the internal provisioner for OCI filesystem volumes
+type filesystemProvisioner struct {
+	client client.ProvisionerClient
+}
+
+var _ plugin.ProvisionerPlugin = &filesystemProvisioner{}
+
+// NewFilesystemProvisioner creates a new file system provisioner that creates
+// filesystems using the OCI file system service.
+func NewFilesystemProvisioner(client client.ProvisionerClient) plugin.ProvisionerPlugin {
+	return &filesystemProvisioner{
+		client: client,
+	}
+}
+
+func (filesystem *filesystemProvisioner) Provision(
+	options controller.VolumeOptions,
+	availabilityDomain *identity.AvailabilityDomain) (*v1.PersistentVolume, error) {
+
+	ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout())
+	defer cancel()
+
+	response, err := filesystem.client.FileStorage().CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{
+		CreateFileSystemDetails: filestorage.CreateFileSystemDetails{
+			AvailabilityDomain: availabilityDomain.Name,
+			CompartmentId:      common.String(filesystem.client.CompartmentOCID()),
+			DisplayName:        common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), options.PVC.Name)),
+		},
+	})
+	if err != nil {
+		glog.Errorf("Failed to create a volume:%#v, %s", options, err)
+		return nil, err
+	}
+
+	return &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: *response.FileSystem.Id,
+			Annotations: map[string]string{
+				ociVolumeID: *response.FileSystem.Id,
+			},
+			Labels: map[string]string{},
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy,
+			AccessModes:                   options.PVC.Spec.AccessModes,
+			//FIXME: fs storage doesn't enforce quota, capacity is meaningless here.
+			Capacity: v1.ResourceList{
+				v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
+			},
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				FlexVolume: &v1.FlexVolumeSource{
+					Driver: plugin.OCIProvisionerName,
+					FSType: "NFSv3",
+				},
+			},
+		},
+	}, nil
+}
+
+// Delete destroys an OCI volume created by Provision
+func (filesystem *filesystemProvisioner) Delete(volume *v1.PersistentVolume) error {
+	filesystemID, ok := volume.Annotations[ociVolumeID]
+	if !ok {
+		return errors.New("filesystemid annotation not found on PV")
+	}
+	glog.Infof("Deleting volume %v with filesystemID %v", volume, filesystemID)
+	ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout())
+	defer cancel()
+	_, err := filesystem.client.FileStorage().DeleteFileSystem(ctx,
+		filestorage.DeleteFileSystemRequest{
+			FileSystemId: &filesystemID,
+		})
+	return err
+}
diff --git a/pkg/provisioner/filestorage/filestorage_test.go b/pkg/provisioner/filestorage/filestorage_test.go
new file mode 100644
index 000000000..54fe3e2a2
--- /dev/null
+++ b/pkg/provisioner/filestorage/filestorage_test.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filestorage
+
+import (
+	"testing"
+
+	"github.com/kubernetes-incubator/external-storage/lib/controller"
+	"github.com/oracle/oci-go-sdk/common"
+	"github.com/oracle/oci-go-sdk/identity"
+	"github.com/oracle/oci-volume-provisioner/pkg/helpers"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/pkg/api/v1"
+)
+
+func TestCreateVolumeWithFSS(t *testing.T) {
+	// test creating a volume on a file system storage
+	options := controller.VolumeOptions{PVName: "dummyVolumeOptions",
+		PVC: &v1.PersistentVolumeClaim{
+			ObjectMeta: metav1.ObjectMeta{},
+		}}
+	ad := identity.AvailabilityDomain{Name: common.String("dummyAdName"), CompartmentId: common.String("dummyCompartmentId")}
+	fss := filesystemProvisioner{client: helpers.NewClientProvisioner(nil)}
+	_, err := fss.Provision(options, &ad)
+	if err != nil {
+		t.Fatalf("Failed to provision volume from file system storage: %v", err)
+	}
+
+}
diff --git a/test/system/runner.py b/test/system/runner.py
index 958cd6aeb..85ab206c4 100755
--- a/test/system/runner.py
+++ b/test/system/runner.py
@@ -46,6 +46,13 @@
 REPORT_FILE="done"
 POD_CONTROLLER = "controller"
 POD_VOLUME = "volume"
+BLOCK_STORAGE = "block"
+FS_STORAGE = "fileSystem"
+DEFAULT_AVAILABILITY_DOMAIN="NWuj:PHX-AD-2"
+LIFECYCLE_STATE_ON = {BLOCK_STORAGE: 'AVAILABLE',
+                      FS_STORAGE: 'ACTIVE'}
+LIFECYCLE_STATE_OFF = {BLOCK_STORAGE: 'TERMINATED',
+                       FS_STORAGE:'DELETED'}
 
 # On exit return 0 for success or any other integer for a failure.
 # If write_report is true then write a completion file to the Sonabuoy plugin result file.
@@ -285,13 +292,24 @@ def _oci_config():
         _finish_with_exit_code(1)
 
 
-def _volume_exists(compartment_id, volume, state, backup=False):
-    '''Verify whether the volume is available or not'''
-    client = oci.core.blockstorage_client.BlockstorageClient(_oci_config())
-    if backup:
-        volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id)
+def _volume_exists(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None):
+    '''Verify whether the volume is available or not
+    @param storageType: Storage type to search for volumes in
+    @type storageType: C{Str}
+    @param availability_domain: Availability domain to search in
+    @type availability_domain: C{Str}'''
+    if storageType == BLOCK_STORAGE:
+        _log("Retrieving block volumes")
+        client = oci.core.blockstorage_client.BlockstorageClient(_oci_config())
+        if backup:
+            volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id)
+        else:
+            volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id)
     else:
-        volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id)
+        _log("Retrieving file systems")
+        client = oci.file_storage.FileStorageClient(_oci_config())
+        volumes = oci.pagination.list_call_get_all_results(client.list_file_systems, compartment_id,
+                                                           availability_domain)
     _log("Getting status for volume %s" % volume)
     for vol in _get_json_doc(str(volumes.data)):
         if vol['id'].endswith(volume) and vol['lifecycle_state'] == state:
@@ -336,9 +354,10 @@ def _create_volume_from_backup(backup_ocid, test_id, availability_domain, compar
     except Exception as exc:
         _log("Failed to create volume from backup %s" % exc)
 
-def _wait_for_volume(compartment_id, volume, state, backup=False):
+def _wait_for_volume(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None):
     num_polls = 0
-    while not _volume_exists(compartment_id, volume, state,
backup): + while not _volume_exists(compartment_id, volume, state, backup, storageType=storageType, + availability_domain=availability_domain): _log(" waiting...") time.sleep(1) num_polls += 1 @@ -346,12 +365,14 @@ def _wait_for_volume(compartment_id, volume, state, backup=False): return False return True -def _wait_for_volume_to_create(compartment_id, volume, backup=False): - return _wait_for_volume(compartment_id, volume, 'AVAILABLE', backup) +def _wait_for_volume_to_create(compartment_id, volume, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_ON[storageType], backup, storageType=storageType, + availability_domain=availability_domain) -def _wait_for_volume_to_delete(compartment_id, volume, backup=False): - return _wait_for_volume(compartment_id, volume, 'TERMINATED', backup) +def _wait_for_volume_to_delete(compartment_id, volume, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_OFF[storageType], backup, storageType=storageType, + availability_domain=availability_domain) def _get_compartment_id(pod_name): @@ -398,6 +419,8 @@ def _cleanup(exit_on_error=False, display_errors=True): exit_on_error, display_errors) _kubectl("delete -f ../../dist/storage-class.yaml", exit_on_error, display_errors) + _kubectl("delete -f ../../dist/storage-class-fss.yaml", + exit_on_error, display_errors) _kubectl("delete -f ../../dist/storage-class-ext3.yaml", exit_on_error, display_errors) _kubectl("-n kube-system delete secret oci-volume-provisioner", @@ -441,18 +464,19 @@ def _create_yaml(template, test_id, region=None, backup_id=None): def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_oci, test_id=None, - availability_domain=None, verify_func=None): + availability_domain=None, verify_func=None, storageType=BLOCK_STORAGE): '''Test making a volume claim from a configuration file @param backup_ocid: Verify whether the volume created from a backup contains backup info @type backup_ocid: C{Str}''' _kubectl("create -f " + claim_target, exit_on_error=False) volume = _get_volume_and_wait(claim_volume_name) - _log("Created volume with name: " + volume) + _log("Created volume with name: %s" % str(volume)) if check_oci: _log("Querying the OCI api to make sure a volume with this name exists...") - if not _wait_for_volume_to_create(compartment_id, volume): + if not _wait_for_volume_to_create(compartment_id, volume, storageType=storageType, + availability_domain=availability_domain): _log("Failed to find volume with name: " + volume) return False _log("Volume: " + volume + " is present and available") @@ -465,8 +489,10 @@ def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_o if check_oci: _log("Querying the OCI api to make sure a volume with this name now doesnt exist...") - _wait_for_volume_to_delete(compartment_id, volume) - if not _volume_exists(compartment_id, volume, 'TERMINATED'): + _wait_for_volume_to_delete(compartment_id, volume, storageType=storageType, + availability_domain=availability_domain) + if not _volume_exists(compartment_id, volume, LIFECYCLE_STATE_OFF[storageType], storageType=storageType, + availability_domain=availability_domain): _log("Volume with name: " + volume + " still exists") return False _log("Volume: " + volume + " has now been terminated") @@ -599,7 +625,7 @@ def _verify_file_existance_via_replication_controller(rc_name, file_name="hello. 
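The `_wait_for_volume*` helpers above poll `_volume_exists` once a second up to a fixed number of attempts, with the target lifecycle state looked up per storage type from `LIFECYCLE_STATE_ON`/`LIFECYCLE_STATE_OFF`. For illustration, the same loop rendered in Go (the runner itself is Python; all names in this sketch are invented):

```go
package runner

import (
	"errors"
	"time"
)

// waitForLifecycleState re-checks a predicate once a second until it
// reports true or maxPolls attempts are exhausted, mirroring the
// _wait_for_volume loop in test/system/runner.py.
func waitForLifecycleState(check func() (bool, error), maxPolls int) error {
	for i := 0; i < maxPolls; i++ {
		ok, err := check()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(1 * time.Second)
	}
	return errors.New("timed out waiting for desired lifecycle state")
}
```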
sys.exit(1) _log("Yes it does!") -def _setup_create_volume_from_backup(terraform_env, test_id): +def _setup_create_volume_from_backup(terraform_env, test_id, storageType=BLOCK_STORAGE, availability_domain=None): '''Setup environment for creating a volume from a backup device @param test_id: Test id used to append to component names @type test_id : C{Str} @@ -616,8 +642,9 @@ def _setup_create_volume_from_backup(terraform_env, test_id): _verify_file_existance_via_replication_controller(_rc_name) # Create backup from generated volume _backup_ocid, compartment_id, _volume_name = _create_backup(_get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID), test_id) - if not _wait_for_volume_to_create(compartment_id, _backup_ocid, backup=True): - _log("Failed to find backup with name: " + _volume_name) + if not _wait_for_volume_to_create(compartment_id, _backup_ocid, backup=True, storageType=storageType, + availability_domain=availability_domain): + _log("Failed to find backup with name: " + _volume_name) return _backup_ocid, _availability_domain def _tear_down_create_volume_from_backup(terraform_env, backup_ocid): @@ -716,6 +743,7 @@ def _destroy_key_files_atexit(): exit_on_error=False) _kubectl("create -f ../../dist/storage-class.yaml", exit_on_error=False) _kubectl("create -f ../../dist/storage-class-ext3.yaml", exit_on_error=False) + _kubectl("create -f ../../dist/storage-class-fss.yaml", exit_on_error=False) _kubectl("create -f ../../dist/oci-volume-provisioner-rbac.yaml", exit_on_error=False) _kubectl("create -f ../../dist/oci-volume-provisioner.yaml", exit_on_error=False) pod_name, _, _ = _wait_for_pod_status("Running", test_id, POD_VOLUME) @@ -749,9 +777,18 @@ def _teardown_atexit(): res = _test_create_volume(compartment_id, _create_yaml("../../examples/example-claim-no-AD.template", test_id, None), "demooci-no-ad-" + test_id, args['check_oci']) +<<<<<<< HEAD update_canary_metric(CM_NO_AD, int(res)) success = False if res == False else success - +======= +>>>>>>> cc023ec8... 
File system storage + + _log("Running system test: Create volume with FSS", as_banner=True) + _test_create_volume(compartment_id, + _create_yaml("../../examples/example-claim-fss.template", test_id, _get_region()), + "demooci-fss-" + test_id, args['check_oci'], availability_domain=DEFAULT_AVAILABILITY_DOMAIN, + storageType=FS_STORAGE) + _log("Running system test: Create volume from backup", as_banner=True) if args['check_oci']: _log("Running system test: Create volume from backup", as_banner=True) terraform_env = _get_terraform_env() From df40f795e8f068d29ab5fc3ceb9512eb05294255 Mon Sep 17 00:00:00 2001 From: MadalinaPatrichi Date: Wed, 18 Jul 2018 14:52:07 +0100 Subject: [PATCH 2/5] FSS one-to-one implementation --- Dockerfile.test | 2 +- cmd/main.go | 14 ++++- manifests/oci-volume-provisioner.yaml | 2 + manifests/storage-class.yaml | 2 +- pkg/oci/client/client.go | 3 ++ pkg/provisioner/core/provisioner.go | 9 ---- pkg/provisioner/filestorage/filestorage.go | 62 ++++++++++++++++++++-- test/system/README.md | 4 ++ test/system/runner.py | 28 +++++++++- 9 files changed, 107 insertions(+), 19 deletions(-) diff --git a/Dockerfile.test b/Dockerfile.test index a0783c848..ed4fcf639 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -1,4 +1,4 @@ -FROM iad.ocir.io/oracle/oci-volume-provisioner-system-test:1.0.0 +FROM iad.ocir.io/oracle/oci-volume-provisioner-system-test:1.0.2 COPY dist /dist COPY manifests /manifests diff --git a/cmd/main.go b/cmd/main.go index e9e63e0d3..40710e1e5 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -36,13 +36,15 @@ import ( const ( resyncPeriod = 15 * time.Second minResyncPeriod = 12 * time.Hour - provisionerName = "oracle.com/oci" + provisionerNameBlock = "oracle.com/oci" + provisionerNameFss = "oracle.com/oci-fss" exponentialBackOffOnError = false failedRetryThreshold = 5 leasePeriod = controller.DefaultLeaseDuration retryPeriod = controller.DefaultRetryPeriod renewDeadline = controller.DefaultRenewDeadline termLimit = controller.DefaultTermLimit + provisionerTypeArg = "provisionerType" ) // informerResyncPeriod computes the time interval a shared informer waits @@ -92,6 +94,14 @@ func main() { glog.Fatal("env variable NODE_NAME must be set so that this provisioner can identify itself") } + // Decides what type of provider to deploy, either block or fss + provisionerType := os.Getenv("PROVISIONER_TYPE") + if provisionerType == "" { + provisionerType = provisionerNameBlock + } + + glog.Infof("Starting volume provisioner in %s mode", provisionerType) + sharedInformerFactory := informers.NewSharedInformerFactory(clientset, informerResyncPeriod(minResyncPeriod)()) volumeSizeLowerBound, err := resource.ParseQuantity(*minVolumeSize) @@ -107,7 +117,7 @@ func main() { // PVs pc := controller.NewProvisionController( clientset, - provisionerName, + provisionerType, ociProvisioner, serverVersion.GitVersion, controller.ResyncPeriod(resyncPeriod), diff --git a/manifests/oci-volume-provisioner.yaml b/manifests/oci-volume-provisioner.yaml index a42359f11..4d89a8498 100644 --- a/manifests/oci-volume-provisioner.yaml +++ b/manifests/oci-volume-provisioner.yaml @@ -19,6 +19,8 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: PROVISIONER_TYPE + value: oracle.com/oci-fss volumeMounts: - name: config mountPath: /etc/oci/ diff --git a/manifests/storage-class.yaml b/manifests/storage-class.yaml index e8657b065..c7e950c1a 100644 --- a/manifests/storage-class.yaml +++ b/manifests/storage-class.yaml @@ -2,4 +2,4 @@ kind: StorageClass apiVersion: storage.k8s.io/v1beta1 
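The cmd/main.go hunk above is the crux of patch 2's design: one binary that registers under a different provisioner name per deployment, selected by the PROVISIONER_TYPE environment variable. Condensed into a runnable sketch of just that selection:

```go
package main

import (
	"fmt"
	"os"
)

const provisionerNameBlock = "oracle.com/oci"

// provisionerTypeFromEnv mirrors the selection in cmd/main.go:
// PROVISIONER_TYPE picks the name this process registers under,
// falling back to the block-volume provisioner when unset.
func provisionerTypeFromEnv() string {
	if v := os.Getenv("PROVISIONER_TYPE"); v != "" {
		return v
	}
	return provisionerNameBlock
}

func main() {
	fmt.Printf("Starting volume provisioner in %s mode\n", provisionerTypeFromEnv())
}
```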
metadata: name: oci -provisioner: oracle.com/oci +provisioner: oracle.com/oci-bvs diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 6ea32eb73..534cf2a58 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -65,6 +65,9 @@ type Identity interface { type FileStorage interface { CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) + CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) + CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) + GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) } // ProvisionerClient is passed to all sub clients to provision a volume diff --git a/pkg/provisioner/core/provisioner.go b/pkg/provisioner/core/provisioner.go index 33abb8537..6f32b8d31 100644 --- a/pkg/provisioner/core/provisioner.go +++ b/pkg/provisioner/core/provisioner.go @@ -34,7 +34,6 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/block" - "github.com/oracle/oci-volume-provisioner/pkg/provisioner/filestorage" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" ) @@ -89,15 +88,7 @@ func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1 kubeClient: kubeClient, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, -<<<<<<< HEAD provisioner: blockProvisioner, -======= - storageClassProvisioners: map[string]plugin.ProvisionerPlugin{ - "oci": blockProvisioner, - "oci-ext3": blockProvisioner, - "oci-fss": filestorage.NewFilesystemProvisioner(client), - }, ->>>>>>> File system storage } } diff --git a/pkg/provisioner/filestorage/filestorage.go b/pkg/provisioner/filestorage/filestorage.go index 282036666..9566419ff 100644 --- a/pkg/provisioner/filestorage/filestorage.go +++ b/pkg/provisioner/filestorage/filestorage.go @@ -36,6 +36,8 @@ const ( ociVolumeID = "ociVolumeID" volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX" fsType = "fsType" + subnetID = "subnetId" + mntTargetID = "mntTargetId" ) // filesystemProvisioner is the internal provisioner for OCI filesystem volumes @@ -53,6 +55,18 @@ func NewFilesystemProvisioner(client client.ProvisionerClient) plugin.Provisione } } +// getMountTargetFromID retrieves mountTarget from given mountTargetID +func getMountTargetFromID(ctx context.Context, mountTargetID string, fileStorageClient client.FileStorage) *filestorage.MountTarget { + responseMnt, err := fileStorageClient.GetMountTarget(ctx, filestorage.GetMountTargetRequest{ + MountTargetId: common.String(mountTargetID), + }) + if err != nil { + glog.Errorf("Failed to retrieve mount point: %s", err) + return nil + } + return &responseMnt.MountTarget +} + func (filesystem *filesystemProvisioner) Provision( options controller.VolumeOptions, availabilityDomain *identity.AvailabilityDomain) (*v1.PersistentVolume, error) { @@ -60,7 +74,8 @@ func (filesystem *filesystemProvisioner) Provision( ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout()) defer 
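Every OCI call in Provision and Delete runs under a context derived from the client's configured timeout, and the cancel function is always released with defer. The pattern, factored out as a sketch (the provisioner inlines it at each call site rather than using a helper like this):

```go
package filestorage

import (
	"context"
	"time"
)

// withRequestTimeout bounds a single OCI API call by the client's
// configured timeout and guarantees the derived context is released.
func withRequestTimeout(parent context.Context, timeout time.Duration,
	call func(ctx context.Context) error) error {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()
	return call(ctx)
}
```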
cancel() - response, err := filesystem.client.FileStorage().CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{ + fileStorageClient := filesystem.client.FileStorage() + response, err := fileStorageClient.CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{ CreateFileSystemDetails: filestorage.CreateFileSystemDetails{ AvailabilityDomain: availabilityDomain.Name, CompartmentId: common.String(filesystem.client.CompartmentOCID()), @@ -68,10 +83,46 @@ func (filesystem *filesystemProvisioner) Provision( }, }) if err != nil { - glog.Errorf("Failed to create a volume:%#v, %s", options, err) + glog.Errorf("Failed to create a file system storage:%#v, %s", options, err) + return nil, err + } + + mntTargetResp := filestorage.MountTarget{} + if options.Parameters[mntTargetID] == "" { + // Mount target not created, create a new one + responseMnt, err := fileStorageClient.CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ + CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ + AvailabilityDomain: availabilityDomain.Name, + SubnetId: common.String(options.Parameters[subnetID]), + CompartmentId: common.String(filesystem.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), + }, + }) + if err != nil { + glog.Errorf("Failed to create a mount point:%#v, %s", options, err) + return nil, err + } + mntTargetResp = responseMnt.MountTarget + } else { + // Mount target already specified in the configuration file, find it in the list of mount targets + mntTargetResp = *getMountTargetFromID(ctx, options.Parameters[mntTargetID], fileStorageClient) + } + + glog.Infof("Creating export set") + _, err = fileStorageClient.CreateExport(ctx, filestorage.CreateExportRequest{ + CreateExportDetails: filestorage.CreateExportDetails{ + ExportSetId: mntTargetResp.ExportSetId, + FileSystemId: response.FileSystem.Id, + Path: common.String("/"), + }, + }) + + if err != nil { + glog.Errorf("Failed to create export:%s", err) return nil, err } + glog.Infof("Creating persistent volume") return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: *response.FileSystem.Id, @@ -88,9 +139,10 @@ func (filesystem *filesystemProvisioner) Provision( v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], }, PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: &v1.FlexVolumeSource{ - Driver: plugin.OCIProvisionerName, - FSType: "NFSv3", + NFS: &v1.NFSVolumeSource{ + Server: *mntTargetResp.SubnetId, + Path: "/", + ReadOnly: false, }, }, }, diff --git a/test/system/README.md b/test/system/README.md index 2297a6138..ebda0c8f5 100644 --- a/test/system/README.md +++ b/test/system/README.md @@ -18,6 +18,10 @@ files. Alternatively, OCICONFIG_VAR/KUBECONFIG_VAR must contain the content of the required files (base64 encoded). If both are set, the former will take precedence. 
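In the filestorage.go hunk above, Provision either reuses the mount target named by the `mntTargetId` storage-class parameter or creates a new one on the subnet given by `subnetId`. The decision path, condensed into a hypothetical helper (the request and field names are the ones used in the hunk; the helper itself is not part of the patch):

```go
package filestorage

import (
	"context"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/filestorage"

	"github.com/oracle/oci-volume-provisioner/pkg/oci/client"
)

// ensureMountTarget reuses the mount target named in the storage-class
// parameters when one is given, and otherwise creates a mount target on
// the supplied subnet in the given availability domain and compartment.
func ensureMountTarget(ctx context.Context, fs client.FileStorage,
	ad, compartmentID, subnetID, mountTargetID string) (*filestorage.MountTarget, error) {
	if mountTargetID != "" {
		resp, err := fs.GetMountTarget(ctx, filestorage.GetMountTargetRequest{
			MountTargetId: common.String(mountTargetID),
		})
		if err != nil {
			return nil, err
		}
		return &resp.MountTarget, nil
	}
	resp, err := fs.CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{
		CreateMountTargetDetails: filestorage.CreateMountTargetDetails{
			AvailabilityDomain: common.String(ad),
			SubnetId:           common.String(subnetID),
			CompartmentId:      common.String(compartmentID),
		},
	})
	if err != nil {
		return nil, err
	}
	return &resp.MountTarget, nil
}
```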
+* $MNT_TARGET_OCID + +Mount target ocid used to attach file systems to for the oci-fss provisioner type + We can then run the system test as follows: ``` diff --git a/test/system/runner.py b/test/system/runner.py index 85ab206c4..4962a9be2 100755 --- a/test/system/runner.py +++ b/test/system/runner.py @@ -29,6 +29,8 @@ import oci import yaml + +MNT_TARGET_OCID = "MNT_TARGET_OCID" DEBUG_FILE = "runner.log" TERRAFORM_CLUSTER = "terraform/cluster" TERRAFORM_DIR = "terraform" @@ -109,7 +111,6 @@ def _destroy_key_files(check_oci): if check_oci: os.remove(TMP_OCI_API_KEY_FILE) - def _get_kubeconfig(): return os.environ['KUBECONFIG'] if "KUBECONFIG" in os.environ else TMP_KUBECONFIG @@ -121,6 +122,30 @@ def _get_oci_config_file(): def _get_oci_api_key_file(): return TMP_OCI_API_KEY_FILE +def _create_fss_storage_config(infile, outfile, test): + '''Based on the storage config template file and the mount arget OCID info, generate + a valid storage config yaml to use for creating an OCI Export + @param infile: File to use as template + @type infile: C{Str} + @param outfile: Outfile to storage class config file to + @type outfile: C{Str}''' + _log("Creating fss storage claim configuration file") + _mntTargetOCID = os.environ[MNT_TARGET_OCID] + if not _mntTargetOCID: + _log("No mount target OCID provided") + with open(infile, "r") as sources: + lines = sources.readlines() + with open(outfile + "." + test_id, "w") as sources: + for line in lines: + patched_line = line + if volume_name is not None: + patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line) + patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) + if availability_domain: + availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':' + patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line) + sources.write(patched_line) + return outfile + "." 
+ test_id def _banner(as_banner, bold): if as_banner: @@ -734,6 +759,7 @@ def _destroy_key_files_atexit(): success = True + _create_fss_storage_config() if args['setup']: # Cleanup in case any existing state exists in the cluster _cleanup(display_errors=False) From b85fd87c75363cf4a8d94c19ec3108d6be14a92a Mon Sep 17 00:00:00 2001 From: MadalinaPatrichi Date: Mon, 23 Jul 2018 14:48:41 +0100 Subject: [PATCH 3/5] Added system test for fss -Correctly mock out FileStorageClient -Start separate volume provisioner for fss case _ start of impl of list mount target -Add check for listing mount targets + add .yaml files to gitignore -Remove reference to non existing pvc and update the oci provisioner fss config --- .gitignore | 1 + Makefile | 3 +- examples/example-claim-fss.template | 2 +- examples/example-pod-fss.yaml | 18 +++ .../example-storage-class-fss.template | 4 +- manifests/oci-volume-provisioner-fss.yaml | 31 ++++ manifests/oci-volume-provisioner.yaml | 2 +- manifests/storage-class.yaml | 2 +- pkg/helpers/mock_interfaces.go | 20 +++ pkg/oci/client/client.go | 1 + pkg/provisioner/filestorage/filestorage.go | 46 ++++-- test/system/README.md | 4 +- test/system/runner.py | 70 +++------ .../cmd/nfs-client-provisioner/provisioner.go | 147 ++++++++++++++++++ 14 files changed, 283 insertions(+), 68 deletions(-) create mode 100644 examples/example-pod-fss.yaml rename manifests/storage-class-fss.yaml => examples/example-storage-class-fss.template (51%) create mode 100644 manifests/oci-volume-provisioner-fss.yaml create mode 100644 vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go diff --git a/.gitignore b/.gitignore index 839823718..0dd5b90b7 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ env.sh *.log test/system/venv/ test/system/run-test-image.yaml* +examples/*.yaml \ No newline at end of file diff --git a/Makefile b/Makefile index d948e8b33..6638c6f0f 100644 --- a/Makefile +++ b/Makefile @@ -59,9 +59,10 @@ test: build: ${DIR}/${BIN} sed 's#@VERSION@#${VERSION}#g; s#@IMAGE@#${IMAGE}#g' \ manifests/oci-volume-provisioner.yaml > $(DIR)/oci-volume-provisioner.yaml + sed 's#@VERSION@#${VERSION}#g; s#@IMAGE@#${IMAGE}#g' \ + manifests/oci-volume-provisioner-fss.yaml > $(DIR)/oci-volume-provisioner-fss.yaml cp manifests/storage-class.yaml $(DIR)/storage-class.yaml cp manifests/storage-class-ext3.yaml $(DIR)/storage-class-ext3.yaml - cp manifests/storage-class-fss.yaml $(DIR)/storage-class-fss.yaml cp manifests/oci-volume-provisioner-rbac.yaml $(DIR)/oci-volume-provisioner-rbac.yaml diff --git a/examples/example-claim-fss.template b/examples/example-claim-fss.template index a50c4af55..15f4e77fe 100644 --- a/examples/example-claim-fss.template +++ b/examples/example-claim-fss.template @@ -11,4 +11,4 @@ spec: - ReadWriteMany resources: requests: - storage: 50Gi + storage: 1Gi # Required by Kubernetes but not used by FSS diff --git a/examples/example-pod-fss.yaml b/examples/example-pod-fss.yaml new file mode 100644 index 000000000..72515a2a1 --- /dev/null +++ b/examples/example-pod-fss.yaml @@ -0,0 +1,18 @@ +kind: Pod +apiVersion: v1 +metadata: + name: ocidemo-fss +spec: + volumes: + - name: nginx + persistentVolumeClaim: + claimName: fss-pvc + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage diff --git a/manifests/storage-class-fss.yaml b/examples/example-storage-class-fss.template similarity index 51% 
rename from manifests/storage-class-fss.yaml rename to examples/example-storage-class-fss.template index d0bda0578..9b1a87a4f 100644 --- a/manifests/storage-class-fss.yaml +++ b/examples/example-storage-class-fss.template @@ -2,4 +2,6 @@ kind: StorageClass apiVersion: storage.k8s.io/v1beta1 metadata: name: oci-fss -provisioner: oracle.com/oci \ No newline at end of file +provisioner: oracle.com/oci-fss +parameters: + subnetId: {{MNT_TARGET_OCID}} \ No newline at end of file diff --git a/manifests/oci-volume-provisioner-fss.yaml b/manifests/oci-volume-provisioner-fss.yaml new file mode 100644 index 000000000..a4fd4f032 --- /dev/null +++ b/manifests/oci-volume-provisioner-fss.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: oci-volume-provisioner-fss + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + app: oci-volume-provisioner + spec: + serviceAccountName: oci-volume-provisioner + containers: + - name: oci-volume-provisioner + image: @IMAGE@:@VERSION@ + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: PROVISIONER_TYPE + value: oracle.com/oci-fss + volumeMounts: + - name: config + mountPath: /etc/oci/ + readOnly: true + volumes: + - name: config + secret: + secretName: oci-volume-provisioner diff --git a/manifests/oci-volume-provisioner.yaml b/manifests/oci-volume-provisioner.yaml index 4d89a8498..052169fa8 100644 --- a/manifests/oci-volume-provisioner.yaml +++ b/manifests/oci-volume-provisioner.yaml @@ -20,7 +20,7 @@ spec: fieldRef: fieldPath: spec.nodeName - name: PROVISIONER_TYPE - value: oracle.com/oci-fss + value: oracle.com/oci volumeMounts: - name: config mountPath: /etc/oci/ diff --git a/manifests/storage-class.yaml b/manifests/storage-class.yaml index c7e950c1a..e8657b065 100644 --- a/manifests/storage-class.yaml +++ b/manifests/storage-class.yaml @@ -2,4 +2,4 @@ kind: StorageClass apiVersion: storage.k8s.io/v1beta1 metadata: name: oci -provisioner: oracle.com/oci-bvs +provisioner: oracle.com/oci diff --git a/pkg/helpers/mock_interfaces.go b/pkg/helpers/mock_interfaces.go index ce4b6d44d..7c8ac40bc 100644 --- a/pkg/helpers/mock_interfaces.go +++ b/pkg/helpers/mock_interfaces.go @@ -64,6 +64,26 @@ func (c *MockFileStorageClient) DeleteFileSystem(ctx context.Context, request fi return filestorage.DeleteFileSystemResponse{}, nil } +// CreateExport mocks the FileStorage CreateExport implementation +func (c *MockFileStorageClient) CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) { + return filestorage.CreateExportResponse{}, nil +} + +// CreateMountTarget mocks the FileStorage CreateMountTarget implementation +func (c *MockFileStorageClient) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) { + return filestorage.CreateMountTargetResponse{}, nil +} + +// GetMountTarget mocks the FileStorage GetMountTarget implementation +func (c *MockFileStorageClient) GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) { + return filestorage.GetMountTargetResponse{}, nil +} + +// ListMountTargets mocks the FileStorage ListMountTargets implementation +func (c *MockFileStorageClient) ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) { + return 
filestorage.ListMountTargetsResponse{}, nil +} + // MockIdentityClient mocks identity client structure type MockIdentityClient struct { common.BaseClient diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 534cf2a58..bf91ccb96 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -68,6 +68,7 @@ type FileStorage interface { CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) + ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) } // ProvisionerClient is passed to all sub clients to provision a volume diff --git a/pkg/provisioner/filestorage/filestorage.go b/pkg/provisioner/filestorage/filestorage.go index 9566419ff..0d6e4602d 100644 --- a/pkg/provisioner/filestorage/filestorage.go +++ b/pkg/provisioner/filestorage/filestorage.go @@ -18,7 +18,9 @@ import ( "context" "errors" "fmt" + "math/rand" "os" + "time" "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/lib/controller" @@ -89,20 +91,37 @@ func (filesystem *filesystemProvisioner) Provision( mntTargetResp := filestorage.MountTarget{} if options.Parameters[mntTargetID] == "" { - // Mount target not created, create a new one - responseMnt, err := fileStorageClient.CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ - CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ - AvailabilityDomain: availabilityDomain.Name, - SubnetId: common.String(options.Parameters[subnetID]), - CompartmentId: common.String(filesystem.client.CompartmentOCID()), - DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), - }, + // Check if there there already is a mount target in the existing compartment + glog.Infof("Looking up existing mount targets") + responseListMnt, err := fileStorageClient.ListMountTargets(ctx, filestorage.ListMountTargetsRequest{ + AvailabilityDomain: availabilityDomain.Name, + CompartmentId: common.String(filesystem.client.CompartmentOCID()), }) if err != nil { - glog.Errorf("Failed to create a mount point:%#v, %s", options, err) + glog.Errorf("Failed to list mount targets:%#v, %s", options, err) return nil, err } - mntTargetResp = responseMnt.MountTarget + if len(responseListMnt.Items) != 0 { + glog.Infof("Found mount targets to use") + rand.Seed(time.Now().Unix()) + mntTargetSummary := responseListMnt.Items[rand.Int()%len(responseListMnt.Items)] + mntTargetResp = *getMountTargetFromID(ctx, *mntTargetSummary.Id, fileStorageClient) + } else { + // Mount target not created, create a new one + responseMnt, err := fileStorageClient.CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ + CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ + AvailabilityDomain: availabilityDomain.Name, + SubnetId: common.String(options.Parameters[subnetID]), + CompartmentId: common.String(filesystem.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), + }, + }) + if err != nil { + glog.Errorf("Failed to create a mount target:%#v, %s", options, err) + return nil, err + } + mntTargetResp = responseMnt.MountTarget + } } else { 
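When no mount target is configured, the code above seeds math/rand and picks one of the compartment's existing mount targets with `rand.Int() % len(items)`. A trimmed sketch of that choice; `rand.Intn` expresses the modulo step directly, and seeding would normally happen once at process start rather than on every call:

```go
package filestorage

import (
	"math/rand"
	"time"
)

// pickRandomMountTargetID spreads new file systems across the mount
// targets already present in the compartment.
func pickRandomMountTargetID(ids []string) string {
	rand.Seed(time.Now().UnixNano()) // ideally done once at startup
	return ids[rand.Intn(len(ids))]
}
```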
// Mount target already specified in the configuration file, find it in the list of mount targets mntTargetResp = *getMountTargetFromID(ctx, options.Parameters[mntTargetID], fileStorageClient) @@ -121,7 +140,10 @@ func (filesystem *filesystemProvisioner) Provision( glog.Errorf("Failed to create export:%s", err) return nil, err } - + mntTargetSubnetIDPtr := "" + if mntTargetResp.SubnetId != nil { + mntTargetSubnetIDPtr = *mntTargetResp.SubnetId + } glog.Infof("Creating persistent volume") return &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ @@ -140,7 +162,7 @@ func (filesystem *filesystemProvisioner) Provision( }, PersistentVolumeSource: v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ - Server: *mntTargetResp.SubnetId, + Server: mntTargetSubnetIDPtr, Path: "/", ReadOnly: false, }, diff --git a/test/system/README.md b/test/system/README.md index ebda0c8f5..8ed8d2a28 100644 --- a/test/system/README.md +++ b/test/system/README.md @@ -20,7 +20,9 @@ take precedence. * $MNT_TARGET_OCID -Mount target ocid used to attach file systems to for the oci-fss provisioner type +Mount target ocid used to attach file systems to for the oci-fss provisioner type. +If not mount target ocid is specified, the volume provisioner will randomly select one +from the existing ones. If no mount targets exist, a new one will be created. We can then run the system test as follows: diff --git a/test/system/runner.py b/test/system/runner.py index 4962a9be2..72335565e 100755 --- a/test/system/runner.py +++ b/test/system/runner.py @@ -122,31 +122,6 @@ def _get_oci_config_file(): def _get_oci_api_key_file(): return TMP_OCI_API_KEY_FILE -def _create_fss_storage_config(infile, outfile, test): - '''Based on the storage config template file and the mount arget OCID info, generate - a valid storage config yaml to use for creating an OCI Export - @param infile: File to use as template - @type infile: C{Str} - @param outfile: Outfile to storage class config file to - @type outfile: C{Str}''' - _log("Creating fss storage claim configuration file") - _mntTargetOCID = os.environ[MNT_TARGET_OCID] - if not _mntTargetOCID: - _log("No mount target OCID provided") - with open(infile, "r") as sources: - lines = sources.readlines() - with open(outfile + "." + test_id, "w") as sources: - for line in lines: - patched_line = line - if volume_name is not None: - patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line) - patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) - if availability_domain: - availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':' - patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line) - sources.write(patched_line) - return outfile + "." 
+ test_id - def _banner(as_banner, bold): if as_banner: if bold: @@ -437,20 +412,9 @@ def _handle_args(): return args -def _cleanup(exit_on_error=False, display_errors=True): - _kubectl("delete -f ../../dist/oci-volume-provisioner.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/oci-volume-provisioner-rbac.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/storage-class.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/storage-class-fss.yaml", - exit_on_error, display_errors) - _kubectl("delete -f ../../dist/storage-class-ext3.yaml", - exit_on_error, display_errors) - _kubectl("-n kube-system delete secret oci-volume-provisioner", - exit_on_error, display_errors) - +def _cleanup(k8sResources=[], exit_on_error=False, display_errors=True): + for _res in k8sResources: + _kubectl("delete -f " + _res, exit_on_error, display_errors) def _get_region(): nodes_json = _kubectl("get nodes -o json", log_stdout=False) @@ -461,7 +425,7 @@ def _get_region(): _finish_with_exit_code(1) -def _create_yaml(template, test_id, region=None, backup_id=None): +def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_ocid=None): '''Generate yaml based on the given template and fill in additional details @param template: Name of file to use as template @type template: C{Str} @@ -484,10 +448,13 @@ def _create_yaml(template, test_id, region=None, backup_id=None): patched_line = re.sub('{{REGION}}', region, patched_line) if backup_id is not None: patched_line = re.sub('{{BACKUP_ID}}', backup_id, patched_line) + if mount_target_ocid is not None: + patched_line = re.sub('{{MNT_TARGET_OCID}}', mount_target_ocid, patched_line) + elif "MNT_TARGET_OCID" in patched_line: + patched_line = "" sources.write(patched_line) return yaml_file - def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_oci, test_id=None, availability_domain=None, verify_func=None, storageType=BLOCK_STORAGE): '''Test making a volume claim from a configuration file @@ -758,20 +725,23 @@ def _destroy_key_files_atexit(): test_id = str(uuid.uuid4())[:8] success = True - - _create_fss_storage_config() + _storageClassFile = _create_yaml("../../examples/example-storage-class-fss.template", test_id, + mount_target_ocid=os.environ.get(MNT_TARGET_OCID)) + + _k8sResources = [_storageClassFile, + "../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml", + "../../dist/oci-volume-provisioner-fss.yaml", + "../../dist/oci-volume-provisioner.yaml"] if args['setup']: # Cleanup in case any existing state exists in the cluster - _cleanup(display_errors=False) + _cleanup(k8sResources=_k8sResources, display_errors=False) _log("Setting up the volume provisioner", as_banner=True) _kubectl("-n kube-system create secret generic oci-volume-provisioner " + \ "--from-file=config.yaml=" + _get_oci_config_file(), exit_on_error=False) - _kubectl("create -f ../../dist/storage-class.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/storage-class-ext3.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/storage-class-fss.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/oci-volume-provisioner-rbac.yaml", exit_on_error=False) - _kubectl("create -f ../../dist/oci-volume-provisioner.yaml", exit_on_error=False) + for _res in _k8sResources: + _kubectl("create -f " + _res, exit_on_error=False) pod_name, _, _ = _wait_for_pod_status("Running", test_id, POD_VOLUME) compartment_id = 
_get_compartment_id(pod_name) else: @@ -780,7 +750,7 @@ def _destroy_key_files_atexit(): if args['teardown']: def _teardown_atexit(): _log("Tearing down the volume provisioner", as_banner=True) - _cleanup() + _cleanup(k8sResources=_k8sResources) atexit.register(_teardown_atexit) if not args['no_test']: diff --git a/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go b/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go new file mode 100644 index 000000000..5f0293083 --- /dev/null +++ b/vendor/github.com/kubernetes-incubator/external-storage/nfs-client/cmd/nfs-client-provisioner/provisioner.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "errors" + "flag" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + provisionerNameKey = "PROVISIONER_NAME" +) + +type nfsProvisioner struct { + client kubernetes.Interface + server string + path string +} + +const ( + mountPath = "/persistentvolumes" +) + +var _ controller.Provisioner = &nfsProvisioner{} + +func (p *nfsProvisioner) Provision(options controller.VolumeOptions) (*v1.PersistentVolume, error) { + if options.PVC.Spec.Selector != nil { + return nil, fmt.Errorf("claim Selector is not supported") + } + glog.V(4).Infof("nfs provisioner: VolumeOptions %v", options) + + pvcNamespace := options.PVC.Namespace + pvcName := options.PVC.Name + + pvName := strings.Join([]string{pvcNamespace, pvcName, options.PVName}, "-") + + fullPath := filepath.Join(mountPath, pvName) + glog.V(4).Infof("creating path %s", fullPath) + if err := os.MkdirAll(fullPath, 0777); err != nil { + return nil, errors.New("unable to create directory to provision new pv: " + err.Error()) + } + os.Chmod(fullPath, 0777) + + path := filepath.Join(p.path, pvName) + + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: options.PVName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, + AccessModes: options.PVC.Spec.AccessModes, + MountOptions: options.MountOptions, + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + Server: p.server, + Path: path, + ReadOnly: false, + }, + }, + }, + } + return pv, nil +} + +func (p *nfsProvisioner) Delete(volume *v1.PersistentVolume) error { + path := volume.Spec.PersistentVolumeSource.NFS.Path + pvName := filepath.Base(path) + oldPath := filepath.Join(mountPath, pvName) + archivePath := filepath.Join(mountPath, "archived-"+pvName) + 
glog.V(4).Infof("archiving path %s to %s", oldPath, archivePath) + return os.Rename(oldPath, archivePath) +} + +func main() { + flag.Parse() + flag.Set("logtostderr", "true") + + server := os.Getenv("NFS_SERVER") + if server == "" { + glog.Fatal("NFS_SERVER not set") + } + path := os.Getenv("NFS_PATH") + if path == "" { + glog.Fatal("NFS_PATH not set") + } + provisionerName := os.Getenv(provisionerNameKey) + if provisionerName == "" { + glog.Fatalf("environment variable %s is not set! Please set it.", provisionerNameKey) + } + + // Create an InClusterConfig and use it to create a client for the controller + // to use to communicate with Kubernetes + config, err := rest.InClusterConfig() + if err != nil { + glog.Fatalf("Failed to create config: %v", err) + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + glog.Fatalf("Failed to create client: %v", err) + } + + // The controller needs to know what the server version is because out-of-tree + // provisioners aren't officially supported until 1.5 + serverVersion, err := clientset.Discovery().ServerVersion() + if err != nil { + glog.Fatalf("Error getting server version: %v", err) + } + + clientNFSProvisioner := &nfsProvisioner{ + server: server, + path: path, + } + // Start the provision controller which will dynamically provision efs NFS + // PVs + pc := controller.NewProvisionController(clientset, provisionerName, clientNFSProvisioner, serverVersion.GitVersion) + pc.Run(wait.NeverStop) +} From faf1247b41d8c65b242eb73df3a4d2926aa52ca1 Mon Sep 17 00:00:00 2001 From: MadalinaPatrichi Date: Tue, 24 Jul 2018 17:42:04 +0100 Subject: [PATCH 4/5] Added support for export deletion -Retrieve pirvate ip for mount target and fixed system tests -Ignore linting mock_interfaces -Fix for config file for creating rc with volume claim --- examples/example-pod-fss.template | 20 ++ ...tion-controller-with-volume-claim.template | 0 .../example-replication-controller.template | 0 hack/check-golint.sh | 3 +- pkg/helpers/mock_interfaces.go | 26 +- pkg/oci/client/client.go | 46 ++- pkg/provisioner/filestorage/filestorage.go | 52 ++- test/system/runner.py | 330 +++++++++--------- test/system/utils.py | 107 ++++++ 9 files changed, 385 insertions(+), 199 deletions(-) create mode 100644 examples/example-pod-fss.template rename test/system/replication-controller-with-volume-claim.yaml.template => examples/example-replication-controller-with-volume-claim.template (100%) rename test/system/replication-controller.yaml.template => examples/example-replication-controller.template (100%) create mode 100644 test/system/utils.py diff --git a/examples/example-pod-fss.template b/examples/example-pod-fss.template new file mode 100644 index 000000000..ece0687c5 --- /dev/null +++ b/examples/example-pod-fss.template @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: demooci-fss-pod-{{TEST_ID}} + labels: + name: demooci-fss-pod-{{TEST_ID}} +spec: + containers: + - name: demooci-fss-pod-{{TEST_ID}} + image: fedora/nginx + ports: + - name: web + containerPort: 80 + volumeMounts: + - name: nginx + mountPath: /usr/share/nginx/html + volumes: + - name: nginx + persistentVolumeClaim: + claimName: demooci-fss-{{TEST_ID}} \ No newline at end of file diff --git a/test/system/replication-controller-with-volume-claim.yaml.template b/examples/example-replication-controller-with-volume-claim.template similarity index 100% rename from test/system/replication-controller-with-volume-claim.yaml.template rename to 
examples/example-replication-controller-with-volume-claim.template diff --git a/test/system/replication-controller.yaml.template b/examples/example-replication-controller.template similarity index 100% rename from test/system/replication-controller.yaml.template rename to examples/example-replication-controller.template diff --git a/hack/check-golint.sh b/hack/check-golint.sh index 6d6ec6cba..0dd0ea17e 100755 --- a/hack/check-golint.sh +++ b/hack/check-golint.sh @@ -21,9 +21,8 @@ set -o nounset set -o pipefail TARGETS=$(for d in "$@"; do echo ./$d/...; done) - echo -n "Checking golint: " -ERRS=$(golint ${TARGETS} 2>&1 || true) +ERRS=$(golint ${TARGETS} | grep -v mock_interfaces.go 2>&1 || true) if [ -n "${ERRS}" ]; then echo "FAIL" echo "${ERRS}" diff --git a/pkg/helpers/mock_interfaces.go b/pkg/helpers/mock_interfaces.go index 7c8ac40bc..9af204b9c 100644 --- a/pkg/helpers/mock_interfaces.go +++ b/pkg/helpers/mock_interfaces.go @@ -29,6 +29,9 @@ var ( // VolumeBackupID of backup volume VolumeBackupID = "dummyVolumeBackupId" fileSystemID = "dummyFileSystemId" + exportID = "dummyExportID" + serverIPs = []string{"dummyServerIP"} + privateIP = "127.0.0.1" ) // MockBlockStorageClient mocks BlockStorage client implementation @@ -66,12 +69,17 @@ func (c *MockFileStorageClient) DeleteFileSystem(ctx context.Context, request fi // CreateExport mocks the FileStorage CreateExport implementation func (c *MockFileStorageClient) CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) { - return filestorage.CreateExportResponse{}, nil + return filestorage.CreateExportResponse{Export: filestorage.Export{Id: common.String(exportID)}}, nil +} + +// DeleteExport mocks the FileStorage DeleteExport implementation +func (c *MockFileStorageClient) DeleteExport(ctx context.Context, request filestorage.DeleteExportRequest) (response filestorage.DeleteExportResponse, err error) { + return filestorage.DeleteExportResponse{}, nil } // CreateMountTarget mocks the FileStorage CreateMountTarget implementation func (c *MockFileStorageClient) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) { - return filestorage.CreateMountTargetResponse{}, nil + return filestorage.CreateMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: serverIPs}}, nil } // GetMountTarget mocks the FileStorage GetMountTarget implementation @@ -84,6 +92,15 @@ func (c *MockFileStorageClient) ListMountTargets(ctx context.Context, request fi return filestorage.ListMountTargetsResponse{}, nil } +// MockVirtualNetworkClient mocks VirtualNetwork client implementation +type MockVirtualNetworkClient struct { +} + +// GetPrivateIp mocks the VirtualNetwork GetPrivateIp implementation +func (c *MockVirtualNetworkClient) GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) { + return core.GetPrivateIpResponse{PrivateIp: core.PrivateIp{IpAddress: common.String(privateIP)}}, nil +} + // MockIdentityClient mocks identity client structure type MockIdentityClient struct { common.BaseClient @@ -108,6 +125,11 @@ func (p *MockProvisionerClient) FileStorage() client.FileStorage { return &MockFileStorageClient{} } +// VirtualNetwork mocks client VirtualNetwork implementation +func (p *MockProvisionerClient) VirtualNetwork() client.VirtualNetwork { + return &MockVirtualNetworkClient{} +} + // Identity mocks client Identity 
implementation func (p *MockProvisionerClient) Identity() client.Identity { return &MockIdentityClient{} diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index bf91ccb96..f0ee6dfa2 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -40,13 +40,14 @@ import ( // ProvisionerClient wraps the OCI sub-clients required for volume provisioning. type provisionerClient struct { - cfg *Config - blockStorage *core.BlockstorageClient - identity *identity.IdentityClient - fileStorage *filestorage.FileStorageClient - context context.Context - timeout time.Duration - metadata *instancemeta.InstanceMetadata + cfg *Config + blockStorage *core.BlockstorageClient + identity *identity.IdentityClient + fileStorage *filestorage.FileStorageClient + virtualNetwork *core.VirtualNetworkClient + context context.Context + timeout time.Duration + metadata *instancemeta.InstanceMetadata } // BlockStorage specifies the subset of the OCI core API utilised by the provisioner. @@ -67,15 +68,22 @@ type FileStorage interface { DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) CreateExport(ctx context.Context, request filestorage.CreateExportRequest) (response filestorage.CreateExportResponse, err error) + DeleteExport(ctx context.Context, request filestorage.DeleteExportRequest) (response filestorage.DeleteExportResponse, err error) GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) } +//VirtualNetwork specifies the subset of the OCI core API utilised by the provisioner. 
+type VirtualNetwork interface { + GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) +} + // ProvisionerClient is passed to all sub clients to provision a volume type ProvisionerClient interface { BlockStorage() BlockStorage Identity() Identity FileStorage() FileStorage + VirtualNetwork() VirtualNetwork Context() context.Context Timeout() time.Duration CompartmentOCID() string @@ -94,6 +102,10 @@ func (p *provisionerClient) FileStorage() FileStorage { return p.fileStorage } +func (p *provisionerClient) VirtualNetwork() VirtualNetwork { + return p.virtualNetwork +} + func (p *provisionerClient) Context() context.Context { return p.context } @@ -141,6 +153,11 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { return nil, err } + virtualNetwork, err := core.NewVirtualNetworkClientWithConfigurationProvider(config) + if err != nil { + return nil, err + } + identity, err := identity.NewIdentityClientWithConfigurationProvider(config) if err != nil { return nil, err @@ -156,13 +173,14 @@ func FromConfig(cfg *Config) (ProvisionerClient, error) { } return &provisionerClient{ - cfg: cfg, - blockStorage: &blockStorage, - identity: &identity, - fileStorage: &fileStorage, - timeout: 3 * time.Minute, - context: context.Background(), - metadata: metadata, + cfg: cfg, + blockStorage: &blockStorage, + identity: &identity, + fileStorage: &fileStorage, + virtualNetwork: &virtualNetwork, + timeout: 3 * time.Minute, + context: context.Background(), + metadata: metadata, }, nil } diff --git a/pkg/provisioner/filestorage/filestorage.go b/pkg/provisioner/filestorage/filestorage.go index 0d6e4602d..0fc1fbcdc 100644 --- a/pkg/provisioner/filestorage/filestorage.go +++ b/pkg/provisioner/filestorage/filestorage.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/core" "github.com/oracle/oci-go-sdk/filestorage" "github.com/oracle/oci-go-sdk/identity" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,6 +37,7 @@ import ( const ( ociVolumeID = "ociVolumeID" + ociExportID = "ociExportID" volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX" fsType = "fsType" subnetID = "subnetId" @@ -128,11 +130,11 @@ func (filesystem *filesystemProvisioner) Provision( } glog.Infof("Creating export set") - _, err = fileStorageClient.CreateExport(ctx, filestorage.CreateExportRequest{ + createExportResponse, err := fileStorageClient.CreateExport(ctx, filestorage.CreateExportRequest{ CreateExportDetails: filestorage.CreateExportDetails{ ExportSetId: mntTargetResp.ExportSetId, FileSystemId: response.FileSystem.Id, - Path: common.String("/"), + Path: common.String("/" + *response.FileSystem.Id), }, }) @@ -140,16 +142,30 @@ func (filesystem *filesystemProvisioner) Provision( glog.Errorf("Failed to create export:%s", err) return nil, err } - mntTargetSubnetIDPtr := "" - if mntTargetResp.SubnetId != nil { - mntTargetSubnetIDPtr = *mntTargetResp.SubnetId + serverIP := "" + if len(mntTargetResp.PrivateIpIds) != 0 { + privateIPID := mntTargetResp.PrivateIpIds[rand.Int()%len(mntTargetResp.PrivateIpIds)] + virtualNetworkClient := filesystem.client.VirtualNetwork() + getPrivateIPResponse, err := virtualNetworkClient.GetPrivateIp(ctx, core.GetPrivateIpRequest{ + PrivateIpId: common.String(privateIPID), + }) + if err != nil { + glog.Errorf("Failed to retrieve IP address for mount target:%s", err) + return nil, err + } + serverIP = 
*getPrivateIPResponse.PrivateIp.IpAddress
+	} else {
+		glog.Errorf("Failed to find server IDs associated with the mount target to provision a persistent volume")
+		return nil, fmt.Errorf("Failed to find server IDs associated with the mount target")
 	}
-	glog.Infof("Creating persistent volume")
+
+	glog.Infof("Creating persistent volume on mount target with private IP address %s", serverIP)
 	return &v1.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: *response.FileSystem.Id,
 			Annotations: map[string]string{
 				ociVolumeID: *response.FileSystem.Id,
+				ociExportID: *createExportResponse.Export.Id,
 			},
 			Labels: map[string]string{},
 		},
@@ -162,8 +178,9 @@
 			},
 			PersistentVolumeSource: v1.PersistentVolumeSource{
 				NFS: &v1.NFSVolumeSource{
-					Server:   mntTargetSubnetIDPtr,
-					Path:     "/",
+					// Randomly select IP address associated with the mount target to use for attachment
+					Server:   serverIP,
+					Path:     *common.String("/" + *response.FileSystem.Id),
 					ReadOnly: false,
 				},
 			},
@@ -173,14 +190,27 @@
 // Delete destroys a OCI volume created by Provision
 func (filesystem *filesystemProvisioner) Delete(volume *v1.PersistentVolume) error {
+	exportID, ok := volume.Annotations[ociExportID]
+	if !ok {
+		return errors.New("Export ID annotation not found on PV")
+	}
 	filesystemID, ok := volume.Annotations[ociVolumeID]
 	if !ok {
-		return errors.New("filesystemid annotation not found on PV")
+		return errors.New("Filesystem ID annotation not found on PV")
 	}
-	glog.Infof("Deleting volume %v with filesystemID %v", volume, filesystemID)
 	ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout())
 	defer cancel()
+	glog.Infof("Deleting export for filesystemID %v", filesystemID)
+	_, err := filesystem.client.FileStorage().DeleteExport(ctx,
+		filestorage.DeleteExportRequest{
+			ExportId: &exportID,
+		})
+	if err != nil {
+		glog.Errorf("Failed to delete export:%s, %s", exportID, err)
+		return err
+	}
+	glog.Infof("Deleting volume %v with filesystemID %v", volume, filesystemID)
+	_, err = filesystem.client.FileStorage().DeleteFileSystem(ctx,
 		filestorage.DeleteFileSystemRequest{
 			FileSystemId: &filesystemID,
 		})
diff --git a/test/system/runner.py b/test/system/runner.py
index 72335565e..a8c2e19a6 100755
--- a/test/system/runner.py
+++ b/test/system/runner.py
@@ -20,18 +20,14 @@
 import json
 import os
 import re
-import select
-from shutil import copyfile
-import subprocess
 import sys
 import time
 import uuid
 import oci
 import yaml
-
+import utils

 MNT_TARGET_OCID = "MNT_TARGET_OCID"
-DEBUG_FILE = "runner.log"
 TERRAFORM_CLUSTER = "terraform/cluster"
 TERRAFORM_DIR = "terraform"
 # Variable name correspond to the ones found in the terraform config file
@@ -44,8 +40,6 @@
 REGION = "us-ashburn-1"
 TIMEOUT = 600
 WRITE_REPORT=True
-REPORT_DIR_PATH="/tmp/results"
-REPORT_FILE="done"
 POD_CONTROLLER = "controller"
 POD_VOLUME = "volume"
 BLOCK_STORAGE = "block"
@@ -54,6 +48,7 @@
 LIFECYCLE_STATE_ON = {BLOCK_STORAGE: 'AVAILABLE',
                       FS_STORAGE: 'ACTIVE'}
 LIFECYCLE_STATE_OFF = {BLOCK_STORAGE: 'TERMINATED',
                        FS_STORAGE:'DELETED'}
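The filestorage.go hunks above are the heart of this patch: instead of publishing the mount target's subnet OCID as the NFS server, Provision now resolves one of the mount target's private IP OCIDs to a routable address through the virtual network API, and each file system is exported under its own path ("/" plus the file system OCID) so exports no longer collide on "/". The same lookup can be reproduced with the OCI Python SDK that the system tests already use. A minimal sketch, assuming a valid OCI config; the helper name is illustrative and not part of this patch:

    import random
    import oci

    def resolve_mount_target_ip(config, mount_target_ocid):
        # Fetch the mount target; filestorage.go reads the same
        # PrivateIpIds field from its CreateMountTarget response.
        fs_client = oci.file_storage.FileStorageClient(config)
        mount_target = fs_client.get_mount_target(mount_target_ocid).data
        if not mount_target.private_ip_ids:
            raise RuntimeError("mount target has no private IPs")
        # Like the provisioner, pick one private IP at random when
        # several are attached to the mount target.
        private_ip_id = random.choice(mount_target.private_ip_ids)
        # Resolve the OCID to an IP address via the virtual network API,
        # mirroring the new VirtualNetwork.GetPrivateIp call.
        vcn_client = oci.core.VirtualNetworkClient(config)
        return vcn_client.get_private_ip(private_ip_id).data.ip_address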
 # On exit return 0 for success or any other integer for a failure.
@@ -70,26 +65,29 @@ def _finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_
         _debug_file("\nTest Suite Failed\n")
         time.sleep(3)
         copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE)
-        with open(report_dir_path + "/" + report_file, "w+") as file:
+        with open(report_dir_path + "/" + report_file, "w+") as file:
             file.write(str(report_dir_path + "/" + DEBUG_FILE))
         finish_canary_metrics()
-    sys.exit(exit_code)
+    sys.exit(exit_code)

 def _check_env(check_oci):
     if check_oci:
         if "OCICONFIG" not in os.environ and "OCICONFIG_VAR" not in os.environ:
-            _log("Error. Can't find either OCICONFIG or OCICONFIG_VAR in the environment.")
-            _finish_with_exit_code(1)
+            utils.log("Error. Can't find either OCICONFIG or OCICONFIG_VAR in the environment.")
+            utils.finish_with_exit_code(1)

 def _create_key_files(check_oci):
-    _log("Setting environment variables")
+    utils.log("Setting environment variables")
     if "OCICONFIG_VAR" in os.environ:
-        _run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_OCICONFIG, ".")
-        _run_command("chmod 600 " + TMP_OCICONFIG, ".")
+        utils.run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_OCICONFIG, ".")
+        utils.run_command("chmod 600 " + TMP_OCICONFIG, ".")
     if "KUBECONFIG_VAR" in os.environ:
-        _run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_KUBECONFIG, ".")
+        utils.run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_KUBECONFIG, ".")

     if check_oci:
         oci_config_file = _get_oci_config_file()
@@ -99,8 +97,8 @@ def _create_key_files(check_oci):
             with open(TMP_OCI_API_KEY_FILE, 'w') as stream:
                 stream.write(cnf['auth']['key'])
         except yaml.YAMLError as err:
-            _log("Error. Failed to parse oci config file %s. Error: %s " % (oci_config_file, err))
-            _finish_with_exit_code(1)
+            utils.log("Error. Failed to parse oci config file %s. Error: %s " % (oci_config_file, err))
+            utils.finish_with_exit_code(1)

 def _destroy_key_files(check_oci):
@@ -122,72 +120,6 @@ def _get_oci_config_file():

 def _get_oci_api_key_file():
     return TMP_OCI_API_KEY_FILE

-def _banner(as_banner, bold):
-    if as_banner:
-        if bold:
-            print "********************************************************"
-        else:
-            print "--------------------------------------------------------"
-
-
-def _reset_debug_file():
-    if os.path.exists(DEBUG_FILE):
-        os.remove(DEBUG_FILE)
-
-
-def _debug_file(string):
-    with open(DEBUG_FILE, "a") as debug_file:
-        debug_file.write(string)
-
-
-def _log(string, as_banner=False, bold=False):
-    _banner(as_banner, bold)
-    print string
-    _banner(as_banner, bold)
-
-
-def _process_stream(stream, read_fds, global_buf, line_buf):
-    char = stream.read(1)
-    if char == '':
-        read_fds.remove(stream)
-    global_buf.append(char)
-    line_buf.append(char)
-    if char == '\n':
-        _debug_file(''.join(line_buf))
-        line_buf = []
-    return line_buf
-
-def _poll(stdout, stderr):
-    stdoutbuf = []
-    stdoutbuf_line = []
-    stderrbuf = []
-    stderrbuf_line = []
-    read_fds = [stdout, stderr]
-    x_fds = [stdout, stderr]
-    while read_fds:
-        rlist, _, _ = select.select(read_fds, [], x_fds)
-        if rlist:
-            for stream in rlist:
-                if stream == stdout:
-                    stdoutbuf_line = _process_stream(stream, read_fds, stdoutbuf, stdoutbuf_line)
-                if stream == stderr:
-                    stderrbuf_line = _process_stream(stream, read_fds, stderrbuf, stderrbuf_line)
-    return (''.join(stdoutbuf), ''.join(stderrbuf))
-
-def _run_command(cmd, cwd, display_errors=True):
-    _log(cwd + ": " + cmd)
-    process = subprocess.Popen(cmd,
-                               stdout=subprocess.PIPE,
-                               stderr=subprocess.PIPE,
-                               shell=True, cwd=cwd)
-    (stdout, stderr) = _poll(process.stdout, process.stderr)
-    returncode = process.wait()
-    if returncode != 0 and display_errors:
-        _log("  stdout: " + stdout)
-        _log("  stderr: " + stderr)
-        _log("  result: " + str(returncode))
-    return (stdout, stderr, returncode)
-
 def _get_timestamp(test_id):
     return test_id if test_id is not None else datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
@@ -196,22 +128,22 @@ def _get_terraform_env():
     return "TF_VAR_test_id=" + timestamp

 def _terraform(action, cwd, terraform_env):
-    (stdout, _, returncode) = _run_command(terraform_env + " terraform " + action, cwd)
+    (stdout, _, returncode) = utils.run_command(terraform_env + " terraform " + action, cwd)
     if returncode != 0:
-        _log("Error running terraform")
+        utils.log("Error running terraform")
         sys.exit(1)
     return stdout

 def _kubectl(action, exit_on_error=True, display_errors=True, log_stdout=True):
     if "KUBECONFIG" not in os.environ and "KUBECONFIG_VAR" not in os.environ:
-        (stdout, _, returncode) = _run_command("kubectl " + action, ".", display_errors)
+        (stdout, _, returncode) = utils.run_command("kubectl " + action, ".", display_errors)
     else:
-        (stdout, _, returncode) = _run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors)
+        (stdout, _, returncode) = utils.run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors)
     if exit_on_error and returncode != 0:
-        _log("Error running kubectl")
-        _finish_with_exit_code(1)
+        utils.log("Error running kubectl")
+        utils.finish_with_exit_code(1)
     if log_stdout:
-        _log(stdout)
+        utils.log(stdout)
     return stdout

 def _get_pod_infos(test_id, pod_type):
     '''Retrieve pod information from kube-system
     @type test_id: C{Str}
     @param pod_type: Pod type to search for
     @type pod_type: C{Str}
-    @return: Tuple containing the name of the resource, its status and the
+    @return: Tuple containing the name of the resource, its status and the
             node it's running on
     @rtype: C{Tuple}'''
     _namespace = "-n kube-system" if pod_type == POD_VOLUME else ""
@@ -239,6 +171,11 @@ def _get_pod_infos(test_id, pod_type):
             status = line_array[2]
             node = line_array[6]
             infos.append((name, status, node))
+        if re.match(r"demooci-fss-pod-" + test_id + ".*", line) and pod_type == POD_CONTROLLER:
+            name = line_array[0]
+            status = line_array[2]
+            node = line_array[6]
+            infos.append((name, status, node))
     return infos

 def _get_volume(volume_name):
@@ -256,7 +193,7 @@ def _get_volume_and_wait(volume_name):
     num_polls = 0
     volume = _get_volume(volume_name)
     while not volume:
-        _log("   waiting...")
+        utils.log("   waiting...")
         time.sleep(1)
         num_polls += 1
         if num_polls == TIMEOUT:
@@ -270,8 +207,13 @@ def _get_json_doc(response):
     try:
         doc = decoder.decode(response)
     except (ValueError, UnicodeError) as _:
-        _log('Invalid JSON in response: %s' % str(response))
-        _finish_with_exit_code(1)
+        utils.log('Invalid JSON in response: %s' % str(response))
+        utils.finish_with_exit_code(1)
     return doc

@@ -288,8 +230,8 @@ def _oci_config():
         config["region"] = cnf['auth']['region']
         return config
     except yaml.YAMLError:
-        _log("Error. Failed to parse oci config file " + oci_config_file)
-        _finish_with_exit_code(1)
+        utils.log("Error. Failed to parse oci config file " + oci_config_file)
+        utils.finish_with_exit_code(1)

 def _volume_exists(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None):
@@ -299,18 +241,18 @@
     @param availability_domain: Availability domain to look in for
     @type availability_domain: C{Str}'''
     if storageType == BLOCK_STORAGE:
-        _log("Retrieving block volumes")
+        utils.log("Retrieving block volumes")
         client = oci.core.blockstorage_client.BlockstorageClient(_oci_config())
         if backup:
             volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id)
         else:
             volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id)
     else:
-        _log("Retrieving file systems")
+        utils.log("Retrieving file systems")
         client = oci.file_storage.FileStorageClient(_oci_config())
         volumes = oci.pagination.list_call_get_all_results(client.list_file_systems, compartment_id,
                                                            availability_domain)
-    _log("Getting status for volume %s" % volume)
+    utils.log("Getting status for volume %s" % volume)
     for vol in _get_json_doc(str(volumes.data)):
         if vol['id'].endswith(volume) and vol['lifecycle_state'] == state:
             return True
@@ -325,10 +267,10 @@ def _create_backup(volume_ocid, test_id):
     @return: Tuple containing the backup id, compartment id and display name
     @rtype: C{Tuple}'''
     client = oci.core.blockstorage_client.BlockstorageClient(_oci_config())
-    _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid,
+    _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid,
                                                                 display_name="backup_volume_system_test" + test_id)
     _response = client.create_volume_backup(_backup_details)
-    _log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data))
+    utils.log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data))
     _res = _get_json_doc(str(_response.data))
     return _res['id'], _res['compartment_id'], _res['display_name']

@@
-338,27 +280,27 @@ def _delete_backup(backup_ocid): @type backup_ocid: C{Str}''' client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) _response = client.delete_volume_backup(backup_ocid) - _log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) + utils.log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) def _create_volume_from_backup(backup_ocid, test_id, availability_domain, compartment_id): client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, + _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, display_name="restored_volume_system_test" + test_id, availability_domain=availability_domain, compartment_id=compartment_id) try: _response = client.create_volume(_volume_details) - _log("Response for creating volume from backup %s: %s %s" % (_response.data, _get_json_doc(str(_response.data))['id'], compartment_id)) + utils.log("Response for creating volume from backup %s: %s %s" % (_response.data, _get_json_doc(str(_response.data))['id'], compartment_id)) return _get_json_doc(str(_response.data))['id'] except Exception as exc: - _log("Failed to create volume from backup %s" % exc) + utils.log("Failed to create volume from backup %s" % exc) def _wait_for_volume(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): num_polls = 0 - while not _volume_exists(compartment_id, volume, state, backup, storageType=storageType, + while not _volume_exists(compartment_id, volume, state, backup, storageType=storageType, availability_domain=availability_domain): - _log(" waiting...") + utils.log(" waiting...") time.sleep(1) num_polls += 1 if num_polls == TIMEOUT: @@ -366,7 +308,7 @@ def _wait_for_volume(compartment_id, volume, state, backup=False, storageType=BL return True def _wait_for_volume_to_create(compartment_id, volume, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): - return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_ON[storageType], backup, storageType=storageType, + return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_ON[storageType], backup, storageType=storageType, availability_domain=availability_domain) @@ -406,14 +348,14 @@ def _handle_args(): args = vars(parser.parse_args()) if args['check_oci'] and not args['setup']: - _log("If --check-oci is specified, then --setup also needs to be set.") - _finish_with_exit_code(1) + utils.log("If --check-oci is specified, then --setup also needs to be set.") + utils.finish_with_exit_code(1) return args def _cleanup(k8sResources=[], exit_on_error=False, display_errors=True): - for _res in k8sResources: + for _res in k8sResources: _kubectl("delete -f " + _res, exit_on_error, display_errors) def _get_region(): @@ -421,11 +363,11 @@ def _get_region(): nodes = json.loads(nodes_json) for node in nodes['items']: return node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone'] - _log("Region lookup failed") - _finish_with_exit_code(1) + utils.log("Region lookup failed") + utils.finish_with_exit_code(1) -def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_ocid=None): +def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_ocid=None, volume_name=None, availability_domain=None): '''Generate yaml based on the given template and fill in additional details @param template: Name of file to use as template @type 
template: C{Str}
@@ -435,6 +377,12 @@ def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_oc
     @type region: C{Str}
     @param backup_id: Backup id to create PVC from
     @type backup_id: C{Str}
+    @param mount_target_ocid: Mount target OCID to populate config with
+    @type mount_target_ocid: C{Str}
+    @param volume_name: Name used to create volume
+    @type volume_name: C{Str}
+    @param availability_domain: Availability domain (used for pvc)
+    @type availability_domain: C{Str}
     @return: Name of generated config file
     @rtype: C{Str}'''
     yaml_file = template + ".yaml"
@@ -444,10 +392,15 @@ def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_oc
     for line in lines:
         patched_line = line
         patched_line = re.sub('{{TEST_ID}}', test_id, patched_line)
+        if volume_name is not None:
+            patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line)
         if region is not None:
             patched_line = re.sub('{{REGION}}', region, patched_line)
         if backup_id is not None:
             patched_line = re.sub('{{BACKUP_ID}}', backup_id, patched_line)
+        if availability_domain:
+            availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':'
+            patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line)
         if mount_target_ocid is not None:
             patched_line = re.sub('{{MNT_TARGET_OCID}}', mount_target_ocid, patched_line)
         elif "MNT_TARGET_OCID" in patched_line:
@@ -455,7 +408,7 @@ def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_oc
         sources.write(patched_line)
     return yaml_file

-def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_oci, test_id=None,
+def _test_create_volume(compartment_id, claim_target, claim_volume_name, check_oci, test_id=None,
                         availability_domain=None, verify_func=None, storageType=BLOCK_STORAGE):
     '''Test making a volume claim from a configuration file
     @param backup_ocid: Verify whether the volume created from a backup contains backup info
@@ -463,11 +416,12 @@
     _kubectl("create -f " + claim_target, exit_on_error=False)

     volume = _get_volume_and_wait(claim_volume_name)
-    _log("Created volume with name: %s" % str(volume))
+    utils.log("Created volume with name: %s" % str(volume))

     if check_oci:
-        _log("Querying the OCI api to make sure a volume with this name exists...")
-        if not _wait_for_volume_to_create(compartment_id, volume, storageType=storageType,
-                                          availability_domain=availability_domain):
-            _log("Failed to find volume with name: " + volume)
-            return False
@@ -475,20 +429,39 @@
-    if verify_func:
-        verify_func(test_id, availability_domain, volume)
-
-    _log("Delete the volume claim")
+        utils.log("Querying the OCI api to make sure a volume with this name exists...")
+        if not _wait_for_volume_to_create(compartment_id, volume, storageType=storageType,
+                                          availability_domain=availability_domain):
+            utils.log("Failed to find volume with name: " + volume)
+            utils.finish_with_exit_code(1)
+        utils.log("Volume: " + volume + " is present and available")
+
+        if verify_func:
+            verify_func(test_id, availability_domain, volume)
+
+        utils.log("Delete the volume claim")
     _kubectl("delete -f " + claim_target, exit_on_error=False)

     if check_oci:
-        _log("Querying the OCI api to make sure a volume with this name now doesnt exist...")
+        utils.log("Querying the OCI api to make sure a volume with this name now doesn't exist...")
         _wait_for_volume_to_delete(compartment_id, volume, storageType=storageType,
                                    availability_domain=availability_domain)
         if not _volume_exists(compartment_id, volume, LIFECYCLE_STATE_OFF[storageType],
                               storageType=storageType, availability_domain=availability_domain):
-            _log("Volume with name: " + volume + " still exists")
-            return False
-        _log("Volume: " + volume + " has now been terminated")
-
+            utils.log("Volume with name: " + volume + " still exists")
+            utils.finish_with_exit_code(1)
+        utils.log("Volume: " + volume + " has now been terminated")
+
     return True

 def _patch_template_file(infile, outfile, volume_name, test_id, availability_domain):
@@ -515,17 +488,6 @@ def _patch_template_file(infile, outfile, volume_name, test_id, availability_dom
     sources.write(patched_line)
     return outfile + "." + test_id

-def _create_rc_yaml(using_oci, volume_name, test_id, availability_domain):
-    '''Generate replication controller yaml file from provided templates'''
-    if using_oci:
-        return _patch_template_file( "replication-controller.yaml.template",
-                                     "replication-controller.yaml",
-                                     volume_name, test_id, availability_domain)
-    else:
-        return _patch_template_file( "replication-controller-with-volume-claim.yaml.template",
-                                     "replication-controller-with-volume-claim.yaml",
-                                     volume_name, test_id, availability_domain)
-
 def _get_terraform_output_var(terraform_env, var_name):
     '''Retrieve variable value from terraform output from state file
     @param terraform_env: Terraform test id
@@ -555,19 +517,19 @@ def _wait_for_pod_status(desired_status, test_id, pod_type):
     @type test_id: C{Str}
     @param pod_type: Pod type to query
     @type pod_type: C{Str}
-    @return: Tuple containing the name of the resource, its status and the
+    @return: Tuple containing the name of the resource, its status and the
             node it's running on
     @rtype: C{Tuple}'''
     infos = _get_pod_infos(test_id, pod_type)
     num_polls = 0
     while not any(i[1] == desired_status for i in infos):
         for i in infos:
-            _log("    - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2])
+            utils.log("    - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2])
         time.sleep(1)
         num_polls += 1
         if num_polls == TIMEOUT:
             for i in infos:
-                _log("Error: Pod: " + i[0] + " " +
+                utils.log("Error: Pod: " + i[0] + " " +
                       "failed to achieve status: " + desired_status + "." +
                       "Final status was: " + i[1])
             sys.exit(1)
@@ -578,8 +540,10 @@
     # Should never get here.
return (None, None, None) -def _create_replication_controller(test_id, availability_domain, volume_name="default_volume"): - '''Create replication controller and wait for it to start +def _create_rc_or_pod(config, test_id, availability_domain, volume_name="default_volume"): + '''Create replication controller or pod and wait for it to start + @param rc_config: Replication controller configuration file to patch + @type rc_config: C{Str} @param test_id: Test id used to append to component names @type test_id : C{Str} @param availability_domain: Availability domain to start rc in @@ -588,13 +552,13 @@ def _create_replication_controller(test_id, availability_domain, volume_name="de @type volume_name: C{Str} @return: Tuple containing the name of the created rc and its config file @rtype: C{Tuple}''' - _rc_config = _create_rc_yaml(True, volume_name, test_id, availability_domain) - _log("Starting the replication controller (creates a single nginx pod).") - _kubectl("delete -f " + _rc_config, exit_on_error=False, display_errors=False) - _kubectl("create -f " + _rc_config) - _log("Waiting for the pod to start.") - _rc_name, _, _ = _wait_for_pod_status("Running", test_id, POD_CONTROLLER) - return _rc_name, _rc_config + _config = _patch_template_file(config, config + '.yaml', volume_name, test_id, availability_domain) + utils.log("Starting the replication controller (creates a single nginx pod).") + _kubectl("delete -f " + _config, exit_on_error=False, display_errors=False) + _kubectl("create -f " + _config) + utils.log("Waiting for the pod to start.") + _name, _, _ = _wait_for_pod_status("Running", test_id, POD_CONTROLLER) + return _name, _config def _create_file_via_replication_controller(rc_name, file_name="hello.txt"): '''Create file via the replication controller @@ -610,12 +574,12 @@ def _verify_file_existance_via_replication_controller(rc_name, file_name="hello. 
     @type rcName: C{Str}
     @param fileName: Name of file to create
     @type fileName: C{Str}'''
-    _log("Does the new file exist?")
+    utils.log("Does the new file exist?")
     stdout = _kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html")
     if file_name not in stdout.split("\n"):
-        _log("Error: Failed to find file %s in mounted volume" % file_name)
+        utils.log("Error: Failed to find file %s in mounted volume" % file_name)
         sys.exit(1)
-    _log("Yes it does!")
+    utils.log("Yes it does!")

 def _setup_create_volume_from_backup(terraform_env, test_id, storageType=BLOCK_STORAGE, availability_domain=None):
     '''Setup environment for creating a volume from a backup device
@@ -623,20 +587,25 @@
     @type test_id : C{Str}
     @return: OCID of generated backup
     @rtype: C{Str}'''
-    _log("Creating test volume (using terraform)", as_banner=True)
+    utils.log("Creating test volume (using terraform)", as_banner=True)
     _terraform("init", TERRAFORM_DIR, terraform_env)
     _terraform("apply", TERRAFORM_DIR, terraform_env)
     _availability_domain = _get_terraform_output_var(terraform_env, TERRAFORM_AVAILABILITY_DOMAIN)
-    _log(_terraform("output -json", TERRAFORM_DIR, terraform_env))
+    utils.log(_terraform("output -json", TERRAFORM_DIR, terraform_env))
     # Create replication controller and write data to the generated volume
-    _rc_name, _rc_config = _create_replication_controller(test_id, _availability_domain, volume_name=_get_volume_name(terraform_env))
+    _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-replication-controller-with-volume-claim.template",
+                                             test_id, _availability_domain, volume_name=_get_volume_name(terraform_env))
     _create_file_via_replication_controller(_rc_name)
     _verify_file_existance_via_replication_controller(_rc_name)
     # Create backup from generated volume
     _backup_ocid, compartment_id, _volume_name = _create_backup(_get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID), test_id)
     if not _wait_for_volume_to_create(compartment_id, _backup_ocid, backup=True, storageType=storageType,
                                       availability_domain=availability_domain):
-        _log("Failed to find backup with name: " + _volume_name)
+        utils.log("Failed to find backup with name: " + _volume_name)
     return _backup_ocid, _availability_domain

 def _tear_down_create_volume_from_backup(terraform_env, backup_ocid):
@@ -646,7 +615,7 @@
     @param backup_ocid: OCID of backup from which the test volume was created
     @type backup_ocid: C{Str}'''
     def _destroy_test_volume_atexit():
-        _log("Destroying test volume (using terraform)", as_banner=True)
+        utils.log("Destroying test volume (using terraform)", as_banner=True)
         _terraform("destroy -force", TERRAFORM_DIR, terraform_env)
     atexit.register(_destroy_test_volume_atexit)
     _delete_backup(backup_ocid)
@@ -663,22 +632,44 @@ def _volume_from_backup_check(test_id, availability_domain, volume, file_name='h
     @type file_name: C{Str}'''
     _ocid = volume.split('.')
     _ocid = _ocid[-1]
-    _rc_name, _rc_config = _create_replication_controller(test_id, availability_domain, _ocid)
-    _log("Does the file from the previous backup exist?")
+    _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-replication-controller.template", test_id, availability_domain, _ocid)
+    utils.log("Does the file from the previous backup exist?")
     stdout = _kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html")
     if file_name not in stdout.split("\n"):
-        _log("Error: Failed to find file %s in mounted volume" % file_name)
-        _log("Deleting the replication controller (deletes the single nginx pod).")
+        utils.log("Error: Failed to find file %s in mounted volume" % file_name)
+        utils.log("Deleting the replication controller (deletes the single nginx pod).")
+        _kubectl("delete -f " + _rc_config)
+
+def _volume_from_fss_dynamic_check(test_id, availability_domain, volume, file_name='hello.txt'):
+    '''Verify whether the file system is attached to the pod and can be written to
+    @param test_id: Test id to use for creating components
+    @type test_id: C{Str}
+    @param availability_domain: Availability domain to create resource in
+    @type availability_domain: C{Str}
+    @param volume: Name of volume to verify
+    @type volume: C{Str}
+    @param file_name: Name of file to do checks for
+    @type file_name: C{Str}'''
+    _ocid = volume.split('.')
+    _ocid = _ocid[-1]
+    _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-pod-fss.template",
+                                             test_id, availability_domain, _ocid)
+    utils.log("Does the file exist in the mounted file system?")
+    stdout = _kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html")
+    if file_name not in stdout.split("\n"):
+        utils.log("Error: Failed to find file %s in mounted volume" % file_name)
+        utils.log("Deleting the replication controller (deletes the single nginx pod).")
     _kubectl("delete -f " + _rc_config)

 # Canary Metrics **************************************************************
-#
+#
 CM_SIMPLE = "volume_provisioner_simple"
 CM_EXT3 = "volume_provisioner_ext3"
 CM_NO_AD = "volume_provisioner_no_ad"
-CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup"
+CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup"

 def canary_metric_date():
     return datetime.datetime.today().strftime('%Y-%m-%d-%H%m%S')
@@ -692,7 +683,7 @@ def init_canary_metrics(check_oci):
     canary_metrics[CM_EXT3] = 0
     canary_metrics[CM_NO_AD] = 0
     if check_oci:
-        canary_metrics[CM_VOLUME_FROM_BACKUP] = 0
+        canary_metrics[CM_VOLUME_FROM_BACKUP] = 0
     with open(os.environ.get("METRICS_FILE"), 'w') as metrics_file:
         json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4)
@@ -710,10 +701,12 @@ def finish_canary_metrics():
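Before _main(), one runner detail is worth spelling out: _create_yaml (patched earlier in this diff) substitutes only the {{...}} placeholders it was given values for, and when mount_target_ocid is unset it drops every line that mentions MNT_TARGET_OCID, so the rendered storage class simply omits the optional parameter. A condensed, self-contained sketch of that behaviour, generalised to drop any line whose placeholder stays unresolved (illustrative, not the runner's actual code):

    import re

    def render_template(lines, values):
        rendered = []
        for line in lines:
            for key, value in values.items():
                if value is not None:
                    line = re.sub('{{' + key + '}}', value, line)
            if re.search('{{.*}}', line):
                continue  # placeholder left unresolved: drop the line
            rendered.append(line)
        return rendered

    # e.g. render_template(template_lines,
    #                      {"TEST_ID": "abcd1234", "MNT_TARGET_OCID": None})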
 # Main ************************************************************************
-#
+#

 def _main():
-    _reset_debug_file()
+    utils.reset_debug_file()
     args = _handle_args()

     _check_env(args['check_oci'])
@@ -725,7 +718,7 @@ def _destroy_key_files_atexit():
     test_id = str(uuid.uuid4())[:8]

     success = True
-    _storageClassFile = _create_yaml("../../examples/example-storage-class-fss.template", test_id,
+    _storageClassFile = _create_yaml("../../examples/example-storage-class-fss.template", test_id,
                                      mount_target_ocid=os.environ.get(MNT_TARGET_OCID))

     _k8sResources = [_storageClassFile,
@@ -736,7 +729,7 @@
     if args['setup']:
         # Cleanup in case any existing state exists in the cluster
         _cleanup(k8sResources=_k8sResources, display_errors=False)
-        _log("Setting up the volume provisioner", as_banner=True)
+        utils.log("Setting up the volume provisioner", as_banner=True)
         _kubectl("-n kube-system create secret generic oci-volume-provisioner " + \
                  "--from-file=config.yaml=" + _get_oci_config_file(),
                  exit_on_error=False)
@@ -749,13 +742,13 @@
     if args['teardown']:
         def _teardown_atexit():
-            _log("Tearing down the volume provisioner", as_banner=True)
+            utils.log("Tearing down the volume provisioner", as_banner=True)
             _cleanup(k8sResources=_k8sResources)
         atexit.register(_teardown_atexit)

     if not args['no_test']:
         _log("Running system test: Simple", as_banner=True)
-        init_canary_metrics(args['check_oci'])
+        init_canary_metrics(args['check_oci'])
         res = _test_create_volume(compartment_id,
                                   _create_yaml("../../examples/example-claim.template", test_id, _get_region()),
                                   "demooci-" + test_id, args['check_oci'])
@@ -767,17 +760,14 @@
                                   _create_yaml("../../examples/example-claim-ext3.template", test_id, None),
                                   "demooci-ext3-" + test_id, args['check_oci'])
         update_canary_metric(CM_EXT3, int(res))
-        success = False if res == False else success
+        success = False if res == False else success

         _log("Running system test: No AD specified", as_banner=True)
         res = _test_create_volume(compartment_id,
                                   _create_yaml("../../examples/example-claim-no-AD.template", test_id, None),
                                   "demooci-no-ad-" + test_id, args['check_oci'])
-<<<<<<< HEAD
         update_canary_metric(CM_NO_AD, int(res))
         success = False if res == False else success
-=======
->>>>>>> cc023ec8... File system storage
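update_canary_metric is called after each test above but its body never appears in this diff; it is presumably a read-modify-write of the METRICS_FILE JSON that init_canary_metrics seeds, along these lines (an assumption for orientation, not the repository's implementation):

    import json
    import os

    def update_canary_metric(metric_name, value):
        # Hypothetical sketch: load the metrics written by
        # init_canary_metrics, set one entry, and write the file back.
        with open(os.environ.get("METRICS_FILE")) as metrics_file:
            canary_metrics = json.load(metrics_file)
        canary_metrics[metric_name] = value
        with open(os.environ.get("METRICS_FILE"), 'w') as metrics_file:
            json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4)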
         _log("Running system test: Create volume with FSS", as_banner=True)
         _test_create_volume(compartment_id,
@@ -785,11 +775,11 @@ def _teardown_atexit():
                             "demooci-fss-" + test_id, args['check_oci'],
                             availability_domain=DEFAULT_AVAILABILITY_DOMAIN, storageType=FS_STORAGE)

         _log("Running system test: Create volume from backup", as_banner=True)
-        if args['check_oci']:
+        if args['check_oci']:
             _log("Running system test: Create volume from backup", as_banner=True)
             terraform_env = _get_terraform_env()
             _backup_ocid, _availability_domain = _setup_create_volume_from_backup(terraform_env, test_id)
-            _claim_target = _create_yaml("../../examples/example-claim-from-backup.template", test_id,
+            _claim_target = _create_yaml("../../examples/example-claim-from-backup.template", test_id,
                                          region=_availability_domain.split(':')[1], backup_id=_backup_ocid)
             res = _test_create_volume(compartment_id, _claim_target,
                                       "demooci-from-backup-" + test_id, args['check_oci'],
@@ -801,7 +791,7 @@ def _teardown_atexit():

     if not success:
         _finish_with_exit_code(1)
-    else:
+    else:
         _finish_with_exit_code(0)

 if __name__ == "__main__":
diff --git a/test/system/utils.py b/test/system/utils.py
new file mode 100644
index 000000000..aea35fb59
--- /dev/null
+++ b/test/system/utils.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import sys
+import select
+import subprocess
+from shutil import copyfile
+
+DEBUG_FILE = "runner.log"
+REPORT_DIR_PATH="/tmp/results"
+REPORT_FILE="done"
+
+def _banner(as_banner, bold):
+    if as_banner:
+        if bold:
+            print "********************************************************"
+        else:
+            print "--------------------------------------------------------"
+
+def _process_stream(stream, read_fds, global_buf, line_buf):
+    char = stream.read(1)
+    if char == '':
+        read_fds.remove(stream)
+    global_buf.append(char)
+    line_buf.append(char)
+    if char == '\n':
+        _debug_file(''.join(line_buf))
+        line_buf = []
+    return line_buf
+
+def _poll(stdout, stderr):
+    stdoutbuf = []
+    stdoutbuf_line = []
+    stderrbuf = []
+    stderrbuf_line = []
+    read_fds = [stdout, stderr]
+    x_fds = [stdout, stderr]
+    while read_fds:
+        rlist, _, _ = select.select(read_fds, [], x_fds)
+        if rlist:
+            for stream in rlist:
+                if stream == stdout:
+                    stdoutbuf_line = _process_stream(stream, read_fds, stdoutbuf, stdoutbuf_line)
+                if stream == stderr:
+                    stderrbuf_line = _process_stream(stream, read_fds, stderrbuf, stderrbuf_line)
+    return (''.join(stdoutbuf), ''.join(stderrbuf))
+
+# On exit return 0 for success or any other integer for a failure.
+# If write_report is true then write a completion file to the Sonobuoy plugin result file.
+# The default location is: /tmp/results/done +def finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_DIR_PATH, report_file=REPORT_FILE): + print "finishing with exit code: " + str(exit_code) + if write_report: + if not os.path.exists(report_dir_path): + os.makedirs(report_dir_path) + if exit_code == 0: + _debug_file("\nTest Suite Success\n") + else: + _debug_file("\nTest Suite Failed\n") + time.sleep(3) + copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE) + with open(report_dir_path + "/" + report_file, "w+") as file: + file.write(str(report_dir_path + "/" + DEBUG_FILE)) + sys.exit(exit_code) + +def reset_debug_file(): + if os.path.exists(DEBUG_FILE): + os.remove(DEBUG_FILE) + +def _debug_file(string): + with open(DEBUG_FILE, "a") as debug_file: + debug_file.write(string) + + +def log(string, as_banner=False, bold=False): + _banner(as_banner, bold) + print string + _banner(as_banner, bold) + +def run_command(cmd, cwd, display_errors=True): + log(cwd + ": " + cmd) + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, cwd=cwd) + (stdout, stderr) = _poll(process.stdout, process.stderr) + returncode = process.wait() + if returncode != 0 and display_errors: + log(" stdout: " + stdout) + log(" stderr: " + stderr) + log(" result: " + str(returncode)) + return (stdout, stderr, returncode) \ No newline at end of file From 8fa9e37d3eda3c0792f10e5314cc44d5d0088b45 Mon Sep 17 00:00:00 2001 From: MadalinaPatrichi Date: Mon, 30 Jul 2018 16:54:16 +0100 Subject: [PATCH 5/5] Refactored system unit tests -Added unit test for mount target ocid specified -Rebased from master and refactored provisioner deployment --- .gitignore | 4 +- Gopkg.toml | 2 +- cmd/main.go | 16 +- examples/example-claim-fss.yaml | 6 +- hack/check-golint.sh | 2 +- pkg/oci/client/client.go | 20 +- pkg/provisioner/block/block_test.go | 79 +- pkg/provisioner/core/provisioner.go | 34 +- pkg/provisioner/core/utils.go | 12 +- pkg/provisioner/filestorage/filestorage.go | 218 ------ .../filestorage/filestorage_test.go | 41 - pkg/provisioner/fss/fss.go | 271 +++++++ pkg/provisioner/fss/fss_test.go | 80 ++ .../mock_interfaces.go | 45 +- pkg/provisioner/plugin/plugin.go | 2 +- test/system/README.md | 4 +- test/system/backup_vol_system_test.py | 134 ++++ test/system/block_system_test.py | 54 ++ test/system/canary_metrics.py | 48 ++ test/system/fss_system_test.py | 88 +++ test/system/runner.py | 730 +----------------- .../templates}/example-claim-ext3.template | 0 .../example-claim-from-backup.template | 0 .../templates}/example-claim-fss.template | 0 .../templates}/example-claim-no-AD.template | 0 .../system/templates}/example-claim.template | 0 .../templates}/example-pod-fss.template | 2 +- ...tion-controller-with-volume-claim.template | 0 .../example-replication-controller.template | 0 .../example-storage-class-fss-empty.template | 7 + .../example-storage-class-fss-mnt.template | 8 + .../example-storage-class-fss-subnet.template | 4 +- test/system/utils.py | 22 +- test/system/vol_provisioner_system_test.py | 382 +++++++++ test/system/yaml_utils.py | 81 ++ 35 files changed, 1308 insertions(+), 1088 deletions(-) delete mode 100644 pkg/provisioner/filestorage/filestorage.go delete mode 100644 pkg/provisioner/filestorage/filestorage_test.go create mode 100644 pkg/provisioner/fss/fss.go create mode 100644 pkg/provisioner/fss/fss_test.go rename pkg/{helpers => provisioner}/mock_interfaces.go (77%) create mode 100644 test/system/backup_vol_system_test.py create mode 100644 
test/system/block_system_test.py create mode 100644 test/system/canary_metrics.py create mode 100644 test/system/fss_system_test.py rename {examples => test/system/templates}/example-claim-ext3.template (100%) rename {examples => test/system/templates}/example-claim-from-backup.template (100%) rename {examples => test/system/templates}/example-claim-fss.template (100%) rename {examples => test/system/templates}/example-claim-no-AD.template (100%) rename {examples => test/system/templates}/example-claim.template (100%) rename {examples => test/system/templates}/example-pod-fss.template (90%) rename {examples => test/system/templates}/example-replication-controller-with-volume-claim.template (100%) rename {examples => test/system/templates}/example-replication-controller.template (100%) create mode 100644 test/system/templates/example-storage-class-fss-empty.template create mode 100644 test/system/templates/example-storage-class-fss-mnt.template rename examples/example-storage-class-fss.template => test/system/templates/example-storage-class-fss-subnet.template (58%) create mode 100644 test/system/vol_provisioner_system_test.py create mode 100644 test/system/yaml_utils.py diff --git a/.gitignore b/.gitignore index 0dd5b90b7..eb4fb23d8 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,6 @@ env.sh *.log test/system/venv/ test/system/run-test-image.yaml* -examples/*.yaml \ No newline at end of file +test/system/templates/*.yaml +*.pyc +.vscode/launch.json diff --git a/Gopkg.toml b/Gopkg.toml index 9efa44d40..0083b064e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -48,7 +48,7 @@ ignored = ["k8s.io/client-go/pkg/api/v1"] [[override]] name = "github.com/docker/distribution" -revision = "f0cc927784781fa395c06317c58dea2841ece3a9" # Lock in to version 2.6.3 when it is released +revision = "f0cc927784781fa395c06317c58dea2841ece3a9" # Lock in to version 2.6.3 when it is released [[override]] name = "k8s.io/api" diff --git a/cmd/main.go b/cmd/main.go index 40710e1e5..3bb180faf 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -26,25 +26,21 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/provisioner/core" "github.com/oracle/oci-volume-provisioner/pkg/signals" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - - "k8s.io/apimachinery/pkg/api/resource" ) const ( resyncPeriod = 15 * time.Second minResyncPeriod = 12 * time.Hour - provisionerNameBlock = "oracle.com/oci" - provisionerNameFss = "oracle.com/oci-fss" exponentialBackOffOnError = false failedRetryThreshold = 5 leasePeriod = controller.DefaultLeaseDuration retryPeriod = controller.DefaultRetryPeriod renewDeadline = controller.DefaultRenewDeadline termLimit = controller.DefaultTermLimit - provisionerTypeArg = "provisionerType" ) // informerResyncPeriod computes the time interval a shared informer waits @@ -58,7 +54,7 @@ func informerResyncPeriod(minResyncPeriod time.Duration) func() time.Duration { func main() { syscall.Umask(0) - + rand.Seed(time.Now().Unix()) kubeconfig := flag.String("kubeconfig", "", "Path to Kubeconfig file with authorization and master location information.") volumeRoundingEnabled := flag.Bool("rounding-enabled", true, "When enabled volumes will be rounded up if less than 'minVolumeSizeMB'") minVolumeSize := flag.String("min-volume-size", "50Gi", "The minimum size for a block volume. 
By default OCI only supports block volumes > 50GB") @@ -97,7 +93,7 @@ func main() { // Decides what type of provider to deploy, either block or fss provisionerType := os.Getenv("PROVISIONER_TYPE") if provisionerType == "" { - provisionerType = provisionerNameBlock + provisionerType = core.ProvisionerNameBlock } glog.Infof("Starting volume provisioner in %s mode", provisionerType) @@ -111,8 +107,10 @@ func main() { // Create the provisioner: it implements the Provisioner interface expected by // the controller - ociProvisioner := core.NewOCIProvisioner(clientset, sharedInformerFactory.Core().V1().Nodes(), nodeName, *volumeRoundingEnabled, volumeSizeLowerBound) - + ociProvisioner, err := core.NewOCIProvisioner(clientset, sharedInformerFactory.Core().V1().Nodes(), provisionerType, nodeName, *volumeRoundingEnabled, volumeSizeLowerBound) + if err != nil { + glog.Fatalf("Cannot create volume provisioner %v", err) + } // Start the provision controller which will dynamically provision oci // PVs pc := controller.NewProvisionController( diff --git a/examples/example-claim-fss.yaml b/examples/example-claim-fss.yaml index 48b4c7f60..f0e513abd 100644 --- a/examples/example-claim-fss.yaml +++ b/examples/example-claim-fss.yaml @@ -3,12 +3,12 @@ apiVersion: v1 metadata: name: demooci spec: - storageClassName: "fss" + storageClassName: "oci-fss" selector: matchLabels: - oci-availability-domain: "PHX-AD-1" + failure-domain.beta.kubernetes.io/zone: "PHX-AD-1" accessModes: - ReadWriteOnce resources: requests: - storage: 50Gi \ No newline at end of file + storage: 50Gi diff --git a/hack/check-golint.sh b/hack/check-golint.sh index 0dd0ea17e..17b40516d 100755 --- a/hack/check-golint.sh +++ b/hack/check-golint.sh @@ -22,7 +22,7 @@ set -o pipefail TARGETS=$(for d in "$@"; do echo ./$d/...; done) echo -n "Checking golint: " -ERRS=$(golint ${TARGETS} | grep -v mock_interfaces.go 2>&1 || true) +ERRS=$(golint ${TARGETS} 2>&1 | grep -v mock_interfaces.go || true) if [ -n "${ERRS}" ]; then echo "FAIL" echo "${ERRS}" diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index f0ee6dfa2..0907c79d4 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -28,13 +28,13 @@ import ( "time" "github.com/golang/glog" - "github.com/pkg/errors" - "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/common/auth" "github.com/oracle/oci-go-sdk/core" "github.com/oracle/oci-go-sdk/filestorage" "github.com/oracle/oci-go-sdk/identity" + "github.com/pkg/errors" + "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" ) @@ -62,8 +62,8 @@ type Identity interface { ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) } -// FileStorage specifies the subset of the OCI core API utilised by the provisioner. -type FileStorage interface { +// FSS specifies the subset of the OCI core API utilised by the provisioner. 
+type FSS interface { CreateFileSystem(ctx context.Context, request filestorage.CreateFileSystemRequest) (response filestorage.CreateFileSystemResponse, err error) DeleteFileSystem(ctx context.Context, request filestorage.DeleteFileSystemRequest) (response filestorage.DeleteFileSystemResponse, err error) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) @@ -73,8 +73,8 @@ type FileStorage interface { ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) } -//VirtualNetwork specifies the subset of the OCI core API utilised by the provisioner. -type VirtualNetwork interface { +//VCN specifies the subset of the OCI core API utilised by the provisioner. +type VCN interface { GetPrivateIp(ctx context.Context, request core.GetPrivateIpRequest) (response core.GetPrivateIpResponse, err error) } @@ -82,8 +82,8 @@ type VirtualNetwork interface { type ProvisionerClient interface { BlockStorage() BlockStorage Identity() Identity - FileStorage() FileStorage - VirtualNetwork() VirtualNetwork + FSS() FSS + VCN() VCN Context() context.Context Timeout() time.Duration CompartmentOCID() string @@ -98,11 +98,11 @@ func (p *provisionerClient) Identity() Identity { return p.identity } -func (p *provisionerClient) FileStorage() FileStorage { +func (p *provisionerClient) FSS() FSS { return p.fileStorage } -func (p *provisionerClient) VirtualNetwork() VirtualNetwork { +func (p *provisionerClient) VCN() VCN { return p.virtualNetwork } diff --git a/pkg/provisioner/block/block_test.go b/pkg/provisioner/block/block_test.go index 47c1b2761..d26a5e4af 100644 --- a/pkg/provisioner/block/block_test.go +++ b/pkg/provisioner/block/block_test.go @@ -15,19 +15,18 @@ package block import ( - "context" "fmt" "testing" "time" - "github.com/oracle/oci-volume-provisioner/pkg/helpers" - "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/oracle/oci-go-sdk/common" "github.com/oracle/oci-go-sdk/core" "github.com/oracle/oci-go-sdk/identity" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,6 +36,9 @@ var ( volumeBackupID = "dummyVolumeBackupId" defaultAD = identity.AvailabilityDomain{Name: common.String("PHX-AD-1"), CompartmentId: common.String("ocid1.compartment.oc1")} fileSystemID = "dummyFileSystemId" + exportID = "dummyExportID" + serverIPs = []string{"dummyServerIP"} + privateIP = "127.0.0.1" ) func TestResolveFSTypeWhenNotConfigured(t *testing.T) { @@ -57,65 +59,6 @@ func TestResolveFSTypeWhenConfigured(t *testing.T) { } } -type mockBlockStorageClient struct { - volumeState core.VolumeLifecycleStateEnum -} - -func (c *mockBlockStorageClient) CreateVolume(ctx context.Context, request core.CreateVolumeRequest) (response core.CreateVolumeResponse, err error) { - return core.CreateVolumeResponse{Volume: core.Volume{Id: common.String(volumeBackupID)}}, nil -} - -func (c *mockBlockStorageClient) DeleteVolume(ctx context.Context, request core.DeleteVolumeRequest) (response core.DeleteVolumeResponse, err error) { - return core.DeleteVolumeResponse{}, nil -} - -func (c *mockBlockStorageClient) GetVolume(ctx context.Context, request core.GetVolumeRequest) (response core.GetVolumeResponse, 
err error) { - return core.GetVolumeResponse{Volume: core.Volume{LifecycleState: c.volumeState}}, nil -} - -type mockIdentityClient struct { - common.BaseClient -} - -func (client mockIdentityClient) ListAvailabilityDomains(ctx context.Context, request identity.ListAvailabilityDomainsRequest) (response identity.ListAvailabilityDomainsResponse, err error) { - return -} - -type mockProvisionerClient struct { - storage *mockBlockStorageClient -} - -func (p *mockProvisionerClient) BlockStorage() client.BlockStorage { - return p.storage -} - -func (p *mockProvisionerClient) Identity() client.Identity { - return &mockIdentityClient{} -} - -func (p *mockProvisionerClient) Context() context.Context { - return context.Background() -} - -func (p *mockProvisionerClient) Timeout() time.Duration { - return 30 * time.Second -} - -func (p *mockProvisionerClient) CompartmentOCID() (compartmentOCID string) { - return "" -} - -func (p *mockProvisionerClient) TenancyOCID() string { - return "ocid1.tenancy.oc1..aaaaaaaatyn7scrtwtqedvgrxgr2xunzeo6uanvyhzxqblctwkrpisvke4kq" -} - -// NewClientProvisioner creates an OCI client from the given configuration. -func NewClientProvisioner(pcData client.ProvisionerClient, - storage *mockBlockStorageClient, -) client.ProvisionerClient { - return &mockProvisionerClient{storage: storage} -} - func TestCreateVolumeFromBackup(t *testing.T) { // test creating a volume from an existing backup options := controller.VolumeOptions{ @@ -123,7 +66,7 @@ func TestCreateVolumeFromBackup(t *testing.T) { PVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - ociVolumeBackupID: helpers.VolumeBackupID, + ociVolumeBackupID: volumeBackupID, }, }, Spec: v1.PersistentVolumeClaimSpec{ @@ -137,7 +80,7 @@ func TestCreateVolumeFromBackup(t *testing.T) { }} block := NewBlockProvisioner( - NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: core.VolumeLifecycleStateAvailable}), + provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: core.VolumeLifecycleStateAvailable}), instancemeta.NewMock(&instancemeta.InstanceMetadata{ CompartmentOCID: "", Region: "phx", @@ -150,8 +93,8 @@ func TestCreateVolumeFromBackup(t *testing.T) { if err != nil { t.Fatalf("Failed to provision volume from block storage: %v", err) } - if provisionedVolume.Annotations[ociVolumeID] != helpers.VolumeBackupID { - t.Fatalf("Failed to assign the id of the blockID: %s, assigned %s instead", helpers.VolumeBackupID, + if provisionedVolume.Annotations[ociVolumeID] != volumeBackupID { + t.Fatalf("Failed to assign the id of the blockID: %s, assigned %s instead", volumeBackupID, provisionedVolume.Annotations[ociVolumeID]) } } @@ -183,7 +126,7 @@ func TestCreateVolumeFailure(t *testing.T) { }, }} - block := NewBlockProvisioner(NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: tt.state}), + block := NewBlockProvisioner(provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: tt.state}), instancemeta.NewMock(&instancemeta.InstanceMetadata{ CompartmentOCID: "", Region: "phx", @@ -223,7 +166,7 @@ func TestVolumeRoundingLogic(t *testing.T) { CompartmentOCID: "", Region: "phx", }) - block := NewBlockProvisioner(NewClientProvisioner(nil, &mockBlockStorageClient{volumeState: core.VolumeLifecycleStateAvailable}), + block := NewBlockProvisioner(provisioner.NewClientProvisioner(nil, &provisioner.MockBlockStorageClient{VolumeState: core.VolumeLifecycleStateAvailable}), metadata, tt.enabled, tt.minVolumeSize, diff --git 
a/pkg/provisioner/core/provisioner.go b/pkg/provisioner/core/provisioner.go index 6f32b8d31..70f54e5f6 100644 --- a/pkg/provisioner/core/provisioner.go +++ b/pkg/provisioner/core/provisioner.go @@ -15,14 +15,14 @@ package core import ( - "errors" "os" "strings" "time" "github.com/golang/glog" - "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/pkg/errors" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" informersv1 "k8s.io/client-go/informers/core/v1" @@ -34,10 +34,17 @@ import ( "github.com/oracle/oci-volume-provisioner/pkg/oci/client" "github.com/oracle/oci-volume-provisioner/pkg/oci/instancemeta" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/block" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner/fss" "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" ) const ( + // ProvisionerNameDefault is the name of the default OCI volume provisioner (block) + ProvisionerNameDefault = "oracle.com/oci" + // ProvisionerNameBlock is the name of the OCI block volume provisioner + ProvisionerNameBlock = "oracle.com/oci-block" + // ProvisionerNameFss is the name of the OCI FSS dedicated storage provisioner + ProvisionerNameFss = "oracle.com/oci-fss" ociProvisionerIdentity = "ociProvisionerIdentity" ociAvailabilityDomain = "ociAvailabilityDomain" ociCompartment = "ociCompartment" @@ -56,7 +63,7 @@ type OCIProvisioner struct { } // NewOCIProvisioner creates a new OCI provisioner. -func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1.NodeInformer, nodeName string, volumeRoundingEnabled bool, minVolumeSize resource.Quantity) *OCIProvisioner { +func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1.NodeInformer, provisionerType string, nodeName string, volumeRoundingEnabled bool, minVolumeSize resource.Quantity) (*OCIProvisioner, error) { configPath, ok := os.LookupEnv("CONFIG_YAML_FILENAME") if !ok { configPath = configFilePath @@ -77,19 +84,24 @@ func NewOCIProvisioner(kubeClient kubernetes.Interface, nodeInformer informersv1 if err != nil { glog.Fatalf("Unable to create volume provisioner client: %v", err) } - - blockProvisioner := block.NewBlockProvisioner(client, instancemeta.New(), - volumeRoundingEnabled, - minVolumeSize, - time.Minute*3) - + var provisioner plugin.ProvisionerPlugin + switch provisionerType { + case ProvisionerNameDefault: + provisioner = block.NewBlockProvisioner(client, instancemeta.New(), volumeRoundingEnabled, minVolumeSize, time.Minute*3) + case ProvisionerNameBlock: + provisioner = block.NewBlockProvisioner(client, instancemeta.New(), volumeRoundingEnabled, minVolumeSize, time.Minute*3) + case ProvisionerNameFss: + provisioner = fss.NewFilesystemProvisioner(client) + default: + return nil, errors.Errorf("invalid provisioner type %q", provisionerType) + } return &OCIProvisioner{ client: client, kubeClient: kubeClient, nodeLister: nodeInformer.Lister(), nodeListerSynced: nodeInformer.Informer().HasSynced, - provisioner: blockProvisioner, - } + provisioner: provisioner, + }, nil } var _ controller.Provisioner = &OCIProvisioner{} diff --git a/pkg/provisioner/core/utils.go b/pkg/provisioner/core/utils.go index 96fae286d..504f4f122 100644 --- a/pkg/provisioner/core/utils.go +++ b/pkg/provisioner/core/utils.go @@ -19,17 +19,15 @@ import ( "fmt" "strings" - "github.com/golang/glog" - - "github.com/oracle/oci-go-sdk/common" - "github.com/oracle/oci-go-sdk/identity" - "k8s.io/api/core/v1" - metav1 "k8s.io/kubernetes/pkg/kubelet/apis" - 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" + metav1 "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume/util" + + "github.com/golang/glog" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/identity" ) func (p *OCIProvisioner) findADByName(name string) (*identity.AvailabilityDomain, error) { diff --git a/pkg/provisioner/filestorage/filestorage.go b/pkg/provisioner/filestorage/filestorage.go deleted file mode 100644 index 0fc1fbcdc..000000000 --- a/pkg/provisioner/filestorage/filestorage.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package filestorage - -import ( - "context" - "errors" - "fmt" - "math/rand" - "os" - "time" - - "github.com/golang/glog" - "github.com/kubernetes-incubator/external-storage/lib/controller" - "github.com/oracle/oci-go-sdk/common" - "github.com/oracle/oci-go-sdk/core" - "github.com/oracle/oci-go-sdk/filestorage" - "github.com/oracle/oci-go-sdk/identity" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" - - "github.com/oracle/oci-volume-provisioner/pkg/oci/client" - "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" -) - -const ( - ociVolumeID = "ociVolumeID" - ociExportID = "ociExportID" - volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX" - fsType = "fsType" - subnetID = "subnetId" - mntTargetID = "mntTargetId" -) - -// filesystemProvisioner is the internal provisioner for OCI filesystem volumes -type filesystemProvisioner struct { - client client.ProvisionerClient -} - -var _ plugin.ProvisionerPlugin = &filesystemProvisioner{} - -// NewFilesystemProvisioner creates a new file system provisioner that creates -// filsystems using OCI file system service. 
-func NewFilesystemProvisioner(client client.ProvisionerClient) plugin.ProvisionerPlugin { - return &filesystemProvisioner{ - client: client, - } -} - -// getMountTargetFromID retrieves mountTarget from given mountTargetID -func getMountTargetFromID(ctx context.Context, mountTargetID string, fileStorageClient client.FileStorage) *filestorage.MountTarget { - responseMnt, err := fileStorageClient.GetMountTarget(ctx, filestorage.GetMountTargetRequest{ - MountTargetId: common.String(mountTargetID), - }) - if err != nil { - glog.Errorf("Failed to retrieve mount point: %s", err) - return nil - } - return &responseMnt.MountTarget -} - -func (filesystem *filesystemProvisioner) Provision( - options controller.VolumeOptions, - availabilityDomain *identity.AvailabilityDomain) (*v1.PersistentVolume, error) { - - ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout()) - defer cancel() - - fileStorageClient := filesystem.client.FileStorage() - response, err := fileStorageClient.CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{ - CreateFileSystemDetails: filestorage.CreateFileSystemDetails{ - AvailabilityDomain: availabilityDomain.Name, - CompartmentId: common.String(filesystem.client.CompartmentOCID()), - DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), options.PVC.Name)), - }, - }) - if err != nil { - glog.Errorf("Failed to create a file system storage:%#v, %s", options, err) - return nil, err - } - - mntTargetResp := filestorage.MountTarget{} - if options.Parameters[mntTargetID] == "" { - // Check if there there already is a mount target in the existing compartment - glog.Infof("Looking up existing mount targets") - responseListMnt, err := fileStorageClient.ListMountTargets(ctx, filestorage.ListMountTargetsRequest{ - AvailabilityDomain: availabilityDomain.Name, - CompartmentId: common.String(filesystem.client.CompartmentOCID()), - }) - if err != nil { - glog.Errorf("Failed to list mount targets:%#v, %s", options, err) - return nil, err - } - if len(responseListMnt.Items) != 0 { - glog.Infof("Found mount targets to use") - rand.Seed(time.Now().Unix()) - mntTargetSummary := responseListMnt.Items[rand.Int()%len(responseListMnt.Items)] - mntTargetResp = *getMountTargetFromID(ctx, *mntTargetSummary.Id, fileStorageClient) - } else { - // Mount target not created, create a new one - responseMnt, err := fileStorageClient.CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ - CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ - AvailabilityDomain: availabilityDomain.Name, - SubnetId: common.String(options.Parameters[subnetID]), - CompartmentId: common.String(filesystem.client.CompartmentOCID()), - DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), - }, - }) - if err != nil { - glog.Errorf("Failed to create a mount target:%#v, %s", options, err) - return nil, err - } - mntTargetResp = responseMnt.MountTarget - } - } else { - // Mount target already specified in the configuration file, find it in the list of mount targets - mntTargetResp = *getMountTargetFromID(ctx, options.Parameters[mntTargetID], fileStorageClient) - } - - glog.Infof("Creating export set") - createExportResponse, err := fileStorageClient.CreateExport(ctx, filestorage.CreateExportRequest{ - CreateExportDetails: filestorage.CreateExportDetails{ - ExportSetId: mntTargetResp.ExportSetId, - FileSystemId: response.FileSystem.Id, - Path: common.String("/" + *response.FileSystem.Id), - }, - }) - - if err != nil { 
- glog.Errorf("Failed to create export:%s", err) - return nil, err - } - serverIP := "" - if len(mntTargetResp.PrivateIpIds) != 0 { - privateIPID := mntTargetResp.PrivateIpIds[rand.Int()%len(mntTargetResp.PrivateIpIds)] - virtualNetworkClient := filesystem.client.VirtualNetwork() - getPrivateIPResponse, err := virtualNetworkClient.GetPrivateIp(ctx, core.GetPrivateIpRequest{ - PrivateIpId: common.String(privateIPID), - }) - if err != nil { - glog.Errorf("Failed to retrieve IP address for mount target:%s", err) - return nil, err - } - serverIP = *getPrivateIPResponse.PrivateIp.IpAddress - } else { - glog.Errorf("Failed to find server IDs associated with the mount target to provision a persistent volume") - return nil, fmt.Errorf("Failed to find server IDs associated with the mount target") - } - - glog.Infof("Creating persistent volume on mount target with private IP address %s", serverIP) - return &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: *response.FileSystem.Id, - Annotations: map[string]string{ - ociVolumeID: *response.FileSystem.Id, - ociExportID: *createExportResponse.Export.Id, - }, - Labels: map[string]string{}, - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, - AccessModes: options.PVC.Spec.AccessModes, - //FIXME: fs storage doesn't enforce quota, capacity is meaningless here. - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], - }, - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ - // Randomnly select IP address associated with the mount target to use for attachment - Server: serverIP, - Path: *common.String("/" + *response.FileSystem.Id), - ReadOnly: false, - }, - }, - }, - }, nil -} - -// Delete destroys a OCI volume created by Provision -func (filesystem *filesystemProvisioner) Delete(volume *v1.PersistentVolume) error { - exportID, ok := volume.Annotations[ociExportID] - if !ok { - return errors.New("Export ID annotation not found on PV") - } - filesystemID, ok := volume.Annotations[ociVolumeID] - if !ok { - return errors.New("Filesystem ID annotation not found on PV") - } - ctx, cancel := context.WithTimeout(filesystem.client.Context(), filesystem.client.Timeout()) - defer cancel() - glog.Infof("Deleting export for filesystemID %v", filesystemID) - _, err := filesystem.client.FileStorage().DeleteExport(ctx, - filestorage.DeleteExportRequest{ - ExportId: &exportID, - }) - if err != nil { - glog.Errorf("Failed to delete export:%s, %s", exportID, err) - return err - } - glog.Infof("Deleting volume %v with filesystemID %v", volume, filesystemID) - _, err = filesystem.client.FileStorage().DeleteFileSystem(ctx, - filestorage.DeleteFileSystemRequest{ - FileSystemId: &filesystemID, - }) - return err -} diff --git a/pkg/provisioner/filestorage/filestorage_test.go b/pkg/provisioner/filestorage/filestorage_test.go deleted file mode 100644 index 54fe3e2a2..000000000 --- a/pkg/provisioner/filestorage/filestorage_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package filestorage - -import ( - "testing" - - "github.com/kubernetes-incubator/external-storage/lib/controller" - "github.com/oracle/oci-go-sdk/common" - "github.com/oracle/oci-go-sdk/identity" - "github.com/oracle/oci-volume-provisioner/pkg/helpers" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" -) - -func TestCreateVolumeWithFSS(t *testing.T) { - // test creating a volume on a file system storage - options := controller.VolumeOptions{PVName: "dummyVolumeOptions", - PVC: &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{}, - }} - ad := identity.AvailabilityDomain{Name: common.String("dummyAdName"), CompartmentId: common.String("dummyCompartmentId")} - fss := filesystemProvisioner{client: helpers.NewClientProvisioner(nil)} - _, err := fss.Provision(options, &ad) - if err != nil { - t.Fatalf("Failed to provision volume from block storage: %v", err) - } - -} diff --git a/pkg/provisioner/fss/fss.go b/pkg/provisioner/fss/fss.go new file mode 100644 index 000000000..8781b2d9d --- /dev/null +++ b/pkg/provisioner/fss/fss.go @@ -0,0 +1,271 @@ +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fss + +import ( + "context" + "fmt" + "math/rand" + "os" + + "github.com/golang/glog" + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/core" + "github.com/oracle/oci-go-sdk/filestorage" + "github.com/oracle/oci-go-sdk/identity" + "github.com/pkg/errors" + + "github.com/oracle/oci-volume-provisioner/pkg/oci/client" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" + "github.com/oracle/oci-volume-provisioner/pkg/provisioner/plugin" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ociVolumeID = "volume.beta.kubernetes.io/oci-volume-id" + ociExportID = "volume.beta.kubernetes.io/oci-export-id" + volumePrefixEnvVarName = "OCI_VOLUME_NAME_PREFIX" + fsType = "fsType" + subnetID = "subnetId" + mntTargetID = "mntTargetId" +) + +// filesystemProvisioner is the internal provisioner for OCI filesystem volumes +type filesystemProvisioner struct { + client client.ProvisionerClient +} + +var _ plugin.ProvisionerPlugin = &filesystemProvisioner{} + +// NewFilesystemProvisioner creates a new file system provisioner that creates +// filesystems using OCI File System Service. 
+func NewFilesystemProvisioner(client client.ProvisionerClient) plugin.ProvisionerPlugin { + return &filesystemProvisioner{ + client: client, + } +} + +// getMountTargetFromID retrieves mountTarget from given mountTargetID +func (fsp *filesystemProvisioner) getMountTargetFromID(mountTargetID string) (*filestorage.MountTarget, error) { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + + resp, err := fsp.client.FSS().GetMountTarget(ctx, filestorage.GetMountTargetRequest{ + MountTargetId: &mountTargetID, + }) + if err != nil { + glog.Errorf("Failed to retrieve mount point mountTargetId=%q: %v", mountTargetID, err) + return nil, err + } + return &resp.MountTarget, nil +} + +// listAllMountTargets retrieves all available mount targets +func (fsp *filesystemProvisioner) listAllMountTargets(ad string) ([]filestorage.MountTargetSummary, error) { + var ( + page *string + mountTargets []filestorage.MountTargetSummary + ) + // Check if there already is a mount target in the existing compartment + for { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().ListMountTargets(ctx, filestorage.ListMountTargetsRequest{ + AvailabilityDomain: &ad, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + Page: page, + }) + if err != nil { + return nil, err + } + mountTargets = append(mountTargets, resp.Items...) + if page = resp.OpcNextPage; resp.OpcNextPage == nil { + break + } + } + return mountTargets, nil +} + +func (fsp *filesystemProvisioner) getOrCreateMountTarget(mtID string, ad string, subnetID string) (*filestorage.MountTarget, error) { + if mtID != "" { + // Mount target already specified in the configuration file, find it in the list of mount targets + return fsp.getMountTargetFromID(mtID) + } + mountTargets, err := fsp.listAllMountTargets(ad) + if err != nil { + return nil, err + } + if len(mountTargets) != 0 { + glog.V(4).Infof("Found mount targets to use") + mntTargetSummary := mountTargets[rand.Int()%len(mountTargets)] + target, err := fsp.getMountTargetFromID(*mntTargetSummary.Id) + return target, err + } + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + // Mount target not created, create a new one + resp, err := fsp.client.FSS().CreateMountTarget(ctx, filestorage.CreateMountTargetRequest{ + CreateMountTargetDetails: filestorage.CreateMountTargetDetails{ + AvailabilityDomain: &ad, + SubnetId: &subnetID, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), "mnt")), + }, + }) + if err != nil { + return nil, err + } + return &resp.MountTarget, nil +} + +func (fsp *filesystemProvisioner) Provision(options controller.VolumeOptions, ad *identity.AvailabilityDomain) (*v1.PersistentVolume, error) { + // Create the FileSystem. 
+	var fsID string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().CreateFileSystem(ctx, filestorage.CreateFileSystemRequest{ + CreateFileSystemDetails: filestorage.CreateFileSystemDetails{ + AvailabilityDomain: ad.Name, + CompartmentId: common.String(fsp.client.CompartmentOCID()), + DisplayName: common.String(fmt.Sprintf("%s%s", os.Getenv(volumePrefixEnvVarName), options.PVC.Name)), + }, + }) + if err != nil { + glog.Errorf("Failed to create a file system options=%#v: %v", options, err) + return nil, err + } + fsID = *resp.FileSystem.Id + } + + target, err := fsp.getOrCreateMountTarget(options.Parameters[mntTargetID], *ad.Name, options.Parameters[subnetID]) + if err != nil { + glog.Errorf("Failed to retrieve mount target: %s", err) + return nil, err + } + + glog.V(6).Infof("Creating export set") + // Create the ExportSet. + var exportSetID string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + resp, err := fsp.client.FSS().CreateExport(ctx, filestorage.CreateExportRequest{ + CreateExportDetails: filestorage.CreateExportDetails{ + ExportSetId: target.ExportSetId, + FileSystemId: &fsID, + Path: common.String("/" + fsID), + }, + }) + if err != nil { + glog.Errorf("Failed to create export: %v", err) + return nil, err + } + exportSetID = *resp.Export.Id + } + + if len(target.PrivateIpIds) == 0 { + // Dereference *target.Id so the OCID (not a pointer address) is logged. + glog.Errorf("Failed to find server IDs associated with the Mount Target (OCID %s) to provision a persistent volume", *target.Id) + return nil, errors.Errorf("failed to find server IDs associated with the Mount Target with OCID %q", *target.Id) + } + + // Get PrivateIP. + var serverIP string + { + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + id := target.PrivateIpIds[rand.Int()%len(target.PrivateIpIds)] + getPrivateIPResponse, err := fsp.client.VCN().GetPrivateIp(ctx, core.GetPrivateIpRequest{ + PrivateIpId: &id, + }) + if err != nil { + glog.Errorf("Failed to retrieve IP address for mount target privateIpID=%q: %v", id, err) + return nil, err + } + serverIP = *getPrivateIPResponse.PrivateIp.IpAddress + } + + glog.Infof("Creating persistent volume on mount target with private IP address %s", serverIP) + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: fsID, + Annotations: map[string]string{ + ociVolumeID: fsID, + ociExportID: exportSetID, + }, + Labels: map[string]string{}, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: options.PersistentVolumeReclaimPolicy, + AccessModes: options.PVC.Spec.AccessModes, + //FIXME: fs storage doesn't enforce quota, capacity is meaningless here.
+	Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)], + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + // Randomly select an IP address associated with the mount target to use for attachment + Server: serverIP, + Path: "/" + fsID, + ReadOnly: false, + }, + }, + }, + }, nil +} + +// Delete destroys an OCI volume created by Provision +func (fsp *filesystemProvisioner) Delete(volume *v1.PersistentVolume) error { + exportID, ok := volume.Annotations[ociExportID] + if !ok { + return errors.Errorf("%q annotation not found on PV", ociExportID) + } + + filesystemID, ok := volume.Annotations[ociVolumeID] + if !ok { + return errors.Errorf("%q annotation not found on PV", ociVolumeID) + } + + glog.Infof("Deleting export for filesystemID %v", filesystemID) + ctx, cancel := context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + if _, err := fsp.client.FSS().DeleteExport(ctx, filestorage.DeleteExportRequest{ + ExportId: &exportID, + }); err != nil { + if !provisioner.IsNotFound(err) { + glog.Errorf("Failed to delete export exportID=%q: %v", exportID, err) + return err + } + glog.Infof("ExportID %q was not found. Unable to delete it: %v", exportID, err) + } + + ctx, cancel = context.WithTimeout(fsp.client.Context(), fsp.client.Timeout()) + defer cancel() + + glog.Infof("Deleting volume %v with FileSystemID %v", volume, filesystemID) + _, err := fsp.client.FSS().DeleteFileSystem(ctx, filestorage.DeleteFileSystemRequest{ + FileSystemId: &filesystemID, + }) + if err != nil { + if !provisioner.IsNotFound(err) { + return err + } + glog.Infof("FileSystemID %q was not found. Unable to delete it: %v", filesystemID, err) + } + return nil +} diff --git a/pkg/provisioner/fss/fss_test.go b/pkg/provisioner/fss/fss_test.go new file mode 100644 index 000000000..cf27113ce --- /dev/null +++ b/pkg/provisioner/fss/fss_test.go @@ -0,0 +1,80 @@ +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package fss + +import ( + "reflect" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubernetes-incubator/external-storage/lib/controller" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/identity" + + "github.com/oracle/oci-volume-provisioner/pkg/provisioner" +) + +func TestGetMountTargetFromID(t *testing.T) { + // test retrieving a mount target from given ID + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.getMountTargetFromID("mtOCID") + if err != nil { + t.Fatalf("Failed to retrieve mount target from ID: %v", err) + } + if !reflect.DeepEqual(resp.PrivateIpIds, provisioner.ServerIPs) { + t.Fatalf("Incorrect response for retrieving mount target from ID") + } +} + +func TestListAllMountTargets(t *testing.T) { + // test listing all mount targets + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.listAllMountTargets("adOCID") + if err != nil { + t.Fatalf("Failed to list mount targets: %v", err) + } + if !reflect.DeepEqual(resp, provisioner.MountTargetItems) { + t.Fatalf("Incorrect response for listing mount targets") + } +} + +func TestGetOrCreateMountTarget(t *testing.T) { + // test get or create mount target + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + resp, err := fss.getOrCreateMountTarget("", provisioner.NilListMountTargetsADID, "subnetID") + if err != nil { + t.Fatalf("Failed to retrieve or create mount target: %v", err) + } + if *resp.Id != provisioner.CreatedMountTargetID { + t.Fatalf("Failed to create mount target") + } +} + +func TestCreateVolumeWithFSS(t *testing.T) { + // test creating a volume on a file system storage + options := controller.VolumeOptions{ + PVName: "dummyVolumeOptions", + PVC: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{}, + }} + ad := identity.AvailabilityDomain{Name: common.String("dummyAdName"), CompartmentId: common.String("dummyCompartmentId")} + fss := filesystemProvisioner{client: provisioner.NewClientProvisioner(nil, nil)} + _, err := fss.Provision(options, &ad) + if err != nil { + t.Fatalf("Failed to provision volume from fss storage: %v", err) + } +} diff --git a/pkg/helpers/mock_interfaces.go b/pkg/provisioner/mock_interfaces.go similarity index 77% rename from pkg/helpers/mock_interfaces.go rename to pkg/provisioner/mock_interfaces.go index 9af204b9c..a108b823e 100644 --- a/pkg/helpers/mock_interfaces.go +++ b/pkg/provisioner/mock_interfaces.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-package helpers +package provisioner import ( "context" @@ -30,12 +30,23 @@ var ( VolumeBackupID = "dummyVolumeBackupId" fileSystemID = "dummyFileSystemId" exportID = "dummyExportID" - serverIPs = []string{"dummyServerIP"} - privateIP = "127.0.0.1" + // NilListMountTargetsADID lists no mount targets for the given AD + NilListMountTargetsADID = "dummyNilListMountTargetsForADID" + mountTargetID = "dummyMountTargetID" + // CreatedMountTargetID for the dynamically created mount target + CreatedMountTargetID = "dummyCreatedMountTargetID" + // ServerIPs addresses for the mount target + ServerIPs = []string{"dummyServerIP"} + // MountTargetItems retrieved during listing + MountTargetItems = []filestorage.MountTargetSummary{filestorage.MountTargetSummary{Id: &mountTargetID}} + // EmptyMountTargetItems retrieved during listing + EmptyMountTargetItems = []filestorage.MountTargetSummary{} + privateIP = "127.0.0.1" ) // MockBlockStorageClient mocks BlockStorage client implementation type MockBlockStorageClient struct { + VolumeState core.VolumeLifecycleStateEnum } // CreateVolume mocks the BlockStorage CreateVolume implementation @@ -50,7 +61,7 @@ func (c *MockBlockStorageClient) DeleteVolume(ctx context.Context, request core. // GetVolume mocks the BlockStorage GetVolume implementation func (c *MockBlockStorageClient) GetVolume(ctx context.Context, request core.GetVolumeRequest) (response core.GetVolumeResponse, err error) { - return core.GetVolumeResponse{Volume: core.Volume{Id: common.String(VolumeBackupID)}}, nil + return core.GetVolumeResponse{Volume: core.Volume{LifecycleState: c.VolumeState}}, nil } // MockFileStorageClient mocks FileStorage client implementation @@ -79,17 +90,20 @@ func (c *MockFileStorageClient) DeleteExport(ctx context.Context, request filest // CreateMountTarget mocks the FileStorage CreateMountTarget implementation func (c *MockFileStorageClient) CreateMountTarget(ctx context.Context, request filestorage.CreateMountTargetRequest) (response filestorage.CreateMountTargetResponse, err error) { - return filestorage.CreateMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: serverIPs}}, nil + return filestorage.CreateMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: ServerIPs, Id: &CreatedMountTargetID}}, nil } // GetMountTarget mocks the FileStorage GetMountTarget implementation func (c *MockFileStorageClient) GetMountTarget(ctx context.Context, request filestorage.GetMountTargetRequest) (response filestorage.GetMountTargetResponse, err error) { - return filestorage.GetMountTargetResponse{}, nil + return filestorage.GetMountTargetResponse{MountTarget: filestorage.MountTarget{PrivateIpIds: ServerIPs}}, nil } // ListMountTargets mocks the FileStorage ListMountTargets implementation func (c *MockFileStorageClient) ListMountTargets(ctx context.Context, request filestorage.ListMountTargetsRequest) (response filestorage.ListMountTargetsResponse, err error) { - return filestorage.ListMountTargetsResponse{}, nil + if *request.AvailabilityDomain == NilListMountTargetsADID { + return filestorage.ListMountTargetsResponse{Items: EmptyMountTargetItems}, nil + } + return filestorage.ListMountTargetsResponse{Items: MountTargetItems}, nil } // MockVirtualNetworkClient mocks VirtualNetwork client implementation @@ -113,20 +127,21 @@ func (client MockIdentityClient) ListAvailabilityDomains(ctx context.Context, re // MockProvisionerClient mocks client structure type MockProvisionerClient struct { + Storage *MockBlockStorageClient } // BlockStorage mocks client
BlockStorage implementation func (p *MockProvisionerClient) BlockStorage() client.BlockStorage { - return &MockBlockStorageClient{} + return p.Storage } -// FileStorage mocks client FileStorage implementation -func (p *MockProvisionerClient) FileStorage() client.FileStorage { +// FSS mocks client FileStorage implementation +func (p *MockProvisionerClient) FSS() client.FSS { return &MockFileStorageClient{} } -// VirtualNetwork mocks client VirtualNetwork implementation -func (p *MockProvisionerClient) VirtualNetwork() client.VirtualNetwork { +// VCN mocks client VirtualNetwork implementation +func (p *MockProvisionerClient) VCN() client.VCN { return &MockVirtualNetworkClient{} } @@ -156,6 +171,6 @@ func (p *MockProvisionerClient) TenancyOCID() string { } // NewClientProvisioner creates an OCI client from the given configuration. -func NewClientProvisioner(pcData client.ProvisionerClient) client.ProvisionerClient { - return &MockProvisionerClient{} +func NewClientProvisioner(pcData client.ProvisionerClient, storage *MockBlockStorageClient) client.ProvisionerClient { + return &MockProvisionerClient{Storage: storage} } diff --git a/pkg/provisioner/plugin/plugin.go b/pkg/provisioner/plugin/plugin.go index 77d67d042..01a34520c 100644 --- a/pkg/provisioner/plugin/plugin.go +++ b/pkg/provisioner/plugin/plugin.go @@ -17,13 +17,13 @@ package plugin import ( "github.com/kubernetes-incubator/external-storage/lib/controller" "github.com/oracle/oci-go-sdk/identity" + "k8s.io/api/core/v1" ) const ( // OCIProvisionerName is the name of the provisioner defined in the storage class definitions OCIProvisionerName = "oracle/oci" - // LabelZoneFailureDomain the availability domain in which the PD resides. LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" // LabelZoneRegion the region in which the PD resides. diff --git a/test/system/README.md b/test/system/README.md index 8ed8d2a28..35b23e58e 100644 --- a/test/system/README.md +++ b/test/system/README.md @@ -9,13 +9,13 @@ We first need to setup the environment. The following must be defined: * $KUBECONFIG or $KUBECONFIG_VAR If the --check-oci argument is going to be set, then the following will also -need to be defined: +need to be defined: * $OCICONFIG or $OCICONFIG_VAR Note: If set, OCICONFIG/KUBECONFIG must contain the path to the required files. Alternatively, OCICONFIG_VAR/KUBECONFIG_VAR must contain the content -of the required files (base64 encoded). If both are set, the former will +of the required files (base64 encoded). If both are set, the former will take precedence. * $MNT_TARGET_OCID diff --git a/test/system/backup_vol_system_test.py b/test/system/backup_vol_system_test.py new file mode 100644 index 000000000..cb8d2b0af --- /dev/null +++ b/test/system/backup_vol_system_test.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
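For reference, the base64-encoded *_VAR form described in the README hunk above can be produced as follows. A minimal sketch, assuming a kubeconfig at ~/.kube/config (the path is illustrative); runner.py decodes the resulting value with `openssl enc -base64 -d -A`:

    import base64
    import os

    # Emit the environment variable assignment expected by the test harness.
    with open(os.path.expanduser("~/.kube/config"), "rb") as f:
        print("export KUBECONFIG_VAR=" + base64.b64encode(f.read()).decode())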
+ +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface +import oci +import utils +import atexit +from yaml_utils import PopulateYaml + +class BackupVolumeSystemTest(VolumeProvisionerSystemTestInterface): + + KUBERNETES_RESOURCES = ["../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml", + "../../dist/oci-volume-provisioner.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + BACKUP_CLAIM_TEMPLATE = "templates/example-claim-from-backup.template" + CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" + + def __init__(self, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(BackupVolumeSystemTest, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + + def run(self): + super(BackupVolumeSystemTest, self).run() + if self._check_oci: # Do not run tests in the validate-test-image stage (oci_config not propagated to image) + utils.log("Running system test: Create volume from backup", as_banner=True) + _backup_ocid, _availability_domain = self._setup_create_volume_from_backup() + _claim_target = PopulateYaml(self.BACKUP_CLAIM_TEMPLATE, self._test_id, + region=_availability_domain.split(':')[1], backup_id=_backup_ocid).generateFile() + _res = self._test_create_volume(_claim_target, "demooci-from-backup-" + self._test_id, + availability_domain=_availability_domain, + verify_func=self._volume_from_backup_check, canaryMetricName=self.CM_VOLUME_FROM_BACKUP) + self._tear_down_create_volume_from_backup(_backup_ocid) + self._checkTestSuccess() + + def _create_backup(self, volume_ocid): + '''Create volume backup on OCI from existing volume + @param volume_ocid: OCID of the source volume + @type volume_ocid: C{Str} + @return: Tuple containing the backup id, compartment id and display name + @rtype: C{Tuple}''' + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid, + display_name="backup_volume_system_test" + self._test_id) + _response = client.create_volume_backup(_backup_details) + utils.log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data)) + _res = self._get_json_doc(str(_response.data)) + return _res['id'], _res['compartment_id'], _res['display_name'] + + def _delete_backup(self, backup_ocid): + '''Delete volume backup from OCI + @param backup_ocid: OCID of the backup volume to delete + @type backup_ocid: C{Str}''' + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _response = client.delete_volume_backup(backup_ocid) + utils.log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) + + def _create_volume_from_backup(self, backup_ocid, test_id, availability_domain, compartment_id): + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, + display_name="restored_volume_system_test" + test_id, + availability_domain=availability_domain, + compartment_id=compartment_id) + try: + _response = client.create_volume(_volume_details) + utils.log("Response for creating volume from backup %s: %s %s" % (_response.data, self._get_json_doc(str(_response.data))['id'], compartment_id)) + return self._get_json_doc(str(_response.data))['id'] + except Exception as exc: + utils.log("Failed to create volume from backup %s" % exc) + + def _setup_create_volume_from_backup(self,
storageType=VolumeProvisionerSystemTestInterface.BLOCK_STORAGE, availability_domain=None): + '''Set up the environment for creating a volume from a backup device + @return: OCID of the generated backup and the availability domain + @rtype: C{Tuple}''' + utils.log("Creating test volume (using terraform)", as_banner=True) + self._terraform("init", self.TERRAFORM_DIR) + self._terraform("apply", self.TERRAFORM_DIR) + _availability_domain = self._get_terraform_output_var(self.TERRAFORM_AVAILABILITY_DOMAIN) + utils.log(self._terraform("output -json", self.TERRAFORM_DIR)) + # Create replication controller and write data to the generated volume + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller-with-volume-claim.template", + _availability_domain, volume_name=self._get_volume_name()) + self._create_file_via_replication_controller(_rc_name) + self._verify_file_existance_via_replication_controller(_rc_name) + # Create backup from generated volume + _backup_ocid, compartment_id, _volume_name = self._create_backup(self._get_terraform_output_var(self.TERRAFORM_VOLUME_OCID)) + if not self._wait_for_volume_to_create(_backup_ocid, compartment_id=compartment_id, backup=True, storageType=storageType, + availability_domain=availability_domain): + utils.log("Failed to find backup with name: " + _volume_name) + return _backup_ocid, _availability_domain + + def _tear_down_create_volume_from_backup(self, backup_ocid): + '''Tear down create volume from backup + @param backup_ocid: OCID of backup from which the test volume was created + @type backup_ocid: C{Str}''' + def _destroy_test_volume_atexit(): + utils.log("Destroying test volume (using terraform)", as_banner=True) + self._terraform("destroy -force", self.TERRAFORM_DIR) + atexit.register(_destroy_test_volume_atexit) + self._delete_backup(backup_ocid) + + def _volume_from_backup_check(self, test_id, availability_domain, volume, file_name='hello.txt'): + '''Verify whether the volume created from the backup is in a healthy state + @param test_id: Test id to use for creating components + @type test_id: C{Str} + @param availability_domain: Availability domain to create resource in + @type availability_domain: C{Str} + @param volume: Name of volume to verify + @type volume: C{Str} + @param file_name: Name of file to do checks for + @type file_name: C{Str}''' + _ocid = volume.split('.') + _ocid = _ocid[-1] + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-replication-controller.template", availability_domain, _ocid) + utils.log("Does the file from the previous backup exist?") + stdout = utils.kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html") + if file_name not in stdout.split("\n"): + utils.log("Error: Failed to find file %s in mounted volume" % file_name) + utils.log("Deleting the replication controller (deletes the single nginx pod).") + utils.kubectl("delete -f " + _rc_config) diff --git a/test/system/block_system_test.py b/test/system/block_system_test.py new file mode 100644 index 000000000..60483cb87 --- /dev/null +++ b/test/system/block_system_test.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface + +class BlockSystemTests(VolumeProvisionerSystemTestInterface): + + KUBERNETES_RESOURCES = ["../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml", + "../../dist/oci-volume-provisioner.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + SIMPLE_CLAIM_TEMPLATE = "templates/example-claim.template" + EXT_CLAIM_TEMPLATE = "templates/example-claim-ext3.template" + NO_AD_CLAIM_TEMPLATE = "templates/example-claim-no-AD.template" + CM_SIMPLE = "volume_provisioner_simple" + CM_EXT3 = "volume_provisioner_ext3" + CM_NO_AD = "volume_provisioner_no_ad" + + def __init__(self, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(BlockSystemTests, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + + def run(self): + super(BlockSystemTests, self).run() + utils.log("Running system test: Simple", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.SIMPLE_CLAIM_TEMPLATE, region=self._region, + test_id=self._test_id).generateFile(), "demooci-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_SIMPLE) + self._checkTestSuccess() + utils.log("Running system test: Ext3 file system", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.EXT_CLAIM_TEMPLATE, + self._test_id).generateFile(), "demooci-ext3-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_EXT3) + self._checkTestSuccess() + utils.log("Running system test: No AD specified", as_banner=True) + self._testSuccess = self._test_create_volume(PopulateYaml(self.NO_AD_CLAIM_TEMPLATE, + self._test_id).generateFile(), "demooci-no-ad-" + self._test_id, + self._check_oci, canaryMetricName=self.CM_NO_AD) + self._checkTestSuccess() diff --git a/test/system/canary_metrics.py b/test/system/canary_metrics.py new file mode 100644 index 000000000..fa43af7c3 --- /dev/null +++ b/test/system/canary_metrics.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
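Each system-test class above follows the same shape: construct it with a test id and feature flags, then call run(). A hypothetical invocation, with placeholder argument values:

    from block_system_test import BlockSystemTests

    # run() populates the claim templates and verifies the resulting volumes.
    BlockSystemTests(test_id="20180531120000", setup=True, check_oci=False).run()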
+ +import utils +import os +import json +import datetime +from collections import MutableMapping + +class CanaryMetrics(object): + + CM_SIMPLE = "volume_provisioner_simple" + CM_EXT3 = "volume_provisioner_ext3" + CM_NO_AD = "volume_provisioner_no_ad" + CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" + START_TIME = "start_time" + END_TIME = "end_time" + + def __init__(self, metrics_file=None, *args, **kwargs): + self._canaryMetrics = dict(*args, **kwargs) + self._metrics_file = metrics_file + self._canaryMetrics[self.START_TIME] = self.canary_metric_date() + + @staticmethod + def canary_metric_date(): + # %M (minutes), not %m (month), in the time portion of the stamp + return datetime.datetime.today().strftime('%Y-%m-%d-%H%M%S') + + def update_canary_metric(self, name, result): + self._canaryMetrics[name] = result + + def finish_canary_metrics(self): + self.update_canary_metric(self.END_TIME, self.canary_metric_date()) + if self._metrics_file: + with open(self._metrics_file, 'w') as metrics_file: + json.dump(self._canaryMetrics, metrics_file, sort_keys=True, indent=4) diff --git a/test/system/fss_system_test.py b/test/system/fss_system_test.py new file mode 100644 index 000000000..1c299cd2a --- /dev/null +++ b/test/system/fss_system_test.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
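The CanaryMetrics helper defined above is driven roughly as follows. A sketch only; the metrics file path is an assumption:

    from canary_metrics import CanaryMetrics

    metrics = CanaryMetrics(metrics_file="/tmp/results/metrics.json")  # stamps start_time
    metrics.update_canary_metric(CanaryMetrics.CM_SIMPLE, True)        # record one test result
    metrics.finish_canary_metrics()                                    # stamps end_time, writes JSON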
+ +import os +import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface + +class FSSSystemTests(VolumeProvisionerSystemTestInterface): + + STORAGE_CLAIM_WITH_SUBNET_ID = "templates/example-storage-class-fss-subnet.template" + STORAGE_CLAIM_WITH_MNT_ID = "templates/example-storage-class-fss-mnt.template" + STORAGE_CLAIM_EMPTY = "templates/example-storage-class-fss-empty.template" + FSS_CLAIM = "templates/example-claim-fss.template" + MNT_TARGET_OCID = "MNT_TARGET_OCID" + SUBNET_OCID = "SUBNET_OCID" + KUBERNETES_RESOURCES = ["../../dist/oci-volume-provisioner-fss.yaml", + "../../dist/oci-volume-provisioner-rbac.yaml"] + TEST_SC_FILES = [STORAGE_CLAIM_WITH_MNT_ID, STORAGE_CLAIM_EMPTY] + CM_FSS = "" + + def __init__(self, subnet_ocid=None, mnt_target_ocid=None, test_id=None, setup=False, check_oci=False, canaryMetrics=None): + super(FSSSystemTests, self).__init__(test_id=test_id, setup=setup, check_oci=check_oci, + k8Resources=self.KUBERNETES_RESOURCES, canaryMetrics=canaryMetrics) + self._subnet_ocid = subnet_ocid + self._mnt_target_ocid = mnt_target_ocid + self._scFile = self.STORAGE_CLAIM_WITH_SUBNET_ID + + def run(self): + super(FSSSystemTests, self).run() + if self._check_oci: # Do not run tests in the validate-test-image stage + utils.log("Running system test: Create volume with FSS", as_banner=True) + for _testSc in self.TEST_SC_FILES: + # Not testing the creation of a mount target, as all mount targets on the system will have + # to be removed + self._testCreateVolumeFromStorageClass(_testSc) + + def _testCreateVolumeFromStorageClass(self, scFile): + '''Test creating a volume based on the provided storage class + @param scFile: Path to the storage class config file + @type scFile: C{Str}''' + utils.log("Testing for sc: %s" % scFile, as_banner=True) + _storageClassFile = PopulateYaml(scFile, self._test_id, mount_target_ocid=self._mnt_target_ocid, + subnet_ocid=self._subnet_ocid).generateFile() + # Delete any previously existing storage class with the same name, then recreate it + utils.kubectl("delete -f " + _storageClassFile, exit_on_error=False) + utils.kubectl("create -f " + _storageClassFile, exit_on_error=False) + self._testSuccess = self._test_create_volume(PopulateYaml(self.FSS_CLAIM, self._test_id, region=self._region).generateFile(), + "demooci-fss-" + self._test_id, availability_domain=self.DEFAULT_AVAILABILITY_DOMAIN, + storageType=self.FS_STORAGE, verify_func=self._volume_from_fss_dynamic_check, canaryMetricName=self.CM_FSS) + self._checkTestSuccess() + + def _volume_from_fss_dynamic_check(self, availability_domain, volume, file_name='hello.txt'): + '''Verify whether the file system is attached to the pod and can be written to + @param availability_domain: Availability domain to create resource in + @type availability_domain: C{Str} + @param volume: Name of volume to verify + @type volume: C{Str} + @param file_name: Name of file to do checks for + @type file_name: C{Str}''' + _ocid = volume.split('.') + _ocid = _ocid[-1] + _rc_name, _rc_config = self._create_rc_or_pod("templates/example-pod-fss.template", + availability_domain, _ocid) + utils.log("Does the file from the previous backup exist?") + stdout = utils.kubectl("exec " + _rc_name + " -- ls
/usr/share/nginx/html") + if file_name not in stdout.split("\n"): + utils.log("Error: Failed to find file %s in mounted volume" % file_name) + utils.log("Deleting the replication controller (deletes the single nginx pod).") + utils.kubectl("delete -f " + _rc_config) diff --git a/test/system/runner.py b/test/system/runner.py index a8c2e19a6..9429e2ad9 100755 --- a/test/system/runner.py +++ b/test/system/runner.py @@ -26,75 +26,44 @@ import oci import yaml import utils +from yaml_utils import PopulateYaml +from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface +from block_system_test import BlockSystemTests +from fss_system_test import FSSSystemTests +from backup_vol_system_test import BackupVolumeSystemTest +from canary_metrics import CanaryMetrics -MNT_TARGET_OCID = "MNT_TARGET_OCID" -TERRAFORM_CLUSTER = "terraform/cluster" -TERRAFORM_DIR = "terraform" -# Variable name correspond to the ones found in the terraform config file -TERRAFORM_AVAILABILITY_DOMAIN = "availability_domain" -TERRAFORM_VOLUME_OCID = "volume_ocid" TEST_NAME = "volumeprovisionersystemtest" -TMP_OCICONFIG = "/tmp/ociconfig" -TMP_KUBECONFIG = "/tmp/kubeconfig.conf" -TMP_OCI_API_KEY_FILE = "/tmp/oci_api_key.pem" REGION = "us-ashburn-1" -TIMEOUT = 600 WRITE_REPORT=True -POD_CONTROLLER = "controller" -POD_VOLUME = "volume" -BLOCK_STORAGE = "block" -FS_STORAGE = "fileSystem" -DEFAULT_AVAILABILITY_DOMAIN="NWuj:PHX-AD-2" -LIFECYCLE_STATE_ON = {BLOCK_STORAGE: 'AVAILABLE', - FS_STORAGE: 'ACTIVE'} -LIFECYCLE_STATE_OFF = {BLOCK_STORAGE: 'TERMINATED', -<<<<<<< HEAD - FS_STORAGE:'DELETED'} - -# On exit return 0 for success or any other integer for a failure. -# If write_report is true then write a completion file to the Sonabuoy plugin result file. -# The default location is: /tmp/results/done -def _finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_DIR_PATH, report_file=REPORT_FILE): - print "finishing with exit code: " + str(exit_code) - if write_report: - if not os.path.exists(report_dir_path): - os.makedirs(report_dir_path) - if exit_code == 0: - _debug_file("\nTest Suite Success\n") - else: - _debug_file("\nTest Suite Failed\n") - time.sleep(3) - copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE) - with open(report_dir_path + "/" + report_file, "w+") as file: - file.write(str(report_dir_path + "/" + DEBUG_FILE)) - finish_canary_metrics() - sys.exit(exit_code) -======= - FS_STORAGE:'DELETED'} ->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests +OCICONFIG = "OCICONFIG" +OCICONFIG_VAR = "OCICONFIG_VAR" +KUBECONFIG_VAR = "KUBECONFIG_VAR" +SUBNET_OCID = "SUBNET_OCID" +METRICS_FILE = "METRICS_FILE" +MNT_TARGET_OCID = "MNT_TARGET_OCID" def _check_env(check_oci): if check_oci: - if "OCICONFIG" not in os.environ and "OCICONFIG_VAR" not in os.environ: + if OCICONFIG not in os.environ and OCICONFIG_VAR not in os.environ: utils.log("Error. 
Can't find either OCICONFIG or OCICONFIG_VAR in the environment.")
             utils.finish_with_exit_code(1)
 
-
 def _create_key_files(check_oci):
     utils.log("Setting environment variables")
-    if "OCICONFIG_VAR" in os.environ:
-        utils.run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_OCICONFIG, ".")
-        utils.run_command("chmod 600 " + TMP_OCICONFIG, ".")
-    if "KUBECONFIG_VAR" in os.environ:
-        utils.run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + TMP_KUBECONFIG, ".")
+    if OCICONFIG_VAR in os.environ:
+        utils.run_command("echo \"$OCICONFIG_VAR\" | openssl enc -base64 -d -A > " + VolumeProvisionerSystemTestInterface.TMP_OCICONFIG, ".")
+        utils.run_command("chmod 600 " + VolumeProvisionerSystemTestInterface.TMP_OCICONFIG, ".")
+    if KUBECONFIG_VAR in os.environ:
+        utils.run_command("echo \"$KUBECONFIG_VAR\" | openssl enc -base64 -d -A > " + utils.TMP_KUBECONFIG, ".")
     if check_oci:
-        oci_config_file = _get_oci_config_file()
+        # Resolve the config path directly; instantiating the test interface
+        # here would trigger cluster lookups just to read a constant
+        oci_config_file = os.environ.get(OCICONFIG, VolumeProvisionerSystemTestInterface.TMP_OCICONFIG)
         with open(oci_config_file, 'r') as stream:
             try:
                 cnf = yaml.load(stream)
-                with open(TMP_OCI_API_KEY_FILE, 'w') as stream:
+                with open(VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE, 'w') as stream:
                     stream.write(cnf['auth']['key'])
             except yaml.YAMLError as err:
                 utils.log("Error. Failed to parse oci config file %s. Error: %s " % (oci_config_file, err))
@@ -102,231 +71,19 @@ def _create_key_files(check_oci):
 
 def _destroy_key_files(check_oci):
-    if "OCICONFIG_VAR" in os.environ:
-        os.remove(TMP_OCICONFIG)
-    if "KUBECONFIG_VAR" in os.environ:
-        os.remove(TMP_KUBECONFIG)
+    if OCICONFIG_VAR in os.environ:
+        os.remove(VolumeProvisionerSystemTestInterface.TMP_OCICONFIG)
+    if KUBECONFIG_VAR in os.environ:
+        os.remove(utils.TMP_KUBECONFIG)
     if check_oci:
-        os.remove(TMP_OCI_API_KEY_FILE)
-
-def _get_kubeconfig():
-    return os.environ['KUBECONFIG'] if "KUBECONFIG" in os.environ else TMP_KUBECONFIG
-
-
-def _get_oci_config_file():
-    return os.environ['OCICONFIG'] if "OCICONFIG" in os.environ else TMP_OCICONFIG
-
+        os.remove(VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE)
 
 def _get_oci_api_key_file():
-    return TMP_OCI_API_KEY_FILE
+    return VolumeProvisionerSystemTestInterface.TMP_OCI_API_KEY_FILE
 
 def _get_timestamp(test_id):
     return test_id if test_id is not None else datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
 
-def _get_terraform_env():
-    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
-    return "TF_VAR_test_id=" + timestamp
-
-def _terraform(action, cwd, terraform_env):
-    (stdout, _, returncode) = utils.run_command(terraform_env + " terraform " + action, cwd)
-    if returncode != 0:
-        utils.log("Error running terraform")
-        sys.exit(1)
-    return stdout
-
-def _kubectl(action, exit_on_error=True, display_errors=True, log_stdout=True):
-    if "KUBECONFIG" not in os.environ and "KUBECONFIG_VAR" not in os.environ:
-        (stdout, _, returncode) = utils.run_command("kubectl " + action, ".", display_errors)
-    else:
-        (stdout, _, returncode) = utils.run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors)
-    if exit_on_error and returncode != 0:
-        utils.log("Error running kubectl")
-        utils.finish_with_exit_code(1)
-    if log_stdout:
-        utils.log(stdout)
-    return stdout
-
-def _get_pod_infos(test_id, pod_type):
-    '''Retrieve pod information from kube-system
-    @param test_id: Test id to use to search for the pod to get infor for
-    @type test_id: C{Str}
-    @param pod_type: Pod type to search for
-    @type pod_type: C{Str}
-    
@return: Tuple containing the name of the resource, its status and the - node it's running on - @rtype: C{Tuple}''' - _namespace = "-n kube-system" if pod_type == POD_VOLUME else "" - stdout = _kubectl(_namespace + " get pods -o wide") - infos = [] - for line in stdout.split("\n"): - line_array = line.split() - if len(line_array) > 0: - name = line_array[0] - if name.startswith('oci-volume-provisioner')and pod_type == POD_VOLUME: - status = line_array[2] - node = line_array[6] - infos.append((name, status, node)) - if re.match(r"nginx-controller-" + test_id + ".*", line) and pod_type == POD_CONTROLLER: - name = line_array[0] - status = line_array[2] - node = line_array[6] - infos.append((name, status, node)) - if re.match(r"demooci-fss-pod-" + test_id + ".*", line) and pod_type == POD_CONTROLLER: - name = line_array[0] - status = line_array[2] - node = line_array[6] - infos.append((name, status, node)) - return infos - -def _get_volume(volume_name): - stdout = _kubectl("get PersistentVolumeClaim -o wide") - for line in stdout.split("\n"): - line_array = line.split() - if len(line_array) >= 3: - name = line_array[0] - status = line_array[1] - if name == volume_name and status == "Bound": - return line_array[2] - return None - -def _get_volume_and_wait(volume_name): - num_polls = 0 - volume = _get_volume(volume_name) - while not volume: - utils.log(" waiting...") - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - return False - volume = _get_volume(volume_name) - return volume - - -def _get_json_doc(response): - decoder = json.JSONDecoder() - try: - doc = decoder.decode(response) - except (ValueError, UnicodeError) as _: -<<<<<<< HEAD - _log('Invalid JSON in response: %s' % str(response)) - _finish_with_exit_code(1) -======= - utils.log('Invalid JSON in response: %s' % str(response)) - utils.finish_with_exit_code(1) ->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests - return doc - - -def _oci_config(): - config = dict(oci.config.DEFAULT_CONFIG) - oci_config_file = _get_oci_config_file() - with open(oci_config_file, 'r') as stream: - try: - cnf = yaml.load(stream) - config["user"] = cnf['auth']['user'] - config["tenancy"] = cnf['auth']['tenancy'] - config["fingerprint"] = cnf['auth']['fingerprint'] - config["key_file"] = TMP_OCI_API_KEY_FILE - config["region"] = cnf['auth']['region'] - return config - except yaml.YAMLError: - utils.log("Error. 
Failed to parse oci config file " + oci_config_file) - utils.finish_with_exit_code(1) - - -def _volume_exists(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): - '''Verify whether the volume is available or not - @param storageType: Storage type to search for volumes in - @type storageType: C{Str} - @param availability_domain: Availability domain to look in for - @type availability_domain: C{Str}''' - if storageType == BLOCK_STORAGE: - utils.log("Retrieving block volumes") - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - if backup: - volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id) - else: - volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id) - else: - utils.log("Retrieving file systems") - client = oci.file_storage.FileStorageClient(_oci_config()) - volumes = oci.pagination.list_call_get_all_results(client.list_file_systems, compartment_id, - availability_domain) - utils.log("Getting status for volume %s" % volume) - for vol in _get_json_doc(str(volumes.data)): - if vol['id'].endswith(volume) and vol['lifecycle_state'] == state: - return True - return False - -def _create_backup(volume_ocid, test_id): - '''Create volume backup on OCI from existing volume - @param volume_ocid: Ocid of course volume - @type volume_ocid: C{Str} - @param test_id: Test id used to append to component name - @type test_id: C{Str} - @return: Tuple containing the backup id, compartment id and display name - @rtype: C{Tuple}''' - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _backup_details = oci.core.models.CreateVolumeBackupDetails(volume_id=volume_ocid, - display_name="backup_volume_system_test" + test_id) - _response = client.create_volume_backup(_backup_details) - utils.log("Response for creating backup for volume %s: %s" % (volume_ocid, _response.data)) - _res = _get_json_doc(str(_response.data)) - return _res['id'], _res['compartment_id'], _res['display_name'] - -def _delete_backup(backup_ocid): - '''Delete volume backup from OCI - @param backup_ocid: Ocid of backup volume to delete - @type backup_ocid: C{Str}''' - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _response = client.delete_volume_backup(backup_ocid) - utils.log("Response for deleting volume backup %s: %s" % (backup_ocid, _response.data)) - - -def _create_volume_from_backup(backup_ocid, test_id, availability_domain, compartment_id): - client = oci.core.blockstorage_client.BlockstorageClient(_oci_config()) - _volume_details = oci.core.models.CreateVolumeDetails(volume_backup_id=backup_ocid, - display_name="restored_volume_system_test" + test_id, - availability_domain=availability_domain, - compartment_id=compartment_id) - try: - _response = client.create_volume(_volume_details) - utils.log("Response for creating volume from backup %s: %s %s" % (_response.data, _get_json_doc(str(_response.data))['id'], compartment_id)) - return _get_json_doc(str(_response.data))['id'] - except Exception as exc: - utils.log("Failed to create volume from backup %s" % exc) - -def _wait_for_volume(compartment_id, volume, state, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): - num_polls = 0 - while not _volume_exists(compartment_id, volume, state, backup, storageType=storageType, - availability_domain=availability_domain): - utils.log(" waiting...") - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - return False - return True - 
-def _wait_for_volume_to_create(compartment_id, volume, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): - return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_ON[storageType], backup, storageType=storageType, - availability_domain=availability_domain) - - -def _wait_for_volume_to_delete(compartment_id, volume, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): - return _wait_for_volume(compartment_id, volume, LIFECYCLE_STATE_OFF[storageType], backup, storageType=storageType, - availability_domain=availability_domain) - - -def _get_compartment_id(pod_name): - '''Gets the oci compartment_id from the oci-volume-provisioner pod host. - This is where oci volume resources will be created.''' - result = _kubectl("-n kube-system exec %s -- curl -s http://169.254.169.254/opc/v1/instance/" % pod_name, - exit_on_error=False, log_stdout=False) - result_json = _get_json_doc(str(result)) - compartment_id = result_json["compartmentId"] - return compartment_id - - def _handle_args(): parser = argparse.ArgumentParser(description='Description of your program') parser.add_argument('--setup', @@ -353,358 +110,6 @@ def _handle_args(): return args - -def _cleanup(k8sResources=[], exit_on_error=False, display_errors=True): - for _res in k8sResources: - _kubectl("delete -f " + _res, exit_on_error, display_errors) - -def _get_region(): - nodes_json = _kubectl("get nodes -o json", log_stdout=False) - nodes = json.loads(nodes_json) - for node in nodes['items']: - return node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone'] - utils.log("Region lookup failed") - utils.finish_with_exit_code(1) - - -def _create_yaml(template, test_id, region=None, backup_id=None, mount_target_ocid=None, volume_name=None, availability_domain=None): - '''Generate yaml based on the given template and fill in additional details - @param template: Name of file to use as template - @type template: C{Str} - @param test_id: Used for tagging resources with test id - @type test_id: C{Str} - @param region: Used for selecting resources from specified region - @type region: C{Str} - @param backup_id: Backup id to create PVC from - @type backup_id: C{Str} - @param mount_target_ocid: Mount target OCID to populate config with - @type mount_target_ocid: C{Str} - @param volume_name: Name used to create volume - @type volume_name: C{Str} - @param availability_domain: Availability domain (used for pvc) - @type availability_domain: C{Str} - @return: Name of generated config file - @rtype: C{Str}''' - yaml_file = template + ".yaml" - with open(template, "r") as sources: - lines = sources.readlines() - with open(yaml_file, "w") as sources: - for line in lines: - patched_line = line - patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) - if volume_name is not None: - patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line) - if region is not None: - patched_line = re.sub('{{REGION}}', region, patched_line) - if backup_id is not None: - patched_line = re.sub('{{BACKUP_ID}}', backup_id, patched_line) - if availability_domain: - availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':' - patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line) - if mount_target_ocid is not None: - patched_line = re.sub('{{MNT_TARGET_OCID}}', mount_target_ocid, patched_line) - elif "MNT_TARGET_OCID" in patched_line: - patched_line = "" - sources.write(patched_line) - return yaml_file - -def _test_create_volume(compartment_id, claim_target, 
claim_volume_name, check_oci, test_id=None, - availability_domain=None, verify_func=None, storageType=BLOCK_STORAGE): - '''Test making a volume claim from a configuration file - @param backup_ocid: Verify whether the volume created from a backup contains backup info - @type backup_ocid: C{Str}''' - _kubectl("create -f " + claim_target, exit_on_error=False) - - volume = _get_volume_and_wait(claim_volume_name) - utils.log("Created volume with name: %s" % str(volume)) - - if check_oci: -<<<<<<< HEAD - _log("Querying the OCI api to make sure a volume with this name exists...") - if not _wait_for_volume_to_create(compartment_id, volume, storageType=storageType, - availability_domain=availability_domain): - _log("Failed to find volume with name: " + volume) - return False - _log("Volume: " + volume + " is present and available") - - if verify_func: - verify_func(test_id, availability_domain, volume) - - _log("Delete the volume claim") -======= - utils.log("Querying the OCI api to make sure a volume with this name exists...") - if not _wait_for_volume_to_create(compartment_id, volume, storageType=storageType, - availability_domain=availability_domain): - utils.log("Failed to find volume with name: " + volume) - utils.finish_with_exit_code(1) - utils.log("Volume: " + volume + " is present and available") - - if verify_func: - verify_func(test_id, availability_domain, volume) - - utils.log("Delete the volume claim") ->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests - _kubectl("delete -f " + claim_target, exit_on_error=False) - - if check_oci: - utils.log("Querying the OCI api to make sure a volume with this name now doesnt exist...") - _wait_for_volume_to_delete(compartment_id, volume, storageType=storageType, - availability_domain=availability_domain) - if not _volume_exists(compartment_id, volume, LIFECYCLE_STATE_OFF[storageType], storageType=storageType, - availability_domain=availability_domain): -<<<<<<< HEAD - _log("Volume with name: " + volume + " still exists") - return False - _log("Volume: " + volume + " has now been terminated") -======= - utils.log("Volume with name: " + volume + " still exists") - utils.finish_with_exit_code(1) - utils.log("Volume: " + volume + " has now been terminated") ->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests - - return True - -def _patch_template_file(infile, outfile, volume_name, test_id, availability_domain): - '''Generate yaml based on the given template and fill in additional details - @param template: Name of file to use as template - @type template: C{Str} - @param test_id: Used for tagging resources with test id - @type test_id: C{Str} - @param availability_domain: Used for selecting resources from specified AD - @type availability_domain: C{Str} - @return: Name of generated config file - @rtype: C{Str}''' - with open(infile, "r") as sources: - lines = sources.readlines() - with open(outfile + "." + test_id, "w") as sources: - for line in lines: - patched_line = line - if volume_name is not None: - patched_line = re.sub('{{VOLUME_NAME}}', volume_name, patched_line) - patched_line = re.sub('{{TEST_ID}}', test_id, patched_line) - if availability_domain: - availability_domain = availability_domain.replace(':', '-') # yaml config does not allow ':' - patched_line = re.sub('{{AVAILABILITY_DOMAIN}}', availability_domain, patched_line) - sources.write(patched_line) - return outfile + "." 
+ test_id - -def _get_terraform_output_var(terraform_env, var_name): - '''Retrieve variable value from terraform output from state file - @param terraform_env: Terraform test id - @type terraform_env: C{Str} - @param var_name: Name of variable to retrieve from output - @type var_name: C{Str} - @return: Value of variable - @rtype: C{Str}''' - output = _terraform("output -json", TERRAFORM_DIR, terraform_env) - jsn = json.loads(output) - return jsn[var_name]["value"] - -def _get_volume_name(terraform_env): - '''Retrieve volume name from terraform status output - @param terraform_env: Terraform test id - @type terraform_env: C{Str} - @return: Volume OCID - @rtype: C{Str}''' - _ocid = _get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID).split('.') - return _ocid[len(_ocid)-1] - -def _wait_for_pod_status(desired_status, test_id, pod_type): - '''Wait until the pod gets to the desired status - @param desired_status: Status to wait for - @type desired_status: C{Str} - @param test_id: Test_id used to retrieve components generated by this test - @type test_id: C{Str} - @param pod_type: Pod type to query - @type pod_type: C{Str} - @return: Tuple containing the name of the resource, its status and the - node it's running on - @rtype: C{Tuple}''' - infos = _get_pod_infos(test_id, pod_type) - num_polls = 0 - while not any(i[1] == desired_status for i in infos): - for i in infos: - utils.log(" - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2]) - time.sleep(1) - num_polls += 1 - if num_polls == TIMEOUT: - for i in infos: - utils.log("Error: Pod: " + i[0] + " " + - "failed to achieve status: " + desired_status + "." + - "Final status was: " + i[1]) - sys.exit(1) - infos = _get_pod_infos(test_id, pod_type) - for i in infos: - if i[1] == desired_status: - return (i[0], i[1], i[2]) - # Should never get here. 
- return (None, None, None) - -def _create_rc_or_pod(config, test_id, availability_domain, volume_name="default_volume"): - '''Create replication controller or pod and wait for it to start - @param rc_config: Replication controller configuration file to patch - @type rc_config: C{Str} - @param test_id: Test id used to append to component names - @type test_id : C{Str} - @param availability_domain: Availability domain to start rc in - @type availability_domain: C{Str} - @param volume_name: Volume name used by the replication controller - @type volume_name: C{Str} - @return: Tuple containing the name of the created rc and its config file - @rtype: C{Tuple}''' - _config = _patch_template_file(config, config + '.yaml', volume_name, test_id, availability_domain) - utils.log("Starting the replication controller (creates a single nginx pod).") - _kubectl("delete -f " + _config, exit_on_error=False, display_errors=False) - _kubectl("create -f " + _config) - utils.log("Waiting for the pod to start.") - _name, _, _ = _wait_for_pod_status("Running", test_id, POD_CONTROLLER) - return _name, _config - -def _create_file_via_replication_controller(rc_name, file_name="hello.txt"): - '''Create file via the replication controller - @param rcName: Name of the replication controller to write data to - @type rcName: C{Str} - @param fileName: Name of file to create - @type fileName: C{Str}''' - _kubectl("exec " + rc_name + " -- touch /usr/share/nginx/html/" + file_name) - -def _verify_file_existance_via_replication_controller(rc_name, file_name="hello.txt"): - '''Verify whether file exists via the replication controller - @param rcName: Name of the replication controller to verify - @type rcName: C{Str} - @param fileName: Name of file to create - @type fileName: C{Str}''' - utils.log("Does the new file exist?") - stdout = _kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html") - if file_name not in stdout.split("\n"): - utils.log("Error: Failed to find file %s in mounted volume" % file_name) - sys.exit(1) - utils.log("Yes it does!") - -def _setup_create_volume_from_backup(terraform_env, test_id, storageType=BLOCK_STORAGE, availability_domain=None): - '''Setup environment for creating a volume from a backup device - @param test_id: Test id used to append to component names - @type test_id : C{Str} - @return: OCID of generated backup - @rtype: C{Str}''' - utils.log("Creating test volume (using terraform)", as_banner=True) - _terraform("init", TERRAFORM_DIR, terraform_env) - _terraform("apply", TERRAFORM_DIR, terraform_env) - _availability_domain = _get_terraform_output_var(terraform_env, TERRAFORM_AVAILABILITY_DOMAIN) - utils.log(_terraform("output -json", TERRAFORM_DIR, terraform_env)) - # Create replication controller and write data to the generated volume - _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-replication-controller-with-volume-claim.template", - test_id, _availability_domain, volume_name=_get_volume_name(terraform_env)) - _create_file_via_replication_controller(_rc_name) - _verify_file_existance_via_replication_controller(_rc_name) - # Create backup from generated volume - _backup_ocid, compartment_id, _volume_name = _create_backup(_get_terraform_output_var(terraform_env, TERRAFORM_VOLUME_OCID), test_id) - if not _wait_for_volume_to_create(compartment_id, _backup_ocid, backup=True, storageType=storageType, - availability_domain=availability_domain): -<<<<<<< HEAD - _log("Failed to find backup with name: " + _volume_name) -======= - utils.log("Failed to find backup with name: " 
+ _volume_name) ->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests - return _backup_ocid, _availability_domain - -def _tear_down_create_volume_from_backup(terraform_env, backup_ocid): - '''Tear down create volume from backup - @param test_id: Test id used to append to component names - @type test_id: C{Str} - @param backup_ocid: OCID of backup from which the test volume was created - @type backup_ocid: C{Str}''' - def _destroy_test_volume_atexit(): - utils.log("Destroying test volume (using terraform)", as_banner=True) - _terraform("destroy -force", TERRAFORM_DIR, terraform_env) - atexit.register(_destroy_test_volume_atexit) - _delete_backup(backup_ocid) - -def _volume_from_backup_check(test_id, availability_domain, volume, file_name='hello.txt'): - '''Verify whether the volume created from the backup is in a healthy state - @param test_id: Test id to use for creating components - @type test_id: C{Str} - @param availability_domain: Availability domain to create resource in - @type availability_domain: C{Str} - @param volume: Name of volume to verify - @type volume: C{Str} - @param file_name: Name of file to do checks for - @type file_name: C{Str}''' - _ocid = volume.split('.') - _ocid = _ocid[-1] - _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-replication-controller.template", test_id, availability_domain, _ocid) - utils.log("Does the file from the previous backup exist?") - stdout = _kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html") - if file_name not in stdout.split("\n"): - utils.log("Error: Failed to find file %s in mounted volume" % file_name) - utils.log("Deleting the replication controller (deletes the single nginx pod).") - _kubectl("delete -f " + _rc_config) - -def _volume_from_fss_dynamic_check(test_id, availability_domain, volume, file_name='hello.txt'): - '''Verify whether the file system is attached to the pod and can be written to - @param test_id: Test id to use for creating components - @type test_id: C{Str} - @param availability_domain: Availability domain to create resource in - @type availability_domain: C{Str} - @param volume: Name of volume to verify - @type volume: C{Str} - @param file_name: Name of file to do checks for - @type file_name: C{Str}''' - _ocid = volume.split('.') - _ocid = _ocid[-1] - _rc_name, _rc_config = _create_rc_or_pod("../../examples/example-pod-fss.template", - test_id, availability_domain, _ocid) - utils.log("Does the file from the previous backup exist?") - stdout = _kubectl("exec " + _rc_name + " -- ls /usr/share/nginx/html") - if file_name not in stdout.split("\n"): - utils.log("Error: Failed to find file %s in mounted volume" % file_name) - utils.log("Deleting the replication controller (deletes the single nginx pod).") - _kubectl("delete -f " + _rc_config) - - -<<<<<<< HEAD -# Canary Metrics ************************************************************** -# - -CM_SIMPLE = "volume_provisioner_simple" -CM_EXT3 = "volume_provisioner_ext3" -CM_NO_AD = "volume_provisioner_no_ad" -CM_VOLUME_FROM_BACKUP = "volume_provisioner_volume_from_backup" - -def canary_metric_date(): - return datetime.datetime.today().strftime('%Y-%m-%d-%H%m%S') - -def init_canary_metrics(check_oci): - if "METRICS_FILE" in os.environ: - _log("generating metrics file...") - canary_metrics = {} - canary_metrics["start_time"] = canary_metric_date() - canary_metrics[CM_SIMPLE] = 0 - canary_metrics[CM_EXT3] = 0 - canary_metrics[CM_NO_AD] = 0 - if check_oci: - canary_metrics[CM_VOLUME_FROM_BACKUP] = 0 - with 
open(os.environ.get("METRICS_FILE"), 'w') as metrics_file:
-            json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4)
-
-def update_canary_metric(name, result):
-    if "METRICS_FILE" in os.environ:
-        _log("updating metrics fle...")
-        with open(os.environ.get("METRICS_FILE"), 'r') as metrics_file:
-            canary_metrics = json.load(metrics_file)
-        canary_metrics[name] = result
-        with open(os.environ.get("METRICS_FILE"), 'w') as metrics_file:
-            json.dump(canary_metrics, metrics_file, sort_keys=True, indent=4)
-
-def finish_canary_metrics():
-    update_canary_metric("end_time", canary_metric_date())
-
-
-# Main ************************************************************************
-#
-
-=======
->>>>>>> fcfa92b6... Retrieve pirvate ip for mount target and fixed system tests
 def _main():
     utils.reset_debug_file()
     args = _handle_args()
@@ -716,86 +121,21 @@ def _destroy_key_files_atexit():
     atexit.register(_destroy_key_files_atexit)
 
     test_id = str(uuid.uuid4())[:8]
-
-    success = True
-    _storageClassFile = _create_yaml("../../examples/example-storage-class-fss.template", test_id,
-                                     mount_target_ocid=os.environ.get(MNT_TARGET_OCID))
-
-    _k8sResources = [_storageClassFile,
-                     "../../dist/storage-class.yaml", "../../dist/storage-class-ext3.yaml",
-                     "../../dist/oci-volume-provisioner-rbac.yaml",
-                     "../../dist/oci-volume-provisioner-fss.yaml",
-                     "../../dist/oci-volume-provisioner.yaml"]
-    if args['setup']:
-        # Cleanup in case any existing state exists in the cluster
-        _cleanup(k8sResources=_k8sResources, display_errors=False)
-        utils.log("Setting up the volume provisioner", as_banner=True)
-        _kubectl("-n kube-system create secret generic oci-volume-provisioner " + \
-                 "--from-file=config.yaml=" + _get_oci_config_file(),
-                 exit_on_error=False)
-        for _res in _k8sResources:
-            _kubectl("create -f " + _res, exit_on_error=False)
-        pod_name, _, _ = _wait_for_pod_status("Running", test_id, POD_VOLUME)
-        compartment_id = _get_compartment_id(pod_name)
-    else:
-        compartment_id = None
-
+    canaryMetrics = CanaryMetrics(metrics_file=os.environ.get(METRICS_FILE))
     if args['teardown']:
         def _teardown_atexit():
             utils.log("Tearing down the volume provisioner", as_banner=True)
-            _cleanup(k8sResources=_k8sResources)
+            # BlockSystemTests(test_id=test_id, setup=args['setup']).cleanup()
+            FSSSystemTests(test_id=test_id, setup=args['setup']).cleanup()
+            # BackupVolumeSystemTest(test_id=test_id, setup=args['setup']).cleanup()
        atexit.register(_teardown_atexit)
 
     if not args['no_test']:
-        _log("Running system test: Simple", as_banner=True)
-        init_canary_metrics(args['check_oci'])
-        res = _test_create_volume(compartment_id,
-                                  _create_yaml("../../examples/example-claim.template", test_id, _get_region()),
-                                  "demooci-" + test_id, args['check_oci'])
-        update_canary_metric(CM_SIMPLE, int(res))
-        success = False if res == False else success
-
-        _log("Running system test: Ext3 file system", as_banner=True)
-        res = _test_create_volume(compartment_id,
-                                  _create_yaml("../../examples/example-claim-ext3.template", test_id, None),
-                                  "demooci-ext3-" + test_id, args['check_oci'])
-        update_canary_metric(CM_EXT3, int(res))
-        success = False if res == False else success
-
-        _log("Running system test: No AD specified", as_banner=True)
-        res = _test_create_volume(compartment_id,
-                                  _create_yaml("../../examples/example-claim-no-AD.template", test_id, None),
-                                  "demooci-no-ad-" + test_id, args['check_oci'])
-        update_canary_metric(CM_NO_AD, int(res))
-        success = False if res == False else success
-
-        _log("Running system test: Create volume with FSS", as_banner=True)
-        
_test_create_volume(compartment_id, - _create_yaml("../../examples/example-claim-fss.template", test_id, _get_region()), - "demooci-fss-" + test_id, args['check_oci'], availability_domain=DEFAULT_AVAILABILITY_DOMAIN, - storageType=FS_STORAGE) - _log("Running system test: Create volume from backup", as_banner=True) - if args['check_oci']: - _log("Running system test: Create volume from backup", as_banner=True) - terraform_env = _get_terraform_env() - _backup_ocid, _availability_domain = _setup_create_volume_from_backup(terraform_env, test_id) - _claim_target = _create_yaml("../../examples/example-claim-from-backup.template", test_id, - region=_availability_domain.split(':')[1], backup_id=_backup_ocid) - res = _test_create_volume(compartment_id, _claim_target, - "demooci-from-backup-" + test_id, args['check_oci'], - test_id=test_id, availability_domain=_availability_domain, - verify_func=_volume_from_backup_check) - update_canary_metric(CM_VOLUME_FROM_BACKUP, int(res)) - success = False if res == False else success - _tear_down_create_volume_from_backup(terraform_env, _backup_ocid) - - if not success: - _finish_with_exit_code(1) - else: - _finish_with_exit_code(0) + BlockSystemTests(test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + FSSSystemTests(subnet_ocid=os.environ.get(SUBNET_OCID), test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + BackupVolumeSystemTest(test_id=test_id, setup=args['setup'], check_oci=args['check_oci'], canaryMetrics=canaryMetrics).run() + canaryMetrics.finish_canary_metrics() + utils.finish_with_exit_code(0) if __name__ == "__main__": _main() - - - diff --git a/examples/example-claim-ext3.template b/test/system/templates/example-claim-ext3.template similarity index 100% rename from examples/example-claim-ext3.template rename to test/system/templates/example-claim-ext3.template diff --git a/examples/example-claim-from-backup.template b/test/system/templates/example-claim-from-backup.template similarity index 100% rename from examples/example-claim-from-backup.template rename to test/system/templates/example-claim-from-backup.template diff --git a/examples/example-claim-fss.template b/test/system/templates/example-claim-fss.template similarity index 100% rename from examples/example-claim-fss.template rename to test/system/templates/example-claim-fss.template diff --git a/examples/example-claim-no-AD.template b/test/system/templates/example-claim-no-AD.template similarity index 100% rename from examples/example-claim-no-AD.template rename to test/system/templates/example-claim-no-AD.template diff --git a/examples/example-claim.template b/test/system/templates/example-claim.template similarity index 100% rename from examples/example-claim.template rename to test/system/templates/example-claim.template diff --git a/examples/example-pod-fss.template b/test/system/templates/example-pod-fss.template similarity index 90% rename from examples/example-pod-fss.template rename to test/system/templates/example-pod-fss.template index ece0687c5..2f5e14e97 100644 --- a/examples/example-pod-fss.template +++ b/test/system/templates/example-pod-fss.template @@ -17,4 +17,4 @@ spec: volumes: - name: nginx persistentVolumeClaim: - claimName: demooci-fss-{{TEST_ID}} \ No newline at end of file + claimName: demooci-fss-{{TEST_ID}} diff --git a/examples/example-replication-controller-with-volume-claim.template 
b/test/system/templates/example-replication-controller-with-volume-claim.template
similarity index 100%
rename from examples/example-replication-controller-with-volume-claim.template
rename to test/system/templates/example-replication-controller-with-volume-claim.template
diff --git a/examples/example-replication-controller.template b/test/system/templates/example-replication-controller.template
similarity index 100%
rename from examples/example-replication-controller.template
rename to test/system/templates/example-replication-controller.template
diff --git a/test/system/templates/example-storage-class-fss-empty.template b/test/system/templates/example-storage-class-fss-empty.template
new file mode 100644
index 000000000..34a3447ed
--- /dev/null
+++ b/test/system/templates/example-storage-class-fss-empty.template
@@ -0,0 +1,7 @@
+# Storage class to create mount target on without any additional information
+# Assumes a mount target has already been created and a random one shall be chosen
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: oci-fss
+provisioner: oracle.com/oci-fss
diff --git a/test/system/templates/example-storage-class-fss-mnt.template b/test/system/templates/example-storage-class-fss-mnt.template
new file mode 100644
index 000000000..0954ca269
--- /dev/null
+++ b/test/system/templates/example-storage-class-fss-mnt.template
@@ -0,0 +1,8 @@
+# Storage class with the OCID of an existing mount target to use
+kind: StorageClass
+apiVersion: storage.k8s.io/v1beta1
+metadata:
+  name: oci-fss
+provisioner: oracle.com/oci-fss
+parameters:
+  mntTargetId: {{MNT_TARGET_OCID}}
diff --git a/examples/example-storage-class-fss.template b/test/system/templates/example-storage-class-fss-subnet.template
similarity index 58%
rename from examples/example-storage-class-fss.template
rename to test/system/templates/example-storage-class-fss-subnet.template
index 9b1a87a4f..743e216fa 100644
--- a/examples/example-storage-class-fss.template
+++ b/test/system/templates/example-storage-class-fss-subnet.template
@@ -1,7 +1,9 @@
+
+# Storage class with subnet OCID to create mount target on
 kind: StorageClass
 apiVersion: storage.k8s.io/v1beta1
 metadata:
   name: oci-fss
 provisioner: oracle.com/oci-fss
 parameters:
-  subnetId: {{MNT_TARGET_OCID}}
\ No newline at end of file
+  subnetId: {{SUBNET_OCID}}
diff --git a/test/system/utils.py b/test/system/utils.py
index aea35fb59..3d46d02aa 100644
--- a/test/system/utils.py
+++ b/test/system/utils.py
@@ -24,6 +24,7 @@
 DEBUG_FILE = "runner.log"
 REPORT_DIR_PATH="/tmp/results"
 REPORT_FILE="done"
+TMP_KUBECONFIG = "/tmp/kubeconfig.conf"
 
 def _banner(as_banner, bold):
     if as_banner:
@@ -74,9 +75,9 @@ def finish_with_exit_code(exit_code, write_report=True, report_dir_path=REPORT_D
         _debug_file("\nTest Suite Failed\n")
     time.sleep(3)
     copyfile(DEBUG_FILE, report_dir_path + "/" + DEBUG_FILE)
-    with open(report_dir_path + "/" + report_file, "w+") as file:
+    with open(report_dir_path + "/" + report_file, "w+") as file:
         file.write(str(report_dir_path + "/" + DEBUG_FILE))
-    sys.exit(exit_code)
+    sys.exit(exit_code)
 
 def reset_debug_file():
     if os.path.exists(DEBUG_FILE):
@@ -104,4 +105,19 @@ def run_command(cmd, cwd, display_errors=True):
         log("  stdout: " + stdout)
         log("  stderr: " + stderr)
         log("  result: " + str(returncode))
-    return (stdout, stderr, returncode)
\ No newline at end of file
+    return (stdout, stderr, returncode)
+
+def _get_kubeconfig():
+    return os.environ['KUBECONFIG'] if "KUBECONFIG" in os.environ else TMP_KUBECONFIG
+
+def kubectl(action, 
exit_on_error=True, display_errors=True, log_stdout=True):
+    if "KUBECONFIG" not in os.environ and "KUBECONFIG_VAR" not in os.environ:
+        (stdout, _, returncode) = run_command("kubectl " + action, ".", display_errors)
+    else:
+        (stdout, _, returncode) = run_command("KUBECONFIG=" + _get_kubeconfig() + " kubectl " + action, ".", display_errors)
+    if exit_on_error and returncode != 0:
+        log("Error running kubectl")
+        finish_with_exit_code(1)
+    if log_stdout:
+        log(stdout)
+    return stdout
diff --git a/test/system/vol_provisioner_system_test.py b/test/system/vol_provisioner_system_test.py
new file mode 100644
index 000000000..3710058f1
--- /dev/null
+++ b/test/system/vol_provisioner_system_test.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import utils
+import time
+import oci
+import yaml
+import os
+import json
+import datetime
+import sys
+import re
+from yaml_utils import PopulateYaml
+
+
+def _retrieve_metrics(test_func):
+    def wrapper(self, *args, **kwargs):
+        # The metric name is consumed here, so the wrapped test never sees it
+        _canaryMetricName = kwargs.pop(self.CANARY_METRIC_NAME, None)
+        _res = test_func(self, *args, **kwargs)
+        if self._canaryMetrics and _canaryMetricName:
+            self._canaryMetrics.update_canary_metric(_canaryMetricName, int(_res))
+        return _res
+    return wrapper
+
+class VolumeProvisionerSystemTestInterface(object):
+
+    TERRAFORM_CLUSTER = "terraform/cluster"
+    TERRAFORM_DIR = "terraform"
+    # Variable names correspond to the ones found in the terraform config file
+    TERRAFORM_AVAILABILITY_DOMAIN = "availability_domain"
+    TERRAFORM_VOLUME_OCID = "volume_ocid"
+    POD_CONTROLLER = "controller"
+    POD_VOLUME = "volume"
+    BLOCK_STORAGE = "block"
+    FS_STORAGE = "fileSystem"
+    TIMEOUT = 600
+    BOUND = "Bound"
+    TMP_OCI_API_KEY_FILE = "/tmp/oci_api_key.pem"
+    TMP_OCICONFIG = "/tmp/ociconfig"
+    LIFECYCLE_STATE_ON = {BLOCK_STORAGE: 'AVAILABLE',
+                          FS_STORAGE: 'ACTIVE'}
+    LIFECYCLE_STATE_OFF = {BLOCK_STORAGE: 'TERMINATED',
+                           FS_STORAGE: 'DELETED'}
+    DEFAULT_AVAILABILITY_DOMAIN = "NWuj:PHX-AD-2"
+    CANARY_METRIC_NAME = "canaryMetricName"
+
+    def __init__(self, test_id=None, setup=False, compartment_id=None, check_oci=False, k8Resources=None, canaryMetrics=None):
+        '''@param test_id: Id of currently running test
+        @type test_id: C{Str}
+        @param setup: Flag that indicates whether the provisioner needs to be setup on the cluster
+        @type setup: C{Bool}
+        @param compartment_id: Compartment Id to use to create/delete resources
+        @type compartment_id: C{Str}
+        @param check_oci: Check with OCI that the volumes have been created/destroyed (requires --setup)
+        @type check_oci: C{Bool}
+        '''
+        self._test_id = test_id if test_id else "demotest"
+        self._setup = setup
+        self._compartment_id = compartment_id
+        self._region = self._get_region()
+        self._check_oci = check_oci
+        self._oci_config = self._get_oci_config() if check_oci else None
+        self._terraform_env = self._get_terraform_env()
+        self._k8sResources = k8Resources if k8Resources else []
+        self._canaryMetrics = canaryMetrics
+        
self._testSuccess = True
+
+    def run(self):
+        if self._setup:
+            # Cleanup in case any existing state exists in the cluster
+            self.cleanup(display_errors=False)
+            utils.log("Setting up the volume provisioner", as_banner=True)
+            utils.kubectl("-n kube-system create secret generic oci-volume-provisioner " + \
+                          "--from-file=config.yaml=" + self._get_oci_config_file(),
+                          exit_on_error=False)
+            for _res in self._k8sResources:
+                utils.kubectl("create -f " + _res, exit_on_error=False)
+            pod_name, _, _ = self._wait_for_pod_status("Running", self.POD_VOLUME)
+            self._compartment_id = self._get_compartment_id(pod_name)
+
+    def cleanup(self, exit_on_error=False, display_errors=True):
+        for _res in self._k8sResources:
+            utils.kubectl("delete -f " + _res, exit_on_error, display_errors)
+
+    def _checkTestSuccess(self):
+        '''Check whether any tests failed or not'''
+        if not self._testSuccess:
+            utils.finish_with_exit_code(1)
+
+    @staticmethod
+    def _get_region():
+        nodes_json = utils.kubectl("get nodes -o json", log_stdout=False)
+        nodes = json.loads(nodes_json)
+        for node in nodes['items']:
+            return node['metadata']['labels']['failure-domain.beta.kubernetes.io/zone']
+        utils.log("Region lookup failed")
+        utils.finish_with_exit_code(1)
+
+    def _get_oci_config_file(self):
+        return os.environ['OCICONFIG'] if "OCICONFIG" in os.environ else self.TMP_OCICONFIG
+
+    def _get_oci_config(self):
+        config = dict(oci.config.DEFAULT_CONFIG)
+        oci_config_file = self._get_oci_config_file()
+        with open(oci_config_file, 'r') as stream:
+            try:
+                cnf = yaml.load(stream)
+                config["user"] = cnf['auth']['user']
+                config["tenancy"] = cnf['auth']['tenancy']
+                config["fingerprint"] = cnf['auth']['fingerprint']
+                config["key_file"] = self.TMP_OCI_API_KEY_FILE
+                config["region"] = cnf['auth']['region']
+                return config
+            except yaml.YAMLError:
+                utils.log("Error. 
Failed to parse oci config file " + oci_config_file) + utils.finish_with_exit_code(1) + + def _get_terraform_env(self): + timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') + return "TF_VAR_test_id=" + timestamp + + def _terraform(self, action, cwd): + '''Execute terraform command''' + (stdout, _, returncode) = utils.run_command(self._terraform_env + " terraform " + action, cwd) + if returncode != 0: + utils.log("Error running terraform") + sys.exit(1) + return stdout + + def _get_terraform_output_var(self, var_name): + '''Retrieve variable value from terraform output from state file + @param var_name: Name of variable to retrieve from output + @type var_name: C{Str} + @return: Value of variable + @rtype: C{Str}''' + output = self._terraform("output -json", self.TERRAFORM_DIR,) + jsn = json.loads(output) + return jsn[var_name]["value"] + + def _get_volume_name(self): + '''Retrieve volume name from terraform status output + @param terraform_env: Terraform test id + @type terraform_env: C{Str} + @return: Volume OCID + @rtype: C{Str}''' + _ocid = self._get_terraform_output_var(self.TERRAFORM_VOLUME_OCID).split('.') + return _ocid[len(_ocid)-1] + + def _get_volume(self, volume_name): + stdout = utils.kubectl("get PersistentVolumeClaim -o wide") + for line in stdout.split("\n"): + line_array = line.split() + if len(line_array) >= 3: + name = line_array[0] + status = line_array[1] + if name == volume_name and status == self.BOUND: + return line_array[2] + + def _get_volume_and_wait(self, volume_name): + num_polls = 0 + volume = self._get_volume(volume_name) + while not volume: + utils.log(" waiting...") + time.sleep(1) + num_polls += 1 + if num_polls == self.TIMEOUT: + return False + volume = self._get_volume(volume_name) + return volume + + @staticmethod + def _get_json_doc(response): + decoder = json.JSONDecoder() + try: + doc = decoder.decode(response) + except (ValueError, UnicodeError) as _: + utils.log('Invalid JSON in response: %s' % str(response)) + utils.finish_with_exit_code(1) + return doc + + def _volume_exists(self, volume, state, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + '''Verify whether the volume is available or not + @param storageType: Storage type to search for volumes in + @type storageType: C{Str} + @param availability_domain: Availability domain to look in for + @type availability_domain: C{Str}''' + if compartment_id is None: + compartment_id = self._compartment_id + if storageType == self.BLOCK_STORAGE: + utils.log("Retrieving block volumes for compartmentID %s" % compartment_id) + client = oci.core.blockstorage_client.BlockstorageClient(self._oci_config) + if backup: + volumes= oci.pagination.list_call_get_all_results(client.list_volume_backups, compartment_id) + else: + volumes = oci.pagination.list_call_get_all_results(client.list_volumes, compartment_id) + else: + utils.log("Retrieving file systems") + client = oci.file_storage.FileStorageClient(self._oci_config) + volumes = oci.pagination.list_call_get_all_results(client.list_file_systems, compartment_id, + availability_domain) + utils.log("Getting status for volume %s" % volume) + for vol in self._get_json_doc(str(volumes.data)): + if vol['id'].endswith(volume) and vol['lifecycle_state'] == state: + return True + return False + + def _wait_for_volume(self, volume, state, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None): + num_polls = 0 + while not self._volume_exists(volume, state, compartment_id=compartment_id, 
backup=backup, storageType=storageType,
+                                      availability_domain=availability_domain,):
+            utils.log("    waiting...")
+            time.sleep(1)
+            num_polls += 1
+            if num_polls == self.TIMEOUT:
+                return False
+        return True
+
+    def _wait_for_volume_to_create(self, volume, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None):
+        return self._wait_for_volume(volume, self.LIFECYCLE_STATE_ON[storageType], compartment_id=compartment_id, backup=backup, storageType=storageType,
+                                     availability_domain=availability_domain)
+
+    def _wait_for_volume_to_delete(self, volume, compartment_id=None, backup=False, storageType=BLOCK_STORAGE, availability_domain=None):
+        return self._wait_for_volume(volume, self.LIFECYCLE_STATE_OFF[storageType], compartment_id=compartment_id, backup=backup, storageType=storageType,
+                                     availability_domain=availability_domain)
+
+    @_retrieve_metrics
+    def _test_create_volume(self, claim_target, claim_volume_name, availability_domain=None, verify_func=None,
+                            storageType=BLOCK_STORAGE, canaryMetricName=None):
+        '''Test making a volume claim from a configuration file
+        @param claim_target: Path of the claim yaml file to create the volume from
+        @type claim_target: C{Str}
+        @param claim_volume_name: Name of the volume claim to wait for
+        @type claim_volume_name: C{Str}
+        @param canaryMetricName: Metric name; consumed by the _retrieve_metrics decorator
+        @type canaryMetricName: C{Str}'''
+        utils.kubectl("create -f " + claim_target, exit_on_error=False)
+
+        volume = self._get_volume_and_wait(claim_volume_name)
+        utils.log("Created volume with name: %s" % str(volume))
+
+        if self._check_oci:
+            utils.log("Querying the OCI api to make sure a volume with this name exists...")
+            if not self._wait_for_volume_to_create(volume, storageType=storageType,
+                                                   availability_domain=availability_domain):
+                utils.log("Failed to find volume with name: " + volume)
+                return False
+            utils.log("Volume: " + volume + " is present and available")
+
+        if verify_func:
+            verify_func(self._test_id, availability_domain, volume)
+
+        utils.log("Delete the volume claim")
+        utils.kubectl("delete -f " + claim_target, exit_on_error=False)
+
+        if self._check_oci:
+            utils.log("Querying the OCI api to make sure a volume with this name now doesn't exist...")
+            self._wait_for_volume_to_delete(volume, storageType=storageType,
+                                            availability_domain=availability_domain)
+            if not self._volume_exists(volume, self.LIFECYCLE_STATE_OFF[storageType],
+                                       compartment_id=self._compartment_id, storageType=storageType,
+                                       availability_domain=availability_domain):
+                utils.log("Volume with name: " + volume + " still exists")
+                return False
+            utils.log("Volume: " + volume + " has now been terminated")
+        return True
+
+    def _create_rc_or_pod(self, config, availability_domain, volume_name="default_volume"):
+        '''Create replication controller or pod and wait for it to start
+        @param config: Replication controller configuration file to patch
+        @type config: C{Str}
+        @param availability_domain: Availability domain to start rc in
+        @type availability_domain: C{Str}
+        @param volume_name: Volume name used by the replication controller
+        @type volume_name: C{Str}
+        @return: Tuple containing the name of the created rc and its config file
+        @rtype: C{Tuple}'''
+        _config = PopulateYaml(config, self._test_id, volume_name=volume_name, availability_domain=availability_domain).generateFile()
+        utils.log("Starting the replication controller (creates a single nginx pod).")
+        utils.kubectl("delete -f " + _config, exit_on_error=False, display_errors=False)
+        utils.kubectl("create -f " + _config)
+        utils.log("Waiting for the pod to start.")
+        _name, _, _ = self._wait_for_pod_status("Running", self.POD_CONTROLLER)
+        return _name, _config
+
+    
def _wait_for_pod_status(self, desired_status, pod_type): + '''Wait until the pod gets to the desired status + @param desired_status: Status to wait for + @type desired_status: C{Str} + @param pod_type: Pod type to query + @type pod_type: C{Str} + @return: Tuple containing the name of the resource, its status and the + node it's running on + @rtype: C{Tuple}''' + infos = self._get_pod_infos(pod_type) + num_polls = 0 + while not any(i[1] == desired_status for i in infos): + for i in infos: + utils.log(" - pod: " + i[0] + ", status: " + i[1] + ", node: " + i[2]) + time.sleep(1) + num_polls += 1 + if num_polls == self.TIMEOUT: + for i in infos: + utils.log("Error: Pod: " + i[0] + " " + + "failed to achieve status: " + desired_status + "." + + "Final status was: " + i[1]) + sys.exit(1) + infos = self._get_pod_infos(pod_type) + for i in infos: + if i[1] == desired_status: + return (i[0], i[1], i[2]) + # Should never get here. + return (None, None, None) + + def _get_pod_infos(self, pod_type): + '''Retrieve pod information from kube-system + @param pod_type: Pod type to search for + @type pod_type: C{Str} + @return: Tuple containing the name of the resource, its status and the + node it's running on + @rtype: C{Tuple}''' + _namespace = "-n kube-system" if pod_type == self.POD_VOLUME else "" + stdout = utils.kubectl(_namespace + " get pods -o wide") + infos = [] + for line in stdout.split("\n"): + line_array = line.split() + if len(line_array) > 0: + name = line_array[0] + if name.startswith('oci-volume-provisioner') and pod_type == self.POD_VOLUME: + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + if re.match(r"nginx-controller-" + self._test_id + ".*", line) and pod_type == self.POD_CONTROLLER: + name = line_array[0] + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + if re.match(r"demooci-fss-pod-" + self._test_id + ".*", line) and pod_type == self.POD_CONTROLLER: + name = line_array[0] + status = line_array[2] + node = line_array[6] + infos.append((name, status, node)) + return infos + + def _get_compartment_id(self, pod_name): + '''Gets the oci compartment_id from the oci-volume-provisioner pod host. 
+
+        This is where oci volume resources will be created.'''
+        result = utils.kubectl("-n kube-system exec %s -- curl -s http://169.254.169.254/opc/v1/instance/" % pod_name,
+                               exit_on_error=False, log_stdout=False)
+        result_json = self._get_json_doc(str(result))
+        compartment_id = result_json["compartmentId"]
+        return compartment_id
+
+    @staticmethod
+    def _create_file_via_replication_controller(rc_name, file_name="hello.txt"):
+        '''Create file via the replication controller
+        @param rc_name: Name of the replication controller to write data to
+        @type rc_name: C{Str}
+        @param file_name: Name of file to create
+        @type file_name: C{Str}'''
+        utils.kubectl("exec " + rc_name + " -- touch /usr/share/nginx/html/" + file_name)
+
+    @staticmethod
+    def _verify_file_existance_via_replication_controller(rc_name, file_name="hello.txt"):
+        '''Verify whether file exists via the replication controller
+        @param rc_name: Name of the replication controller to verify
+        @type rc_name: C{Str}
+        @param file_name: Name of file to check for
+        @type file_name: C{Str}'''
+        utils.log("Does the new file exist?")
+        stdout = utils.kubectl("exec " + rc_name + " -- ls /usr/share/nginx/html")
+        if file_name not in stdout.split("\n"):
+            utils.log("Error: Failed to find file %s in mounted volume" % file_name)
+            sys.exit(1)
+        utils.log("Yes it does!")
diff --git a/test/system/yaml_utils.py b/test/system/yaml_utils.py
new file mode 100644
index 000000000..e701d5015
--- /dev/null
+++ b/test/system/yaml_utils.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
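+
+# Example usage (a sketch mirroring the call sites in fss_system_test.py):
+#
+#   claim_yaml = PopulateYaml("templates/example-claim-fss.template", test_id,
+#                             region=region).generateFile()
+#   utils.kubectl("create -f " + claim_yaml)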
+
+import re
+import utils
+
+class PopulateYaml(object):
+
+    TEST_ID = "{{TEST_ID}}"
+    REGION = "{{REGION}}"
+    BACKUP_ID = "{{BACKUP_ID}}"
+    MNT_TARGET_OCID = "{{MNT_TARGET_OCID}}"
+    SUBNET_OCID = "{{SUBNET_OCID}}"
+    VOLUME_NAME = "{{VOLUME_NAME}}"
+    AVAILABILITY_DOMAIN = "{{AVAILABILITY_DOMAIN}}"
+    # Maps instance attribute names to the placeholders they substitute
+    TEMPLATE_ELEMENTS = {'_test_id': TEST_ID, '_region': REGION,
+                         '_backup_id': BACKUP_ID, '_mount_target_ocid': MNT_TARGET_OCID,
+                         '_subnet_ocid': SUBNET_OCID, '_volume_name': VOLUME_NAME,
+                         '_availability_domain': AVAILABILITY_DOMAIN}
+
+    def __init__(self, template_file, test_id, region=None, backup_id=None,
+                 mount_target_ocid=None, subnet_ocid=None, volume_name=None, availability_domain=None):
+        '''@param template_file: Name of file to use as template
+        @type template_file: C{Str}
+        @param test_id: Used for tagging resources with test id
+        @type test_id: C{Str}
+        @param region: Used for selecting resources from specified region
+        @type region: C{Str}
+        @param backup_id: Backup id to create PVC from
+        @type backup_id: C{Str}
+        @param mount_target_ocid: Mount target OCID to populate config with
+        @type mount_target_ocid: C{Str}
+        @param subnet_ocid: Subnet OCID to populate config with
+        @type subnet_ocid: C{Str}
+        @param volume_name: Name used to create volume
+        @type volume_name: C{Str}
+        @param availability_domain: Availability domain (used for pvc)
+        @type availability_domain: C{Str}'''
+        self._template_file = template_file
+        self._test_id = test_id
+        self._region = region
+        self._backup_id = backup_id
+        self._mount_target_ocid = mount_target_ocid
+        self._subnet_ocid = subnet_ocid
+        self._volume_name = volume_name
+        # yaml config does not allow ':'
+        self._availability_domain = availability_domain.replace(':', '-') if availability_domain else None
+
+    def generateFile(self):
+        '''Generate yaml based on the given template and fill in additional details
+        @return: Name of generated config file
+        @rtype: C{Str}'''
+        yaml_file = self._template_file + ".yaml"
+        with open(self._template_file, "r") as sources:
+            lines = sources.readlines()
+        with open(yaml_file, "w") as sources:
+            for line in lines:
+                patched_line = line
+                for _elem, _elemName in self.TEMPLATE_ELEMENTS.iteritems():
+                    if getattr(self, _elem) is not None:
+                        patched_line = re.sub(_elemName, getattr(self, _elem), patched_line)
+                    elif _elemName in [self.MNT_TARGET_OCID, self.SUBNET_OCID] and _elemName in patched_line:
+                        # Remove lines from config files if attribute is not specified
+                        utils.log("%s not specified. Removing reference from config" % _elemName)
+                        patched_line = ""
+                sources.write(patched_line)
+        return yaml_file
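
The base class above is meant to be subclassed per storage scenario, as BlockSystemTests, FSSSystemTests and BackupVolumeSystemTest all do. A minimal sketch of an additional suite (the class name, template path and metric name below are hypothetical, shown only to illustrate the extension points):

    # Hypothetical subclass illustrating the extension points of
    # VolumeProvisionerSystemTestInterface; names below are illustrative only.
    import utils
    from yaml_utils import PopulateYaml
    from vol_provisioner_system_test import VolumeProvisionerSystemTestInterface

    class ExampleSystemTests(VolumeProvisionerSystemTestInterface):

        CM_EXAMPLE = "volume_provisioner_example"  # hypothetical canary metric name

        def run(self):
            # Installs the provisioner and resolves the compartment when setup=True
            super(ExampleSystemTests, self).run()
            if self._check_oci:
                utils.log("Running system test: Example claim", as_banner=True)
                _claim = PopulateYaml("templates/example-claim.template", self._test_id,
                                      region=self._region).generateFile()
                self._testSuccess = self._test_create_volume(_claim,
                                                             "demooci-" + self._test_id,
                                                             canaryMetricName=self.CM_EXAMPLE)
                self._checkTestSuccess()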