diff --git a/drivers/backup/auth.go b/drivers/backup/auth.go
index e930cca1f..ef1bbcd56 100644
--- a/drivers/backup/auth.go
+++ b/drivers/backup/auth.go
@@ -379,6 +379,7 @@ const (
 	ApplicationOwner    PxBackupRole = "px-backup-app.admin"
 	ApplicationUser                  = "px-backup-app.user"
 	InfrastructureOwner              = "px-backup-infra.admin"
+	SuperAdmin                       = "px-backup-super.admin"
 	DefaultRoles                     = "default-roles-master"
 )
 
diff --git a/drivers/backup/backup.go b/drivers/backup/backup.go
index f4afef89d..86347e9a2 100644
--- a/drivers/backup/backup.go
+++ b/drivers/backup/backup.go
@@ -159,6 +159,12 @@ type Cluster interface {
 	// DeleteCluster deletes a cluster object
 	DeleteCluster(ctx context.Context, req *api.ClusterDeleteRequest) (*api.ClusterDeleteResponse, error)
 
+	// ShareCluster shares a cluster object with the given users and/or groups
+	ShareCluster(ctx context.Context, req *api.ShareClusterRequest) (*api.ShareClusterResponse, error)
+
+	// UnShareCluster unshares a cluster object from the given users and/or groups
+	UnShareCluster(ctx context.Context, req *api.UnShareClusterRequest) (*api.UnShareClusterResponse, error)
+
 	// ClusterUpdateBackupShare updates ownership details for backup share at cluster
 	ClusterUpdateBackupShare(ctx context.Context, req *api.ClusterBackupShareUpdateRequest) (*api.ClusterBackupShareUpdateResponse, error)
 
diff --git a/drivers/backup/portworx/portworx.go b/drivers/backup/portworx/portworx.go
index b338471e7..593264fa3 100644
--- a/drivers/backup/portworx/portworx.go
+++ b/drivers/backup/portworx/portworx.go
@@ -407,6 +407,14 @@ func (p *portworx) DeleteCluster(ctx context.Context, req *api.ClusterDeleteRequ
 	return p.clusterManager.Delete(ctx, req)
 }
 
+func (p *portworx) ShareCluster(ctx context.Context, req *api.ShareClusterRequest) (*api.ShareClusterResponse, error) {
+	return p.clusterManager.ShareCluster(ctx, req)
+}
+
+func (p *portworx) UnShareCluster(ctx context.Context, req *api.UnShareClusterRequest) (*api.UnShareClusterResponse, error) {
+	return p.clusterManager.UnShareCluster(ctx, req)
+}
+
 func (p *portworx) ClusterUpdateBackupShare(ctx context.Context, req *api.ClusterBackupShareUpdateRequest) (*api.ClusterBackupShareUpdateResponse, error) {
 	reqInterface, err := p.SetMissingClusterUID(ctx, req)
 	if err != nil {
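Editor note (not part of the patch): a minimal sketch of how the two new driver methods might be driven directly from the torpedo tests package. It assumes an authenticated ctx, an already-registered cluster name/UID, and a Keycloak user ID obtained via backup.FetchIDOfUser; the request shape mirrors the ShareCluster/UnShareCluster helpers added in tests/backup_helper.go later in this diff.

// shareAndUnshareClusterOnce is an illustrative helper only: it shares a cluster with a
// single user ID through the new driver methods and then immediately revokes the share.
// BackupOrgID, Inst(), and the api package alias come from the torpedo tests package.
func shareAndUnshareClusterOnce(ctx context.Context, clusterName, clusterUID, userID string) error {
	clusterRef := &api.ObjectRef{Name: clusterName, Uid: clusterUID}
	if _, err := Inst().Backup.ShareCluster(ctx, &api.ShareClusterRequest{
		OrgId:      BackupOrgID,
		ClusterRef: clusterRef,
		Users:      []string{userID},
	}); err != nil {
		return err
	}
	_, err := Inst().Backup.UnShareCluster(ctx, &api.UnShareClusterRequest{
		OrgId:      BackupOrgID,
		ClusterRef: clusterRef,
		Users:      []string{userID},
	})
	return err
}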
diff --git a/tests/backup/backup_test_labels.go b/tests/backup/backup_test_labels.go
index 2513aeef3..993ccaa2e 100644
--- a/tests/backup/backup_test_labels.go
+++ b/tests/backup/backup_test_labels.go
@@ -136,6 +136,7 @@ const (
 	PSALowerPrivilegeToHigherPrivilegeWithProjectMapping         TestCaseName = "PSALowerPrivilegeToHigherPrivilegeWithProjectMapping"
 	AzureCloudAccountCreationWithMandatoryAndNonMandatoryFields  TestCaseName = "AzureCloudAccountCreationWithMandatoryAndNonMandatoryFields"
 	AzureCloudAccountForLockedBucket                             TestCaseName = "AzureCloudAccountForLockedBucket"
+	ClusterShare                                                 TestCaseName = "ClusterShare"
 )
 
 // Test case labels
@@ -271,6 +272,7 @@ const (
 	PSALowerPrivilegeToHigherPrivilegeWithProjectMappingLabel         TestCaseLabel = "PSALowerPrivilegeToHigherPrivilegeWithProjectMapping"
 	AzureCloudAccountCreationWithMandatoryAndNonMandatoryFieldsLabel  TestCaseLabel = "AzureCloudAccountCreationWithMandatoryAndNonMandatoryFields"
 	AzureCloudAccountForLockedBucketLabel                             TestCaseLabel = "AzureCloudAccountForLockedBucket"
+	ClusterShareLabel                                                 TestCaseLabel = "ClusterShare"
 )
 
 // Common Labels
@@ -536,4 +538,5 @@ var TestCaseLabelsMap = map[TestCaseName][]TestCaseLabel{
 	PartialBackupWithLowerStorkVersion:                   {PartialBackupWithLowerStorkVersionLabel, PartialBackupLabel},
 	PartialBackupSuccessWithAzureEndpoint:                {PartialBackupSuccessWithAzureEndpointLabel, PartialBackupLabel, AzureBackupLocationLabel},
 	PSALowerPrivilegeToHigherPrivilegeWithProjectMapping: {PSALowerPrivilegeToHigherPrivilegeWithProjectMappingLabel, rkePipelineNightly},
+	ClusterShare:                                         {ClusterShareLabel},
 }
diff --git a/tests/backup/func_backup_cluster_share_test.go b/tests/backup/func_backup_cluster_share_test.go
new file mode 100644
index 000000000..338bccf6c
--- /dev/null
+++ b/tests/backup/func_backup_cluster_share_test.go
@@ -0,0 +1,413 @@
+package tests
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	"github.com/pborman/uuid"
+	api "github.com/portworx/px-backup-api/pkg/apis/v1"
+	"github.com/portworx/torpedo/drivers/backup"
+	"github.com/portworx/torpedo/drivers/scheduler"
+	"github.com/portworx/torpedo/pkg/log"
+	. "github.com/portworx/torpedo/tests"
+)
+
+var _ = Describe("{ClusterShare}", Label(TestCaseLabelsMap[ClusterShare]...), func() {
+	type user struct {
+		name string
+		ctx  context.Context
+	}
+
+	var (
+		scheduledAppContexts []*scheduler.Context
+		bkpNamespaces        []string
+		providers            []string            = GetBackupProviders()
+		infraAdminRole       backup.PxBackupRole = backup.InfrastructureOwner
+		superAdminRole       backup.PxBackupRole = backup.SuperAdmin
+		cloudCredName        string
+		cloudCredUID         string
+		backupLocationUID    string
+		bkpLocationName      string
+		backupLocationMap    map[string]string
+		testUsers            map[int]user
+		adminContext         context.Context
+	)
+
+	JustBeforeEach(func() {
+		backupLocationMap = make(map[string]string)
+		scheduledAppContexts = make([]*scheduler.Context, 0)
+		bkpNamespaces = make([]string, 0)
+		cloudCredName, cloudCredUID, backupLocationUID, bkpLocationName = "", "", "", ""
+
+		// Schedule an application
+		appContexts := ScheduleApplications(TaskNamePrefix)
+		for _, ctx := range appContexts {
+			ctx.ReadinessTimeout = AppReadinessTimeout
+			namespace := GetAppNamespace(ctx, TaskNamePrefix)
+			bkpNamespaces = append(bkpNamespaces, namespace)
+			scheduledAppContexts = append(scheduledAppContexts, ctx)
+		}
+
+		// Validate the application
+		ValidateApplications(scheduledAppContexts)
+
+		// Create two test users with infra admin role
+		testUsers = map[int]user{1: {name: "testuser1"}, 2: {name: "testuser2"}}
+		log.InfoD("Creating %d users", 2)
+		for i := 1; i <= 2; i++ {
+			userName := testUsers[i].name
+			firstName := fmt.Sprintf("FirstName%v", i)
+			lastName := fmt.Sprintf("LastName%v", i)
+			email := fmt.Sprintf("testuser%v@cnbu.com", i)
+
+			err := backup.AddUser(userName, firstName, lastName, email, CommonPassword)
+			log.FailOnError(err, "Failed to create user - %s", userName)
+
+			err = backup.AddRoleToUser(userName, infraAdminRole, fmt.Sprintf("Adding %v role to %s", infraAdminRole, userName))
+			log.FailOnError(err, "Failed to add role %s to the user %s", infraAdminRole, userName)
+
+			ctxNonAdmin, err := backup.GetNonAdminCtx(userName, CommonPassword)
+			log.FailOnError(err, "Fetching non admin ctx")
+			user := testUsers[i]
+			user.ctx = ctxNonAdmin
+			testUsers[i] = user
+		}
+
+		// Create backup location and cloud setting with admin user
+		var err error
+		adminContext, err = backup.GetAdminCtxFromSecret()
+		log.FailOnError(err, "Fetching admin user ctx")
+		log.InfoD("Creating backup location and cloud setting with admin user")
+		for _, provider := range providers {
+			cloudCredName = fmt.Sprintf("%s-%s-%v", "cred", provider, time.Now().Unix())
+			bkpLocationName = fmt.Sprintf("autogenerated-backup-location-%v", time.Now().Unix())
+			cloudCredUID = uuid.New()
+			backupLocationUID = uuid.New()
+			backupLocationMap[backupLocationUID] = bkpLocationName
+			err := CreateCloudCredential(provider, cloudCredName, cloudCredUID, BackupOrgID, adminContext)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of cloud credential named [%s] for org [%s] with [%s] as provider", cloudCredName, BackupOrgID, provider))
+			err = CreateBackupLocationWithContext(provider, bkpLocationName, backupLocationUID, cloudCredName, cloudCredUID, getGlobalBucketName(provider), BackupOrgID, "", adminContext, true)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creating backup location %s", bkpLocationName))
+		}
+
+		log.InfoD("Sharing backup location with user2")
+		err = AddBackupLocationOwnership(bkpLocationName, backupLocationUID, []string{testUsers[2].name}, nil, Read, Invalid, adminContext)
+		log.FailOnError(err, "failed to update backup location [%v] ownership", bkpLocationName)
+	})
+
+	// This testcase verifies that restores created/owned by a non-super-admin user are deleted during cluster un-share.
+	It("VerifyRestoreObjectsAreDeletedCreatedByNonSuperAdmin", func() {
+		StartPxBackupTorpedoTest("VerifyRestoreObjectsAreDeletedCreatedByNonSuperAdmin", "Verify restores created by a non-super-admin user are deleted during cluster unshare", nil, 301039, Sgajawada, Q2FY25)
+
+		var (
+			backupName       = fmt.Sprintf("%s-%v", BackupNamePrefix, time.Now().Unix())
+			restoreName      = fmt.Sprintf("%s-%v", RestoreNamePrefix, time.Now().Unix())
+			namespaceMapping = make(map[string]string)
+			clusterUid       string
+			restoreUID       string
+		)
+
+		Step("Create a cluster object with User1 and share with User2", func() {
+			err := AddSourceCluster(testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of source [%s] cluster with %s ctx", SourceClusterName, testUsers[1].name))
+
+			clusterUid, err = Inst().Backup.GetClusterUID(testUsers[1].ctx, BackupOrgID, SourceClusterName)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] cluster uid", SourceClusterName))
+
+			// Share the source cluster with testuser2
+			err = ShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying share of source [%s] cluster with %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name))
+		})
+
+		Step("Create a backup and restore it on the same (shared) cluster with User2", func() {
+			// Take backup
+			log.InfoD(fmt.Sprintf("Taking backup of multiple namespaces [%v]", bkpNamespaces))
+			err := CreateBackup(backupName, SourceClusterName, bkpLocationName, backupLocationUID, bkpNamespaces, nil, BackupOrgID, clusterUid, "", "", "", "", testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of backup [%s]", backupName))
+
+			// Create restore
+			for _, namespace := range bkpNamespaces {
+				restoredNameSpace := fmt.Sprintf("%s-%s", RandomString(10), "restored")
+				namespaceMapping[namespace] = restoredNameSpace
+			}
+			log.InfoD("Namespace mapping is %v:", namespaceMapping)
+			err = CreateRestoreWithClusterUID(restoreName, backupName, namespaceMapping, SourceClusterName, clusterUid, BackupOrgID, testUsers[2].ctx, make(map[string]string))
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of restore [%s]", restoreName))
+			restoreUID, err = Inst().Backup.GetRestoreUID(testUsers[2].ctx, restoreName, BackupOrgID)
+			log.FailOnError(err, "failed to get restore %s uid", restoreName)
+
+		})
+
+		Step("User1 unshares the cluster from User2", func() {
+			// Un-share the source cluster from testuser2
+			err := UnShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying unshare of source [%s] cluster from %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name))
+		})
+
+		Step("Validate the restore object", func() {
+			log.InfoD("validating the restore object")
+			restoreInspectRequest := &api.RestoreInspectRequest{
+				Name:  restoreName,
+				OrgId: BackupOrgID,
+				Uid:   restoreUID,
+			}
+			_, err := Inst().Backup.InspectRestore(testUsers[2].ctx, restoreInspectRequest)
+			log.FailOnNoError(err, "expected inspect of restore [%s] to fail after cluster unshare", restoreName)
+		})
+
+		Step("Cleanup", func() {
+			backupDriver := Inst().Backup
+			backupUID, err := backupDriver.GetBackupUID(testUsers[2].ctx, backupName, BackupOrgID)
+			log.FailOnError(err, "Failed while trying to get backup UID for - [%s]", backupName)
+
+			log.InfoD("Deleting backup")
+			_, err = DeleteBackup(backupName, backupUID, BackupOrgID, testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting backup [%s]", backupName))
+
+			err = DeleteClusterWithUID(SourceClusterName, clusterUid, BackupOrgID, testUsers[1].ctx, false)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Cluster [%s]", SourceClusterName))
+		})
+	})
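Editor note (not part of the patch): the "Validate the restore object" step above asserts on a single InspectRestore call immediately after the unshare. If deletion of the shared user's restores is asynchronous on the backend, a retry-based check may be more robust. A minimal sketch, assuming sched-ops' task.DoRetryWithTimeout ("github.com/portworx/sched-ops/task") and hypothetical timeout/interval values:

// validateRestoreGone polls until inspecting the restore fails (i.e. the restore object was
// removed after the cluster unshare) or the timeout expires. Illustrative only.
func validateRestoreGone(ctx context.Context, restoreName, restoreUID string) error {
	checkGone := func() (interface{}, bool, error) {
		_, err := Inst().Backup.InspectRestore(ctx, &api.RestoreInspectRequest{
			Name:  restoreName,
			OrgId: BackupOrgID,
			Uid:   restoreUID,
		})
		if err == nil {
			// Restore still present; ask DoRetryWithTimeout to retry.
			return nil, true, fmt.Errorf("restore [%s] still exists", restoreName)
		}
		return nil, false, nil
	}
	// 5 minute timeout / 10 second retry interval are illustrative values.
	_, err := task.DoRetryWithTimeout(checkGone, 5*time.Minute, 10*time.Second)
	return err
}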
+	// This testcase verifies that restores created/owned by a user with the super-admin role are not deleted during cluster un-share.
+	It("VerifyRestoreObjectsAreNotDeletedCreatedBySuperAdmin", func() {
+		StartPxBackupTorpedoTest("VerifyRestoreObjectsAreNotDeletedCreatedBySuperAdmin", "Verify restores created by a super-admin user are not deleted during cluster unshare", nil, 301041, Sgajawada, Q2FY25)
+
+		var (
+			backupName       = fmt.Sprintf("%s-%v", BackupNamePrefix, time.Now().Unix())
+			restoreName      = fmt.Sprintf("%s-%v", RestoreNamePrefix, time.Now().Unix())
+			namespaceMapping = make(map[string]string)
+			clusterUid       string
+			restoreUID       string
+		)
+
+		Step("Update testuser2 with super admin role", func() {
+			err := backup.AddRoleToUser(testUsers[2].name, superAdminRole, fmt.Sprintf("Adding %v role to %s", superAdminRole, testUsers[2].name))
+			log.FailOnError(err, "failed to add role %s to the user %s", superAdminRole, testUsers[2].name)
+
+			ctxNonAdmin, err := backup.GetNonAdminCtx(testUsers[2].name, CommonPassword)
+			log.FailOnError(err, "Fetching non admin ctx")
+			user := testUsers[2]
+			user.ctx = ctxNonAdmin
+			testUsers[2] = user
+		})
+
+		Step("Create a cluster object with User1 and share with User2", func() {
+			err := AddSourceCluster(testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of source [%s] cluster with %s ctx", SourceClusterName, testUsers[1].name))
+			clusterUid, err = Inst().Backup.GetClusterUID(testUsers[1].ctx, BackupOrgID, SourceClusterName)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] cluster uid", SourceClusterName))
+
+			// Share the source cluster with testuser2
+			err = ShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying share of source [%s] cluster with %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name))
+		})
+
+		Step("Create a backup and restore it on the same (shared) cluster with User2", func() {
+			// Take backup
+			log.InfoD(fmt.Sprintf("Taking backup of multiple namespaces [%v]", bkpNamespaces))
+			err := CreateBackup(backupName, SourceClusterName, bkpLocationName, backupLocationUID, bkpNamespaces, nil, BackupOrgID, clusterUid, "", "", "", "", testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of backup [%s]", backupName))
+
+			// Create restore
+			for _, namespace := range bkpNamespaces {
+				restoredNameSpace := fmt.Sprintf("%s-%s", RandomString(10), "restored")
+				namespaceMapping[namespace] = restoredNameSpace
+			}
+			log.InfoD("Namespace mapping is %v:", namespaceMapping)
+			err = CreateRestoreWithClusterUID(restoreName, backupName, namespaceMapping, SourceClusterName, clusterUid, BackupOrgID, testUsers[2].ctx, make(map[string]string))
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of restore [%s]", restoreName))
+			restoreUID, err = Inst().Backup.GetRestoreUID(testUsers[2].ctx, restoreName, BackupOrgID)
+			log.FailOnError(err, "failed to get restore %s uid", restoreName)
+		})
+
+		Step("User1 unshares the cluster from User2", func() {
+			// Un-share the source cluster from testuser2
+			err := UnShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying unshare of source [%s] cluster from %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name))
+		})
+
+		Step("Validate the restore object", func() {
+			log.InfoD("validating the restore object")
+			restoreInspectRequest := &api.RestoreInspectRequest{
+				Name:  restoreName,
+				OrgId: BackupOrgID,
+				Uid:   restoreUID,
+			}
+			_, err := Inst().Backup.InspectRestore(testUsers[2].ctx, restoreInspectRequest)
+			log.FailOnError(err, "failed to inspect restore %s", restoreName)
+		})
+
+		Step("Cleanup", func() {
+			backupDriver := Inst().Backup
+			backupUID, err := backupDriver.GetBackupUID(testUsers[2].ctx, backupName, BackupOrgID)
+			log.FailOnError(err, "Failed while trying to get backup UID for - [%s]", backupName)
+
+			log.InfoD("Deleting backup")
+			_, err = DeleteBackup(backupName, backupUID, BackupOrgID, testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting backup [%s]", backupName))
+
+			log.InfoD(fmt.Sprintf("Delete restore with name [%s]", restoreName))
+			err = DeleteRestore(restoreName, BackupOrgID, testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting restore [%s]", restoreName))
+
+			err = DeleteClusterWithUID(SourceClusterName, clusterUid, BackupOrgID, testUsers[1].ctx, false)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Cluster [%s]", SourceClusterName))
+		})
+
+	})
+	// This testcase verifies the cluster un-share operation when a backup schedule exists on the shared cluster.
+ It("VerifyClusterUnShareWhenBackupSchedulesAreDeleted", func() { + StartPxBackupTorpedoTest("VerifyClusterUnShareWhenBackupSchedulesAreDeleted", "VerifyClusterUnShareWhenBackupSchedulesAreDeleted during the cluster unshare", nil, 301040, Sgajawada, Q2FY25) + + var ( + scheduleName string + periodicSchedulePolicyName string + periodicSchedulePolicyUid string + clusterUid string + ) + + Step("Create a cluster object with User1 and Share with User2", func() { + err := AddSourceCluster(testUsers[1].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of source [%s] cluster with %s ctx", SourceClusterName, testUsers[1].name)) + clusterUid, err = Inst().Backup.GetClusterUID(testUsers[1].ctx, BackupOrgID, SourceClusterName) + dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] cluster uid", SourceClusterName)) + + // Share the source cluster to testuser2 + err = ShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying share of source [%s] cluster with %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name)) + }) + + Step("Create a backup schedule with User2", func() { + // Create Schedule Policy + periodicSchedulePolicyName = fmt.Sprintf("%s-%s", "periodic", RandomString(5)) + periodicSchedulePolicyUid = uuid.New() + periodicSchedulePolicyInterval := int64(15) + err := CreateBackupScheduleIntervalPolicy(5, periodicSchedulePolicyInterval, 5, periodicSchedulePolicyName, periodicSchedulePolicyUid, BackupOrgID, testUsers[2].ctx, false, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of periodic schedule policy of interval [%v] minutes named [%s] ", periodicSchedulePolicyInterval, periodicSchedulePolicyName)) + + // Take Backup Schedule + log.InfoD("Taking schedule backup of multiple namespaces") + scheduleName = fmt.Sprintf("schedule-bkp-%v", RandomString(5)) + _, err = CreateScheduleBackupWithoutCheckWithClusterUID(scheduleName, SourceClusterName, clusterUid, bkpLocationName, backupLocationUID, bkpNamespaces, make(map[string]string), BackupOrgID, "", "", "", "", periodicSchedulePolicyName, periodicSchedulePolicyUid, testUsers[2].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of scheduled backup with schedule name [%s]", scheduleName)) + }) + + Step("User1 UnShares the cluster from User2", func() { + // UnShare the source cluster to testuser2 + err := UnShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx) + log.InfoD("UnshareCluster error response: %v", err) + dash.VerifyNotNilFatal(err, fmt.Sprintf("Verifying Unshare of source [%s] cluster with %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name)) + }) + + Step("Delete the backup schedule and unshare the cluster", func() { + // TODO: Here need to delete the backup schedule with super-admin role user + log.InfoD("Deleting backup schedule [%s]", scheduleName) + scheduleUID, err := GetScheduleUID(scheduleName, BackupOrgID, testUsers[2].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] backupschedule uid", scheduleName)) + + err = DeleteScheduleWithUIDAndWait(scheduleName, scheduleUID, SourceClusterName, clusterUid, BackupOrgID, testUsers[2].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying deletion of backup schedule [%s]", scheduleName)) + + // UnShare the source cluster to testuser2 + err = UnShareCluster(SourceClusterName, clusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx) + 
dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying Unshare of source [%s] cluster with %s user using %s ctx", SourceClusterName, testUsers[2].name, testUsers[1].name)) + }) + + Step("Cleanup", func() { + log.InfoD("Deleting schedule policy [%s]", periodicSchedulePolicyName) + schedulePolicyDeleteRequest := &api.SchedulePolicyDeleteRequest{ + Name: periodicSchedulePolicyName, + Uid: periodicSchedulePolicyUid, + OrgId: BackupOrgID, + } + _, err := Inst().Backup.DeleteSchedulePolicy(testUsers[2].ctx, schedulePolicyDeleteRequest) + log.FailOnError(err, "failed to delete schedule policy %s of the user %s", periodicSchedulePolicyName, testUsers[2].name) + + err = DeleteClusterWithUID(SourceClusterName, clusterUid, BackupOrgID, testUsers[1].ctx, false) + dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Cluster [%s]", SourceClusterName)) + }) + + }) + // This testcase verifies the restore creation on the shared cluster by the shared user. + It("VerifyRestoreCreateBySharedClusterUser", func() { + StartPxBackupTorpedoTest("VerifyRestoreCreateBySharedClusterUser", "VerifyRestoreCreateBySharedClusterUser during the cluster unshare", nil, 301042, Sgajawada, Q2FY25) + + var ( + backupName = fmt.Sprintf("%s-%v", BackupNamePrefix, time.Now().Unix()) + restoreName = fmt.Sprintf("%s-%v", RestoreNamePrefix, time.Now().Unix()) + namespaceMapping = make(map[string]string) + clusterUid string + destinationClusterUid string + ) + + Step("Create a cluster object(destination cluster) with User1 and Share with User2", func() { + err := AddDestinationCluster(testUsers[1].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of [%s] cluster with %s ctx", DestinationClusterName, testUsers[1].name)) + destinationClusterUid, err = Inst().Backup.GetClusterUID(testUsers[1].ctx, BackupOrgID, DestinationClusterName) + dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] cluster uid", SourceClusterName)) + + // Share the destination cluster to testuser2 + err = ShareCluster(DestinationClusterName, destinationClusterUid, nil, []string{testUsers[2].name}, testUsers[1].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying share of [%s] cluster with %s user using %s ctx", DestinationClusterName, testUsers[2].name, testUsers[1].name)) + }) + + Step("Create another cluster object(Source Cluster) with User2", func() { + err := AddSourceCluster(testUsers[2].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Verifying creation of [%s] cluster with %s ctx", SourceClusterName, testUsers[2].name)) + clusterUid, err = Inst().Backup.GetClusterUID(testUsers[2].ctx, BackupOrgID, SourceClusterName) + dash.VerifyFatal(err, nil, fmt.Sprintf("Fetching [%s] cluster uid", SourceClusterName)) + }) + + Step("create a backup from soure and restore it on destination(Shared Cluster) with user2", func() { + // Take Backup + log.InfoD(fmt.Sprintf("Taking backup of multiple namespaces [%v]", bkpNamespaces)) + err := CreateBackup(backupName, SourceClusterName, bkpLocationName, backupLocationUID, bkpNamespaces, nil, BackupOrgID, clusterUid, "", "", "", "", testUsers[2].ctx) + dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of backup [%s]", backupName)) + + // Create Restore + for _, namespace := range bkpNamespaces { + restoredNameSpace := fmt.Sprintf("%s-%s", RandomString(10), "restored") + namespaceMapping[namespace] = restoredNameSpace + } + log.InfoD("Namespace mapping is %v:", namespaceMapping) + err = CreateRestoreWithClusterUID(restoreName, backupName, namespaceMapping, DestinationClusterName, destinationClusterUid, BackupOrgID, 
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Creation of restore [%s]", restoreName))
+		})
+
+		Step("Cleanup", func() {
+			backupDriver := Inst().Backup
+			backupUID, err := backupDriver.GetBackupUID(testUsers[2].ctx, backupName, BackupOrgID)
+			log.FailOnError(err, "Failed while trying to get backup UID for - [%s]", backupName)
+
+			log.InfoD("Deleting backup")
+			_, err = DeleteBackup(backupName, backupUID, BackupOrgID, testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting backup [%s]", backupName))
+
+			err = DeleteRestore(restoreName, BackupOrgID, testUsers[2].ctx)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting restore [%s]", restoreName))
+
+			err = DeleteClusterWithUID(SourceClusterName, clusterUid, BackupOrgID, testUsers[2].ctx, false)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Cluster [%s]", SourceClusterName))
+
+			err = DeleteClusterWithUID(DestinationClusterName, destinationClusterUid, BackupOrgID, testUsers[1].ctx, false)
+			dash.VerifyFatal(err, nil, fmt.Sprintf("Deleting Cluster [%s]", DestinationClusterName))
+		})
+	})
+
+	JustAfterEach(func() {
+		defer EndPxBackupTorpedoTest(scheduledAppContexts)
+
+		opts := make(map[string]bool)
+		opts[SkipClusterScopedObjects] = true
+		log.InfoD("Deleting deployed applications")
+		DestroyApps(scheduledAppContexts, opts)
+
+		CleanupCloudSettingsAndClusters(backupLocationMap, cloudCredName, cloudCredUID, adminContext)
+		for i := 1; i <= 2; i++ {
+			err := backup.DeleteUser(testUsers[i].name)
+			log.FailOnError(err, "Failed to delete user - %s", testUsers[i].name)
+		}
+	})
+})
diff --git a/tests/backup_helper.go b/tests/backup_helper.go
index fe2531a26..dbe90cc47 100644
--- a/tests/backup_helper.go
+++ b/tests/backup_helper.go
@@ -91,6 +91,7 @@ const (
 	Sabrarhussaini TestcaseAuthor = "sabrarhussaini"
 	ATrivedi       TestcaseAuthor = "atrivedi-px"
 	Dbinnal        TestcaseAuthor = "dbinnal-px"
+	Sgajawada      TestcaseAuthor = "sgajawada-px"
 )
 
 // TestcaseQuarter List
@@ -1472,6 +1473,76 @@ func CreateScheduleBackupWithoutCheck(scheduleName string, clusterName string, b
 	return resp, nil
 }
 
+// CreateScheduleBackupWithoutCheckWithClusterUID creates a schedule backup, setting the cluster reference UID explicitly, without waiting for success
+func CreateScheduleBackupWithoutCheckWithClusterUID(scheduleName string, clusterName string, clusterUID string, bLocation string, bLocationUID string,
+	namespaces []string, labelSelectors map[string]string, orgID string, preRuleName string,
+	preRuleUid string, postRuleName string, postRuleUid string, schPolicyName string, schPolicyUID string, ctx context1.Context, resourceTypes ...string) (*api.BackupScheduleInspectResponse, error) {
+
+	if GlobalRuleFlag {
+		preRuleName = GlobalPreRuleName
+		if GlobalPreRuleName != "" {
+			preRuleUid = GlobalPreRuleUid
+		}
+
+		postRuleName = GlobalPostRuleName
+		if GlobalPostRuleName != "" {
+			postRuleUid = GlobalPostRuleUid
+		}
+	}
+
+	backupDriver := Inst().Backup
+	bkpSchCreateRequest := &api.BackupScheduleCreateRequest{
+		CreateMetadata: &api.CreateMetadata{
+			Name:  scheduleName,
+			OrgId: orgID,
+		},
+		SchedulePolicyRef: &api.ObjectRef{
+			Name: schPolicyName,
+			Uid:  schPolicyUID,
+		},
+		BackupLocationRef: &api.ObjectRef{
+			Name: bLocation,
+			Uid:  bLocationUID,
+		},
+		SchedulePolicy: schPolicyName,
+		Cluster:        clusterName,
+		Namespaces:     namespaces,
+		LabelSelectors: labelSelectors,
+		PreExecRuleRef: &api.ObjectRef{
+			Name: preRuleName,
+			Uid:  preRuleUid,
+		},
+		PostExecRuleRef: &api.ObjectRef{
+			Name: postRuleName,
+			Uid:  postRuleUid,
+		},
+		ClusterRef: &api.ObjectRef{
+			Name: clusterName,
+			Uid:  clusterUID,
+		},
+		ResourceTypes: resourceTypes,
+	}
+
+	err := AdditionalScheduledBackupRequestParams(bkpSchCreateRequest)
+	if err != nil {
+		return nil, err
+	}
+	_, err = backupDriver.CreateBackupSchedule(ctx, bkpSchCreateRequest)
+	if err != nil {
+		return nil, err
+	}
+	backupScheduleInspectRequest := &api.BackupScheduleInspectRequest{
+		OrgId: orgID,
+		Name:  scheduleName,
+		Uid:   "",
+	}
+	resp, err := backupDriver.InspectBackupSchedule(ctx, backupScheduleInspectRequest)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
 // CreateVMScheduleBackupByNamespacesWithoutCheck creates VM schedule backup of provided namespaces without waiting for success.
 func CreateVMScheduleBackupByNamespacesWithoutCheck(scheduleName string, vms []kubevirtv1.VirtualMachine, clusterName string, bLocation string, bLocationUID string,
 	labelSelectors map[string]string, orgID string, uid string, preRuleName string,
@@ -1666,6 +1737,60 @@ func ShareBackup(backupName string, groupNames []string, userNames []string, acc
 }
 
+// ShareCluster shares a cluster with the mentioned groups and/or users
+func ShareCluster(clusterName, clusterUID string, groupNames []string, userNames []string, ctx context1.Context) error {
+	backupDriver := Inst().Backup
+	userIDs := make([]string, 0)
+
+	for _, userName := range userNames {
+		userID, err := backup.FetchIDOfUser(userName)
+		if err != nil {
+			return err
+		}
+		userIDs = append(userIDs, userID)
+	}
+
+	shareClusterRequest := &api.ShareClusterRequest{
+		OrgId: BackupOrgID,
+		ClusterRef: &api.ObjectRef{
+			Name: clusterName,
+			Uid:  clusterUID,
+		},
+		Users:  userIDs,
+		Groups: groupNames,
+	}
+
+	_, err := backupDriver.ShareCluster(ctx, shareClusterRequest)
+	return err
+}
+
+// UnShareCluster revokes cluster access from the mentioned groups and/or users
+func UnShareCluster(clusterName, clusterUID string, groupNames []string, userNames []string, ctx context1.Context) error {
+	backupDriver := Inst().Backup
+	userIDs := make([]string, 0)
+
+	for _, userName := range userNames {
+		userID, err := backup.FetchIDOfUser(userName)
+		if err != nil {
+			return err
+		}
+		userIDs = append(userIDs, userID)
+	}
+
+	unShareClusterRequest := &api.UnShareClusterRequest{
+		OrgId: BackupOrgID,
+		ClusterRef: &api.ObjectRef{
+			Name: clusterName,
+			Uid:  clusterUID,
+		},
+		Users:  userIDs,
+		Groups: groupNames,
+	}
+
+	_, err := backupDriver.UnShareCluster(ctx, unShareClusterRequest)
+	return err
+}
+
 // ClusterUpdateBackupShare shares all backup with the users and/or groups provided for a given cluster
 // addUsersOrGroups - provide true if the mentioned users/groups needs to be added
 // addUsersOrGroups - provide false if the mentioned users/groups needs to be deleted or removed
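Editor note (not part of the patch): a small usage sketch for the two helpers above. Group names are passed through to the request as-is (only user names are resolved to Keycloak IDs), so sharing with a group needs no extra lookup. The group "qa-team" and user "testuser3" below are hypothetical and would have to exist in Keycloak.

// shareSourceClusterWithGroupExample shares the source cluster with a group and an extra
// user via the helpers above, then revokes the same share. Illustrative only.
func shareSourceClusterWithGroupExample(ownerCtx context1.Context, clusterUID string) error {
	if err := ShareCluster(SourceClusterName, clusterUID, []string{"qa-team"}, []string{"testuser3"}, ownerCtx); err != nil {
		return err
	}
	// Revoke the share again.
	return UnShareCluster(SourceClusterName, clusterUID, []string{"qa-team"}, []string{"testuser3"}, ownerCtx)
}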
@@ -1838,6 +1963,63 @@ func CreateRestore(restoreName string, backupName string, namespaceMapping map[s
 	return nil
 }
 
+// CreateRestoreWithClusterUID creates a restore, setting the cluster reference UID explicitly
+func CreateRestoreWithClusterUID(restoreName string, backupName string, namespaceMapping map[string]string, clusterName string, clusterUID string,
+	orgID string, ctx context1.Context, storageClassMapping map[string]string) error {
+
+	var bkpUid string
+
+	// Check if the backup used is in successful state or not
+	bkpUid, err := Inst().Backup.GetBackupUID(ctx, backupName, orgID)
+	if err != nil {
+		return err
+	}
+
+	backupInspectRequest := &api.BackupInspectRequest{
+		Name:  backupName,
+		Uid:   bkpUid,
+		OrgId: orgID,
+	}
+	resp, err := Inst().Backup.InspectBackup(ctx, backupInspectRequest)
+	if err != nil {
+		return err
+	}
+	actual := resp.GetBackup().GetStatus().Status
+	reason := resp.GetBackup().GetStatus().Reason
+	if actual != api.BackupInfo_StatusInfo_Success && actual != api.BackupInfo_StatusInfo_PartialSuccess {
+		return fmt.Errorf("backup status for [%s] expected was [%s] but got [%s] because of [%s]", backupName, api.BackupInfo_StatusInfo_Success, actual, reason)
+	}
+	backupDriver := Inst().Backup
+	createRestoreReq := &api.RestoreCreateRequest{
+		CreateMetadata: &api.CreateMetadata{
+			Name:  restoreName,
+			OrgId: orgID,
+		},
+		Backup:              backupName,
+		Cluster:             clusterName,
+		NamespaceMapping:    namespaceMapping,
+		StorageClassMapping: storageClassMapping,
+		BackupRef: &api.ObjectRef{
+			Name: backupName,
+			Uid:  bkpUid,
+		},
+		ClusterRef: &api.ObjectRef{
+			Name: clusterName,
+			Uid:  clusterUID,
+		},
+	}
+	_, err = backupDriver.CreateRestore(ctx, createRestoreReq)
+	if err != nil {
+		return err
+	}
+	err = RestoreSuccessCheck(restoreName, orgID, MaxWaitPeriodForRestoreCompletionInMinute*time.Minute, 30*time.Second, ctx)
+	if err != nil {
+		return err
+	}
+	log.Infof("Restore [%s] created successfully", restoreName)
+	return nil
+}
+
 // CreateRestoreWithCRValidation creates a restore along with restore CR validation
 func CreateRestoreWithCRValidation(restoreName string, backupName string, namespaceMapping map[string]string, clusterName string,
 	orgID string, ctx context1.Context, storageClassMapping map[string]string) error {
@@ -5823,6 +6005,7 @@ func RegisterCluster(clusterName string, cloudCredName string, orgID string, ctx
 	if err != nil && !strings.Contains(err.Error(), "already exists with status: Online") {
 		return "", true, err
 	}
+	log.Infof("cluster [%s] create returned: %v", clusterName, err)
 	createClusterStatus, err := Inst().Backup.GetClusterStatus(orgID, clusterName, ctx)
 	if err != nil {
 		return "", true, err