From 0d492278ed2bf6a31c69776e7aea8ad613bc0477 Mon Sep 17 00:00:00 2001
From: Matthieu MOREL
Date: Tue, 14 Jan 2025 08:26:13 +0100
Subject: [PATCH] chore: apply gofumpt in pkg packages

Signed-off-by: Matthieu MOREL
---
 .../velero/v1/server_status_request_types.go | 3 +-
 pkg/archive/extractor_test.go | 2 +-
 pkg/archive/parser_test.go | 4 +-
 pkg/backup/backup.go | 5 +-
 pkg/backup/backup_test.go | 15 ++++--
 pkg/backup/item_backupper.go | 2 +-
 pkg/backup/item_collector.go | 3 +-
 pkg/backup/pv_skip_tracker_test.go | 2 +-
 pkg/builder/testcr_builder.go | 6 +--
 pkg/client/config.go | 4 +-
 pkg/client/config_test.go | 1 +
 pkg/client/dynamic.go | 4 +-
 pkg/client/factory.go | 3 --
 pkg/cmd/cli/backup/create_test.go | 12 ++---
 pkg/cmd/cli/backup/download.go | 2 +-
 pkg/cmd/cli/backup/download_test.go | 1 -
 pkg/cmd/cli/backuplocation/delete_test.go | 3 +-
 pkg/cmd/cli/backuplocation/get_test.go | 1 -
 pkg/cmd/cli/backuplocation/set_test.go | 1 -
 pkg/cmd/cli/datamover/backup.go | 12 +++--
 pkg/cmd/cli/datamover/backup_test.go | 1 -
 pkg/cmd/cli/datamover/data_mover.go | 6 ++-
 pkg/cmd/cli/datamover/data_mover_test.go | 2 +-
 pkg/cmd/cli/nodeagent/server.go | 4 +-
 pkg/cmd/cli/repomantenance/maintenance.go | 1 -
 pkg/cmd/cli/restore/create_test.go | 6 +--
 pkg/cmd/server/plugin/plugin.go | 1 +
 pkg/cmd/server/server.go | 4 +-
 pkg/cmd/test/const.go | 6 ++-
 .../util/downloadrequest/downloadrequest.go | 6 ++-
 pkg/cmd/util/output/backup_describer.go | 3 +-
 pkg/cmd/util/output/backup_printer.go | 26 +++++-----
 pkg/cmd/util/output/backup_repo_printer.go | 16 +++---
 .../output/backup_storage_location_printer.go | 24 ++++-----
 .../output/backup_structured_describer.go | 6 ++-
 pkg/cmd/util/output/output_test.go | 4 +-
 pkg/cmd/util/output/plugin_printer.go | 14 +++---
 pkg/cmd/util/output/restore_describer_test.go | 1 +
 pkg/cmd/util/output/restore_printer.go | 28 +++++------
 pkg/cmd/util/output/schedule_printer.go | 26 +++++-----
 .../volume_snapshot_location_printer.go | 14 +++---
 pkg/controller/backup_controller_test.go | 27 +++++-----
 pkg/controller/backup_deletion_controller.go | 5 +-
 .../backup_deletion_controller_test.go | 32 ++++++------
 pkg/controller/backup_finalizer_controller.go | 3 +-
 .../backup_finalizer_controller_test.go | 1 +
 .../backup_operations_controller.go | 6 ++-
 .../backup_repository_controller.go | 3 +-
 .../backup_storage_location_controller.go | 3 +-
 pkg/controller/backup_sync_controller.go | 5 +-
 pkg/controller/backup_sync_controller_test.go | 7 ++-
 pkg/controller/data_download_controller.go | 10 ++--
 .../data_download_controller_test.go | 12 +++--
 pkg/controller/data_upload_controller.go | 8 +--
 pkg/controller/data_upload_controller_test.go | 9 ++--
 .../pod_volume_backup_controller.go | 4 +-
 .../pod_volume_restore_controller.go | 7 +--
 pkg/controller/restore_controller_test.go | 2 +-
 .../restore_finalizer_controller.go | 2 -
 .../restore_finalizer_controller_test.go | 50 +++++++++++--------
 .../restore_operations_controller.go | 6 ++-
 .../server_status_request_controller.go | 3 +-
 pkg/controller/suite_test.go | 3 +-
 pkg/datamover/backup_micro_service.go | 5 +-
 pkg/datamover/restore_micro_service.go | 4 +-
 pkg/datapath/file_system_test.go | 4 +-
 pkg/datapath/manager.go | 11 ++--
 pkg/datapath/micro_service_watcher.go | 19 ++++---
 pkg/exposer/generic_restore.go | 3 +-
 pkg/exposer/generic_restore_test.go | 3 +-
 pkg/exposer/host_path.go | 11 ++--
 pkg/exposer/host_path_test.go | 3 +-
 pkg/install/deployment.go | 1 +
 pkg/install/install.go | 2 +-
 pkg/itemoperationmap/backup_operation_map.go | 9 ++--
 pkg/itemoperationmap/restore_operation_map.go | 9 ++--
 pkg/metrics/metrics.go | 2 +-
 pkg/persistence/object_store.go | 2 +-
 pkg/persistence/object_store_test.go | 1 +
 pkg/plugin/clientmgmt/process/process.go | 3 +-
 pkg/plugin/clientmgmt/process/registry.go | 2 +-
 .../clientmgmt/process/registry_test.go | 28 +++++------
 .../clientmgmt/process/restartable_process.go | 3 +-
 pkg/plugin/framework/action_resolver.go | 6 ++-
 .../framework/backup_item_action_server.go | 6 ++-
 .../v2/backup_item_action_server.go | 12 +++--
 .../v1/item_block_action_server.go | 6 ++-
 .../v2/restore_item_action_server.go | 9 ++--
 pkg/podvolume/backupper_test.go | 3 +-
 pkg/podvolume/restorer_factory.go | 3 +-
 pkg/repository/backup_repo_op.go | 3 +-
 pkg/repository/backup_repo_op_test.go | 27 ++++++----
 pkg/repository/maintenance.go | 1 -
 pkg/repository/maintenance_test.go | 1 +
 pkg/repository/provider/unified_repo.go | 15 ++----
 pkg/repository/udmrepo/kopialib/lib_repo.go | 3 --
 pkg/restic/exec_commands.go | 6 ++-
 .../actions/change_image_name_action.go | 8 +--
 pkg/restore/actions/csi/pvc_action.go | 9 ++--
 .../actions/csi/volumesnapshot_action.go | 12 +++--
 .../actions/csi/volumesnapshotclass_action.go | 3 +-
 .../init_restorehook_pod_action_test.go | 6 ++-
 pkg/restore/actions/pod_action.go | 3 +-
 pkg/restore/actions/pod_action_test.go | 5 +-
 pkg/restore/actions/service_action.go | 1 -
 pkg/restore/merge_service_account_test.go | 2 +-
 pkg/restore/restore.go | 15 +++---
 pkg/restore/restore_test.go | 26 ++++++----
 pkg/test/fake_file_system.go | 2 +-
 pkg/test/fake_mapper.go | 2 +-
 pkg/test/tar_writer.go | 2 +-
 pkg/uploader/kopia/progress.go | 14 +++---
 pkg/uploader/kopia/snapshot.go | 33 ++++++------
 pkg/uploader/provider/kopia.go | 17 ++++---
 pkg/uploader/provider/provider.go | 6 ++-
 pkg/uploader/provider/restic.go | 22 ++++----
 pkg/uploader/provider/restic_test.go | 15 ++++--
 pkg/util/actionhelpers/rbac.go | 3 +-
 pkg/util/csi/volume_snapshot.go | 12 ++---
 pkg/util/csi/volume_snapshot_test.go | 3 +-
 pkg/util/kube/periodical_enqueue_source.go | 3 +-
 pkg/util/kube/pod.go | 1 -
 pkg/util/kube/pvc_pv.go | 21 ++++----
 pkg/util/kube/pvc_pv_test.go | 4 +-
 pkg/util/kube/utils.go | 8 +--
 pkg/util/kube/utils_test.go | 6 +--
 pkg/util/logging/log_merge_hook.go | 5 +-
 pkg/util/podvolume/pod_volume_test.go | 48 +++++++++++-----
 128 files changed, 565 insertions(+), 471 deletions(-)

diff --git a/pkg/apis/velero/v1/server_status_request_types.go b/pkg/apis/velero/v1/server_status_request_types.go
index 98e15a0b59..c1b39fd2a1 100644
--- a/pkg/apis/velero/v1/server_status_request_types.go
+++ b/pkg/apis/velero/v1/server_status_request_types.go
@@ -45,8 +45,7 @@ type ServerStatusRequest struct {
 }
 
 // ServerStatusRequestSpec is the specification for a ServerStatusRequest.
-type ServerStatusRequestSpec struct {
-}
+type ServerStatusRequestSpec struct{}
 
 // ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest.
// +kubebuilder:validation:Enum=New;Processed diff --git a/pkg/archive/extractor_test.go b/pkg/archive/extractor_test.go index 47cea734c4..b5ffda8816 100644 --- a/pkg/archive/extractor_test.go +++ b/pkg/archive/extractor_test.go @@ -72,7 +72,7 @@ func TestUnzipAndExtractBackup(t *testing.T) { } require.NoError(t, err) - file, err := ext.fs.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644) + file, err := ext.fs.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0o644) require.NoError(t, err) _, err = ext.UnzipAndExtractBackup(file.(io.Reader)) diff --git a/pkg/archive/parser_test.go b/pkg/archive/parser_test.go index c9ad517480..a6c32bb6f9 100644 --- a/pkg/archive/parser_test.go +++ b/pkg/archive/parser_test.go @@ -99,7 +99,7 @@ func TestParse(t *testing.T) { } for _, file := range tc.files { - require.NoError(t, p.fs.MkdirAll(file, 0755)) + require.NoError(t, p.fs.MkdirAll(file, 0o755)) if !strings.HasSuffix(file, "/") { res, err := p.fs.Create(file) @@ -213,7 +213,7 @@ func TestParseGroupVersions(t *testing.T) { } for _, file := range tc.files { - require.NoError(t, p.fs.MkdirAll(file, 0755)) + require.NoError(t, p.fs.MkdirAll(file, 0o755)) if !strings.HasSuffix(file, "/") { res, err := p.fs.Create(file) diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 280164cc40..6e7a659ff3 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -220,7 +220,8 @@ type VolumeSnapshotterGetter interface { // back up individual resources that don't prevent the backup from continuing to be processed) are logged // to the backup log. func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Request, backupFile io.Writer, - actions []biav2.BackupItemAction, itemBlockActions []ibav1.ItemBlockAction, volumeSnapshotterGetter VolumeSnapshotterGetter) error { + actions []biav2.BackupItemAction, itemBlockActions []ibav1.ItemBlockAction, volumeSnapshotterGetter VolumeSnapshotterGetter, +) error { backupItemActions := framework.NewBackupItemActionResolverV2(actions) itemBlockActionResolver := framework.NewItemBlockActionResolver(itemBlockActions) return kb.BackupWithResolvers(log, backupRequest, backupFile, backupItemActions, itemBlockActionResolver, volumeSnapshotterGetter) @@ -889,7 +890,7 @@ func (kb *kubernetesBackupper) writeBackupVersion(tw *tar.Writer) error { Name: versionFile, Size: int64(len(versionString)), Typeflag: tar.TypeReg, - Mode: 0755, + Mode: 0o755, ModTime: time.Now(), } if err := tw.WriteHeader(hdr); err != nil { diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 425fa827c5..9f07f3f77e 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -352,8 +352,12 @@ func TestBackupOldResourceFiltering(t *testing.T) { { name: "OrLabelSelector only backs up matching resources", backup: defaultBackup(). - OrLabelSelector([]*metav1.LabelSelector{{MatchLabels: map[string]string{"a1": "b1"}}, {MatchLabels: map[string]string{"a2": "b2"}}, - {MatchLabels: map[string]string{"a3": "b3"}}, {MatchLabels: map[string]string{"a4": "b4"}}}). + OrLabelSelector([]*metav1.LabelSelector{ + {MatchLabels: map[string]string{"a1": "b1"}}, + {MatchLabels: map[string]string{"a2": "b2"}}, + {MatchLabels: map[string]string{"a3": "b3"}}, + {MatchLabels: map[string]string{"a4": "b4"}}, + }). 
Result(), apiResources: []*test.APIResource{ test.Pods( @@ -3300,7 +3304,8 @@ func TestBackupWithAsyncOperations(t *testing.T) { ResourceIdentifier: velero.ResourceIdentifier{ GroupResource: kuberesource.Pods, Namespace: "ns-1", - Name: "pod-1"}, + Name: "pod-1", + }, OperationID: "pod-1-1", }, Status: itemoperation.OperationStatus{ @@ -3331,7 +3336,8 @@ func TestBackupWithAsyncOperations(t *testing.T) { ResourceIdentifier: velero.ResourceIdentifier{ GroupResource: kuberesource.Pods, Namespace: "ns-1", - Name: "pod-2"}, + Name: "pod-2", + }, OperationID: "pod-2-1", }, Status: itemoperation.OperationStatus{ @@ -3986,6 +3992,7 @@ func (b *fakePodVolumeBackupper) GetPodVolumeBackup(namespace, name string) (*ve } return nil, nil } + func (b *fakePodVolumeBackupper) ListPodVolumeBackupsByPod(podNamespace, podName string) ([]*velerov1.PodVolumeBackup, error) { var pvbs []*velerov1.PodVolumeBackup for _, pvb := range b.pvbs { diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 9e5caef0ed..9614cb5c8d 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -319,7 +319,7 @@ func getFileForArchive(namespace, name, groupResource, versionPath string, itemB Name: filePath, Size: int64(len(itemBytes)), Typeflag: tar.TypeReg, - Mode: 0755, + Mode: 0o755, ModTime: time.Now(), } return FileForArchive{FilePath: filePath, Header: hdr, FileBytes: itemBytes} diff --git a/pkg/backup/item_collector.go b/pkg/backup/item_collector.go index 903b0e991f..1942f03e3a 100644 --- a/pkg/backup/item_collector.go +++ b/pkg/backup/item_collector.go @@ -312,7 +312,7 @@ func sortResourcesByOrder( fullname = item.name } if _, ok := itemMap[fullname]; !ok { - //This item has been inserted in the result + // This item has been inserted in the result continue } sortedItems = append(sortedItems, item) @@ -672,7 +672,6 @@ func (r *itemCollector) processPagerClientCalls( listPager.PageSize = int64(r.pageSize) // Add each item to temporary slice list, paginated, err := listPager.List(context.Background(), metav1.ListOptions{LabelSelector: label}) - if err != nil { r.log.WithError(errors.WithStack(err)).Error("Error listing resources") return list, err diff --git a/pkg/backup/pv_skip_tracker_test.go b/pkg/backup/pv_skip_tracker_test.go index 5f835dfdec..8e10843509 100644 --- a/pkg/backup/pv_skip_tracker_test.go +++ b/pkg/backup/pv_skip_tracker_test.go @@ -61,7 +61,7 @@ func TestSummary(t *testing.T) { func TestSerializeSkipReasons(t *testing.T) { tracker := NewSkipPVTracker() - //tracker.Track("pv5", "", "skipped due to policy") + // tracker.Track("pv5", "", "skipped due to policy") tracker.Track("pv3", podVolumeApproach, "it's set to opt-out") tracker.Track("pv3", csiSnapshotApproach, "not applicable for CSI ") diff --git a/pkg/builder/testcr_builder.go b/pkg/builder/testcr_builder.go index 85bc83b1c0..8cc12829e2 100644 --- a/pkg/builder/testcr_builder.go +++ b/pkg/builder/testcr_builder.go @@ -70,8 +70,6 @@ type TestCR struct { Status TestCRStatus `json:"status,omitempty"` } -type TestCRSpec struct { -} +type TestCRSpec struct{} -type TestCRStatus struct { -} +type TestCRStatus struct{} diff --git a/pkg/client/config.go b/pkg/client/config.go index 7546a0ac4c..224fa26430 100644 --- a/pkg/client/config.go +++ b/pkg/client/config.go @@ -72,11 +72,11 @@ func SaveConfig(config VeleroConfig) error { // Try to make the directory in case it doesn't exist dir := filepath.Dir(fileName) - if err := os.MkdirAll(dir, 0700); err != nil { + if err := os.MkdirAll(dir, 0o700); err != nil { return 
errors.WithStack(err) } - configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) if err != nil { return errors.WithStack(err) } diff --git a/pkg/client/config_test.go b/pkg/client/config_test.go index 3180d674d3..878ff267d6 100644 --- a/pkg/client/config_test.go +++ b/pkg/client/config_test.go @@ -47,6 +47,7 @@ func removeConfigfileName() error { } return nil } + func TestConfigOperations(t *testing.T) { preHomeEnv := "" prevEnv := os.Environ() diff --git a/pkg/client/dynamic.go b/pkg/client/dynamic.go index 705c28aaa6..af32d08b7a 100644 --- a/pkg/client/dynamic.go +++ b/pkg/client/dynamic.go @@ -85,14 +85,14 @@ type Getter interface { // Patcher patches an object. type Patcher interface { - //Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. + // Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. Patch(name string, data []byte) (*unstructured.Unstructured, error) } // Deletor deletes an object. type Deletor interface { - //Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. + // Patch patches the named object using the provided patch bytes, which are expected to be in JSON merge patch format. The patched object is returned. Delete(name string, opts metav1.DeleteOptions) error } diff --git a/pkg/client/factory.go b/pkg/client/factory.go index f1524a5835..2c12a65651 100644 --- a/pkg/client/factory.go +++ b/pkg/client/factory.go @@ -122,7 +122,6 @@ func (f *factory) KubeClient() (kubernetes.Interface, error) { return nil, err } kubeClient, err := kubernetes.NewForConfig(clientConfig) - if err != nil { return nil, errors.WithStack(err) } @@ -169,7 +168,6 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) { kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{ Scheme: scheme, }) - if err != nil { return nil, err } @@ -205,7 +203,6 @@ func (f *factory) KubebuilderWatchClient() (kbclient.WithWatch, error) { kubebuilderWatchClient, err := kbclient.NewWithWatch(clientConfig, kbclient.Options{ Scheme: scheme, }) - if err != nil { return nil, err } diff --git a/pkg/cmd/cli/backup/create_test.go b/pkg/cmd/cli/backup/create_test.go index b9be38e1e5..0f0ce5de6a 100644 --- a/pkg/cmd/cli/backup/create_test.go +++ b/pkg/cmd/cli/backup/create_test.go @@ -224,27 +224,27 @@ func TestCreateCommand(t *testing.T) { flags.Parse([]string{"--resource-policies-configmap", resPoliciesConfigmap}) flags.Parse([]string{"--data-mover", dataMover}) flags.Parse([]string{"--parallel-files-upload", strconv.Itoa(parallelFilesUpload)}) - //flags.Parse([]string{"--wait"}) + // flags.Parse([]string{"--wait"}) client := velerotest.NewFakeControllerRuntimeClient(t).(kbclient.WithWatch) f.On("Namespace").Return(mock.Anything) f.On("KubebuilderWatchClient").Return(client, nil) - //Complete + // Complete e := o.Complete(args, f) require.NoError(t, e) - //Validate + // Validate e = o.Validate(cmd, args, f) require.ErrorContains(t, e, "include-resources, exclude-resources and include-cluster-resources are old filter parameters") require.ErrorContains(t, e, "include-cluster-scoped-resources, exclude-cluster-scoped-resources, include-namespace-scoped-resources and exclude-namespace-scoped-resources are new 
filter parameters.\nThey cannot be used together") - //cmd + // cmd e = o.Run(cmd, f) require.NoError(t, e) - //Execute + // Execute cmd.SetArgs([]string{"bk-name-exe"}) e = cmd.Execute() require.NoError(t, e) @@ -274,7 +274,7 @@ func TestCreateCommand(t *testing.T) { require.Equal(t, resPoliciesConfigmap, o.ResPoliciesConfigmap) require.Equal(t, dataMover, o.DataMover) require.Equal(t, parallelFilesUpload, o.ParallelFilesUpload) - //assert.Equal(t, true, o.Wait) + // assert.Equal(t, true, o.Wait) // verify oldAndNewFilterParametersUsedTogether mix := o.oldAndNewFilterParametersUsedTogether() diff --git a/pkg/cmd/cli/backup/download.go b/pkg/cmd/cli/backup/download.go index 5d2fec546c..e47ccacbdb 100644 --- a/pkg/cmd/cli/backup/download.go +++ b/pkg/cmd/cli/backup/download.go @@ -118,7 +118,7 @@ func (o *DownloadOptions) Run(c *cobra.Command, f client.Factory) error { kbClient, err := f.KubebuilderClient() cmd.CheckError(err) - backupDest, err := os.OpenFile(o.Output, o.writeOptions, 0600) + backupDest, err := os.OpenFile(o.Output, o.writeOptions, 0o600) if err != nil { return err } diff --git a/pkg/cmd/cli/backup/download_test.go b/pkg/cmd/cli/backup/download_test.go index a709a814b8..f1b79f9de0 100644 --- a/pkg/cmd/cli/backup/download_test.go +++ b/pkg/cmd/cli/backup/download_test.go @@ -95,7 +95,6 @@ func TestNewDownloadCommand(t *testing.T) { cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewDownloadCommand"}...) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag)) _, stderr, err := veleroexec.RunCommand(cmd) - if err != nil { require.Contains(t, stderr, "download request download url timeout") return diff --git a/pkg/cmd/cli/backuplocation/delete_test.go b/pkg/cmd/cli/backuplocation/delete_test.go index 4cb5a6bbc1..70eb2f6384 100644 --- a/pkg/cmd/cli/backuplocation/delete_test.go +++ b/pkg/cmd/cli/backuplocation/delete_test.go @@ -75,8 +75,9 @@ func TestNewDeleteCommand(t *testing.T) { } t.Fatalf("process ran with err %v, want backups by get()", err) } + func TestDeleteFunctions(t *testing.T) { - //t.Run("create the other create command with fromSchedule option for Run() other branches", func(t *testing.T) { + // t.Run("create the other create command with fromSchedule option for Run() other branches", func(t *testing.T) { // create a factory f := &factorymocks.Factory{} kbclient := velerotest.NewFakeControllerRuntimeClient(t) diff --git a/pkg/cmd/cli/backuplocation/get_test.go b/pkg/cmd/cli/backuplocation/get_test.go index 2e4c5510cb..cb04557a4f 100644 --- a/pkg/cmd/cli/backuplocation/get_test.go +++ b/pkg/cmd/cli/backuplocation/get_test.go @@ -53,7 +53,6 @@ func TestNewGetCommand(t *testing.T) { cmd := exec.Command(os.Args[0], []string{"-test.run=TestNewGetCommand"}...) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag)) _, stderr, err := veleroexec.RunCommand(cmd) - if err != nil { assert.Contains(t, stderr, fmt.Sprintf("backupstoragelocations.velero.io \"%s\" not found", bkList[0])) return diff --git a/pkg/cmd/cli/backuplocation/set_test.go b/pkg/cmd/cli/backuplocation/set_test.go index 65aaa17baa..ecf67641dd 100644 --- a/pkg/cmd/cli/backuplocation/set_test.go +++ b/pkg/cmd/cli/backuplocation/set_test.go @@ -101,7 +101,6 @@ func TestSetCommand_Execute(t *testing.T) { cmd := exec.Command(os.Args[0], []string{"-test.run=TestSetCommand_Execute"}...) 
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", cmdtest.CaptureFlag)) _, stderr, err := veleroexec.RunCommand(cmd) - if err != nil { assert.Contains(t, stderr, "backupstoragelocations.velero.io \"bsl-1\" not found") return diff --git a/pkg/cmd/cli/datamover/backup.go b/pkg/cmd/cli/datamover/backup.go index 7511fef8ed..b62846684d 100644 --- a/pkg/cmd/cli/datamover/backup.go +++ b/pkg/cmd/cli/datamover/backup.go @@ -213,8 +213,10 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi return s, nil } -var funcExitWithMessage = exitWithMessage -var funcCreateDataPathService = (*dataMoverBackup).createDataPathService +var ( + funcExitWithMessage = exitWithMessage + funcCreateDataPathService = (*dataMoverBackup).createDataPathService +) func (s *dataMoverBackup) run() { signals.CancelOnShutdown(s.cancelFunc, s.logger) @@ -268,8 +270,10 @@ func (s *dataMoverBackup) runDataPath() { funcExitWithMessage(s.logger, true, result) } -var funcNewCredentialFileStore = credentials.NewNamespacedFileStore -var funcNewCredentialSecretStore = credentials.NewNamespacedSecretStore +var ( + funcNewCredentialFileStore = credentials.NewNamespacedFileStore + funcNewCredentialSecretStore = credentials.NewNamespacedSecretStore +) func (s *dataMoverBackup) createDataPathService() (dataPathService, error) { credentialFileStore, err := funcNewCredentialFileStore( diff --git a/pkg/cmd/cli/datamover/backup_test.go b/pkg/cmd/cli/datamover/backup_test.go index 2dd1e681d8..39e0f40a81 100644 --- a/pkg/cmd/cli/datamover/backup_test.go +++ b/pkg/cmd/cli/datamover/backup_test.go @@ -61,7 +61,6 @@ func (fr *fakeRunHelper) RunCancelableDataPath(_ context.Context) (string, error } func (fr *fakeRunHelper) Shutdown() { - } func (fr *fakeRunHelper) ExitWithMessage(logger logrus.FieldLogger, succeed bool, message string, a ...any) { diff --git a/pkg/cmd/cli/datamover/data_mover.go b/pkg/cmd/cli/datamover/data_mover.go index 6786f4e7c2..bb49305214 100644 --- a/pkg/cmd/cli/datamover/data_mover.go +++ b/pkg/cmd/cli/datamover/data_mover.go @@ -46,8 +46,10 @@ type dataPathService interface { Shutdown() } -var funcExit = os.Exit -var funcCreateFile = os.Create +var ( + funcExit = os.Exit + funcCreateFile = os.Create +) func exitWithMessage(logger logrus.FieldLogger, succeed bool, message string, a ...any) { exitCode := 0 diff --git a/pkg/cmd/cli/datamover/data_mover_test.go b/pkg/cmd/cli/datamover/data_mover_test.go index 51d9376d31..37596137b3 100644 --- a/pkg/cmd/cli/datamover/data_mover_test.go +++ b/pkg/cmd/cli/datamover/data_mover_test.go @@ -45,7 +45,7 @@ func (em *exitWithMessageMock) CreateFile(name string) (*os.File, error) { } if em.writeFail { - return os.OpenFile(em.filePath, os.O_CREATE|os.O_RDONLY, 0500) + return os.OpenFile(em.filePath, os.O_CREATE|os.O_RDONLY, 0o500) } else { return os.Create(em.filePath) } diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index d5e7193cc1..ae8c70a88b 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -67,9 +67,7 @@ import ( cacheutil "k8s.io/client-go/tools/cache" ) -var ( - scheme = runtime.NewScheme() -) +var scheme = runtime.NewScheme() const ( // the port where prometheus metrics are exposed diff --git a/pkg/cmd/cli/repomantenance/maintenance.go b/pkg/cmd/cli/repomantenance/maintenance.go index e467c054a9..3e8f67d120 100644 --- a/pkg/cmd/cli/repomantenance/maintenance.go +++ b/pkg/cmd/cli/repomantenance/maintenance.go @@ -149,7 +149,6 @@ func (o *Options) runRepoPrune(f velerocli.Factory, 
namespace string, logger log BackupLocation: o.BackupStorageLocation, RepositoryType: o.RepoType, }, true) - if err != nil { return errors.Wrap(err, "failed to get backup repository") } diff --git a/pkg/cmd/cli/restore/create_test.go b/pkg/cmd/cli/restore/create_test.go index 877de29d0c..3f2b35b669 100644 --- a/pkg/cmd/cli/restore/create_test.go +++ b/pkg/cmd/cli/restore/create_test.go @@ -109,15 +109,15 @@ func TestCreateCommand(t *testing.T) { f.On("Namespace").Return(mock.Anything) f.On("KubebuilderWatchClient").Return(client, nil) - //Complete + // Complete e := o.Complete(args, f) require.NoError(t, e) - //Validate + // Validate e = o.Validate(cmd, args, f) require.ErrorContains(t, e, "either a backup or schedule must be specified, but not both") - //cmd + // cmd e = o.Run(cmd, f) require.NoError(t, e) diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 3fa174d81e..0f4f67e434 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -348,6 +348,7 @@ func newChangeImageNameRestoreItemAction(f client.Factory) plugincommon.HandlerI ), nil } } + func newRoleBindingItemAction(logger logrus.FieldLogger) (interface{}, error) { return ria.NewRoleBindingAction(logger), nil } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index d9f6960999..1664eab79b 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -89,7 +89,7 @@ import ( func NewCommand(f client.Factory) *cobra.Command { config := config.GetDefaultConfig() - var command = &cobra.Command{ + command := &cobra.Command{ Use: "server", Short: "Run the velero server", Long: "Run the velero server", @@ -1017,7 +1017,6 @@ func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup return true }) - if err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) continue @@ -1058,7 +1057,6 @@ func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, rest return true }) - if err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q cancel", dd.GetName()) continue diff --git a/pkg/cmd/test/const.go b/pkg/cmd/test/const.go index 0c07fe0fa4..8cc0f8ab78 100644 --- a/pkg/cmd/test/const.go +++ b/pkg/cmd/test/const.go @@ -1,4 +1,6 @@ package test -var VeleroNameSpace string = "velero-test" -var CaptureFlag string = "CAPTRUE-OUTPUT" +var ( + VeleroNameSpace string = "velero-test" + CaptureFlag string = "CAPTRUE-OUTPUT" +) diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index 7a85920fa8..cf06379cf0 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -38,8 +38,10 @@ import ( // ErrNotFound is exported for external packages to check for when a file is // not found -var ErrNotFound = errors.New("file not found") -var ErrDownloadRequestDownloadURLTimeout = errors.New("download request download url timeout, check velero server logs for errors. backup storage location may not be available") +var ( + ErrNotFound = errors.New("file not found") + ErrDownloadRequestDownloadURLTimeout = errors.New("download request download url timeout, check velero server logs for errors. 
backup storage location may not be available") +) func Stream( ctx context.Context, diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index b81bad002f..2a7bc4639b 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -312,7 +312,8 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { // DescribeBackupStatus describes a backup status in human-readable format. func DescribeBackupStatus(ctx context.Context, kbClient kbclient.Client, d *Describer, backup *velerov1api.Backup, details bool, - insecureSkipTLSVerify bool, caCertPath string, podVolumeBackups []velerov1api.PodVolumeBackup) { + insecureSkipTLSVerify bool, caCertPath string, podVolumeBackups []velerov1api.PodVolumeBackup, +) { status := backup.Status // Status.Version has been deprecated, use Status.FormatVersion diff --git a/pkg/cmd/util/output/backup_printer.go b/pkg/cmd/util/output/backup_printer.go index bae077a29a..37b7fdc336 100644 --- a/pkg/cmd/util/output/backup_printer.go +++ b/pkg/cmd/util/output/backup_printer.go @@ -29,20 +29,18 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - backupColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Status"}, - {Name: "Errors"}, - {Name: "Warnings"}, - {Name: "Created"}, - {Name: "Expires"}, - {Name: "Storage Location"}, - {Name: "Selector"}, - } -) +var backupColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Errors"}, + {Name: "Warnings"}, + {Name: "Created"}, + {Name: "Expires"}, + {Name: "Storage Location"}, + {Name: "Selector"}, +} func printBackupList(list *velerov1api.BackupList) []metav1.TableRow { sortBackupsByPrefixAndTimestamp(list) diff --git a/pkg/cmd/util/output/backup_repo_printer.go b/pkg/cmd/util/output/backup_repo_printer.go index 1e68cb9499..6bccacd3d8 100644 --- a/pkg/cmd/util/output/backup_repo_printer.go +++ b/pkg/cmd/util/output/backup_repo_printer.go @@ -23,15 +23,13 @@ import ( v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - backupRepoColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Status"}, - {Name: "Last Maintenance"}, - } -) +var backupRepoColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Last Maintenance"}, +} func printBackupRepoList(list *v1.BackupRepositoryList) []metav1.TableRow { rows := make([]metav1.TableRow, 0, len(list.Items)) diff --git a/pkg/cmd/util/output/backup_storage_location_printer.go b/pkg/cmd/util/output/backup_storage_location_printer.go index 90960ca528..2396004cf3 100644 --- a/pkg/cmd/util/output/backup_storage_location_printer.go +++ 
b/pkg/cmd/util/output/backup_storage_location_printer.go @@ -24,19 +24,17 @@ import ( "github.com/vmware-tanzu/velero/pkg/cmd" ) -var ( - backupStorageLocationColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Provider"}, - {Name: "Bucket/Prefix"}, - {Name: "Phase"}, - {Name: "Last Validated"}, - {Name: "Access Mode"}, - {Name: "Default"}, - } -) +var backupStorageLocationColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Provider"}, + {Name: "Bucket/Prefix"}, + {Name: "Phase"}, + {Name: "Last Validated"}, + {Name: "Access Mode"}, + {Name: "Default"}, +} func printBackupStorageLocationList(list *velerov1api.BackupStorageLocationList) []metav1.TableRow { rows := make([]metav1.TableRow, 0, len(list.Items)) diff --git a/pkg/cmd/util/output/backup_structured_describer.go b/pkg/cmd/util/output/backup_structured_describer.go index 63bfbbd5ba..e52145389a 100644 --- a/pkg/cmd/util/output/backup_structured_describer.go +++ b/pkg/cmd/util/output/backup_structured_describer.go @@ -223,7 +223,8 @@ func DescribeBackupSpecInSF(d *StructuredDescriber, spec velerov1api.BackupSpec) // DescribeBackupStatusInSF describes a backup status in structured format. func DescribeBackupStatusInSF(ctx context.Context, kbClient kbclient.Client, d *StructuredDescriber, backup *velerov1api.Backup, details bool, - insecureSkipTLSVerify bool, caCertPath string, podVolumeBackups []velerov1api.PodVolumeBackup) { + insecureSkipTLSVerify bool, caCertPath string, podVolumeBackups []velerov1api.PodVolumeBackup, +) { status := backup.Status backupStatusInfo := make(map[string]interface{}) @@ -299,7 +300,8 @@ func describeBackupResourceListInSF(ctx context.Context, kbClient kbclient.Clien } func describeBackupVolumesInSF(ctx context.Context, kbClient kbclient.Client, backup *velerov1api.Backup, details bool, - insecureSkipTLSVerify bool, caCertPath string, podVolumeBackupCRs []velerov1api.PodVolumeBackup, backupStatusInfo map[string]interface{}) { + insecureSkipTLSVerify bool, caCertPath string, podVolumeBackupCRs []velerov1api.PodVolumeBackup, backupStatusInfo map[string]interface{}, +) { backupVolumes := make(map[string]interface{}) nativeSnapshots := []*volume.BackupVolumeInfo{} diff --git a/pkg/cmd/util/output/output_test.go b/pkg/cmd/util/output/output_test.go index ba9c278435..4946cf5658 100644 --- a/pkg/cmd/util/output/output_test.go +++ b/pkg/cmd/util/output/output_test.go @@ -1,13 +1,13 @@ package output import ( + "testing" + "github.com/spf13/cobra" "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - - "testing" ) func TestBindFlags(t *testing.T) { diff --git a/pkg/cmd/util/output/plugin_printer.go b/pkg/cmd/util/output/plugin_printer.go index faa957314f..c5b22f3641 100644 --- a/pkg/cmd/util/output/plugin_printer.go +++ b/pkg/cmd/util/output/plugin_printer.go @@ -24,14 +24,12 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - pluginColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // 
https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Kind"}, - } -) +var pluginColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Kind"}, +} func printPluginList(list *velerov1api.ServerStatusRequest) []metav1.TableRow { plugins := list.Status.Plugins diff --git a/pkg/cmd/util/output/restore_describer_test.go b/pkg/cmd/util/output/restore_describer_test.go index 94e15c61ca..beffd2d9af 100644 --- a/pkg/cmd/util/output/restore_describer_test.go +++ b/pkg/cmd/util/output/restore_describer_test.go @@ -185,6 +185,7 @@ func TestDescribePodVolumeRestores(t *testing.T) { }) } } + func TestDescribeUploaderConfigForRestore(t *testing.T) { cases := []struct { name string diff --git a/pkg/cmd/util/output/restore_printer.go b/pkg/cmd/util/output/restore_printer.go index 782eb3485e..03237c66bd 100644 --- a/pkg/cmd/util/output/restore_printer.go +++ b/pkg/cmd/util/output/restore_printer.go @@ -23,21 +23,19 @@ import ( v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - restoreColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Backup"}, - {Name: "Status"}, - {Name: "Started"}, - {Name: "Completed"}, - {Name: "Errors"}, - {Name: "Warnings"}, - {Name: "Created"}, - {Name: "Selector"}, - } -) +var restoreColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Backup"}, + {Name: "Status"}, + {Name: "Started"}, + {Name: "Completed"}, + {Name: "Errors"}, + {Name: "Warnings"}, + {Name: "Created"}, + {Name: "Selector"}, +} func printRestoreList(list *v1.RestoreList) []metav1.TableRow { rows := make([]metav1.TableRow, 0, len(list.Items)) diff --git a/pkg/cmd/util/output/schedule_printer.go b/pkg/cmd/util/output/schedule_printer.go index e39ee90692..770884862f 100644 --- a/pkg/cmd/util/output/schedule_printer.go +++ b/pkg/cmd/util/output/schedule_printer.go @@ -25,20 +25,18 @@ import ( v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - scheduleColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Status"}, - {Name: "Created"}, - {Name: "Schedule"}, - {Name: "Backup TTL"}, - {Name: "Last Backup"}, - {Name: "Selector"}, - {Name: "Paused"}, - } -) +var scheduleColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Created"}, + {Name: "Schedule"}, + {Name: "Backup TTL"}, + {Name: "Last Backup"}, + {Name: "Selector"}, + {Name: "Paused"}, +} func printScheduleList(list *v1.ScheduleList) []metav1.TableRow { rows := 
make([]metav1.TableRow, 0, len(list.Items)) diff --git a/pkg/cmd/util/output/volume_snapshot_location_printer.go b/pkg/cmd/util/output/volume_snapshot_location_printer.go index d6168818c4..d8d37b2d38 100644 --- a/pkg/cmd/util/output/volume_snapshot_location_printer.go +++ b/pkg/cmd/util/output/volume_snapshot_location_printer.go @@ -23,14 +23,12 @@ import ( v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" ) -var ( - volumeSnapshotLocationColumns = []metav1.TableColumnDefinition{ - // name needs Type and Format defined for the decorator to identify it: - // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Provider"}, - } -) +var volumeSnapshotLocationColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Provider"}, +} func printVolumeSnapshotLocationList(list *v1.VolumeSnapshotLocationList) []metav1.TableRow { rows := make([]metav1.TableRow, 0, len(list.Items)) diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 44cbb6dc52..6c056c7fd7 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -76,7 +76,8 @@ func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Requ func (b *fakeBackupper) BackupWithResolvers(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, backupItemActionResolver framework.BackupItemActionResolverV2, itemBlockActionResolver framework.ItemBlockActionResolver, - volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter) error { + volumeSnapshotterGetter pkgbackup.VolumeSnapshotterGetter, +) error { args := b.Called(logger, backup, backupFile, backupItemActionResolver, volumeSnapshotterGetter) return args.Error(0) } @@ -129,9 +130,7 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { formatFlag := logging.FormatText - var ( - logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) - ) + logger := logging.DefaultLogger(logrus.DebugLevel, formatFlag) c := &backupReconciler{ kbClient: velerotest.NewFakeControllerRuntimeClient(t), @@ -187,8 +186,12 @@ func TestProcessBackupValidationFailures(t *testing.T) { }, { name: "labelSelector as well as orLabelSelectors both are specified in backup request fails validation", - backup: defaultBackup().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).OrLabelSelector([]*metav1.LabelSelector{{MatchLabels: map[string]string{"a1": "b1"}}, {MatchLabels: map[string]string{"a2": "b2"}}, - {MatchLabels: map[string]string{"a3": "b3"}}, {MatchLabels: map[string]string{"a4": "b4"}}}).Result(), + backup: defaultBackup().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).OrLabelSelector([]*metav1.LabelSelector{ + {MatchLabels: map[string]string{"a1": "b1"}}, + {MatchLabels: map[string]string{"a2": "b2"}}, + {MatchLabels: map[string]string{"a3": "b3"}}, + {MatchLabels: map[string]string{"a4": "b4"}}, + }).Result(), backupLocation: defaultBackupLocation, expectedErrs: []string{"encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified"}, }, @@ -203,9 +206,7 @@ func TestProcessBackupValidationFailures(t *testing.T) { for _, test 
:= range tests { t.Run(test.name, func(t *testing.T) { formatFlag := logging.FormatText - var ( - logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) - ) + logger := logging.DefaultLogger(logrus.DebugLevel, formatFlag) apiServer := velerotest.NewAPIServer(t) discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, logger) @@ -407,9 +408,7 @@ func Test_prepareBackupRequest_BackupStorageLocation(t *testing.T) { } func TestDefaultBackupTTL(t *testing.T) { - var ( - defaultBackupTTL = metav1.Duration{Duration: 24 * 30 * time.Hour} - ) + defaultBackupTTL := metav1.Duration{Duration: 24 * 30 * time.Hour} now, err := time.Parse(time.RFC1123Z, time.RFC1123Z) require.NoError(t, err) @@ -1531,9 +1530,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { formatFlag := logging.FormatText - var ( - logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) - ) + logger := logging.DefaultLogger(logrus.DebugLevel, formatFlag) c := &backupReconciler{ logger: logger, diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 9a09e85d3e..6a2995210f 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -593,7 +593,7 @@ func (r *backupDeletionReconciler) patchDeleteBackupRequestWithError(ctx context } func (r *backupDeletionReconciler) patchBackup(ctx context.Context, backup *velerov1api.Backup, mutate func(*velerov1api.Backup)) (*velerov1api.Backup, error) { - //TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored + // TODO: The patchHelper can't be used here because the `backup/xxx/status` does not exist, until the backup resource is refactored // Record original json oldData, err := json.Marshal(backup) @@ -637,7 +637,8 @@ func getSnapshotsInBackup(ctx context.Context, backup *velerov1api.Backup, kbCli } func batchDeleteSnapshots(ctx context.Context, repoEnsurer *repository.Ensurer, repoMgr repomanager.Manager, - directSnapshots map[string][]repotypes.SnapshotIdentifier, backup *velerov1api.Backup, logger logrus.FieldLogger) []error { + directSnapshots map[string][]repotypes.SnapshotIdentifier, backup *velerov1api.Backup, logger logrus.FieldLogger, +) []error { var errs []error for volumeNamespace, snapshots := range directSnapshots { batchForget := []string{} diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index 9a85a13481..8ef273e384 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -18,21 +18,19 @@ package controller import ( "bytes" + "context" "encoding/json" "errors" "fmt" "io" "reflect" + "strings" + "testing" "time" - "context" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "strings" - "testing" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" corev1api "k8s.io/api/core/v1" @@ -174,19 +172,18 @@ func TestBackupDeletionControllerReconcile(t *testing.T) { } err := td.fakeClient.Create(context.TODO(), existing) require.NoError(t, err) - existing2 := - &velerov1api.DeleteBackupRequest{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: td.req.Namespace, - Name: "bar-2", - Labels: map[string]string{ - velerov1api.BackupNameLabel: "some-other-backup", - }, - }, - Spec: velerov1api.DeleteBackupRequestSpec{ - BackupName: "some-other-backup", + 
existing2 := &velerov1api.DeleteBackupRequest{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: td.req.Namespace, + Name: "bar-2", + Labels: map[string]string{ + velerov1api.BackupNameLabel: "some-other-backup", }, - } + }, + Spec: velerov1api.DeleteBackupRequestSpec{ + BackupName: "some-other-backup", + }, + } err = td.fakeClient.Create(context.TODO(), existing2) require.NoError(t, err) _, err = td.controller.Reconcile(context.TODO(), td.req) @@ -941,7 +938,6 @@ func TestDeleteMovedSnapshots(t *testing.T) { repoMgr: repomocks.NewManager(t), backupName: "backup-01", snapshots: []*repotypes.SnapshotIdentifier{ - { SnapshotID: "snapshot-1", RepositoryType: "repo-1", diff --git a/pkg/controller/backup_finalizer_controller.go b/pkg/controller/backup_finalizer_controller.go index 1a912b2952..9740a6756b 100644 --- a/pkg/controller/backup_finalizer_controller.go +++ b/pkg/controller/backup_finalizer_controller.go @@ -244,7 +244,8 @@ func (r *backupFinalizerReconciler) SetupWithManager(mgr ctrl.Manager) error { // updateCSIVolumeSnapshotsCompleted calculate the completed VS number according to // the backup's async operation list. func updateCSIVolumeSnapshotsCompleted( - operations []*itemoperation.BackupOperation) int { + operations []*itemoperation.BackupOperation, +) int { completedNum := 0 for index := range operations { diff --git a/pkg/controller/backup_finalizer_controller_test.go b/pkg/controller/backup_finalizer_controller_test.go index 57ef5d50fb..425109e2b1 100644 --- a/pkg/controller/backup_finalizer_controller_test.go +++ b/pkg/controller/backup_finalizer_controller_test.go @@ -61,6 +61,7 @@ func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeGlobalClient 10*time.Minute, ), backupper } + func TestBackupFinalizerReconcile(t *testing.T) { fakeClock := testclocks.NewFakeClock(time.Now()) metav1Now := metav1.NewTime(fakeClock.Now()) diff --git a/pkg/controller/backup_operations_controller.go b/pkg/controller/backup_operations_controller.go index 39a577cbeb..ea211f7b32 100644 --- a/pkg/controller/backup_operations_controller.go +++ b/pkg/controller/backup_operations_controller.go @@ -223,7 +223,8 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON( backupStore persistence.BackupStore, operations *itemoperationmap.OperationsForBackup, changes bool, - completionChanges bool) error { + completionChanges bool, +) error { backupScheduleName := backup.GetLabels()[velerov1api.ScheduleNameLabel] if len(operations.ErrsSinceUpdate) > 0 { @@ -288,7 +289,8 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON( func getBackupItemOperationProgress( backup *velerov1api.Backup, pluginManager clientmgmt.Manager, - operationsList []*itemoperation.BackupOperation) (bool, bool, int, int, []string) { + operationsList []*itemoperation.BackupOperation, +) (bool, bool, int, int, []string) { inProgressOperations := false changes := false var errs []string diff --git a/pkg/controller/backup_repository_controller.go b/pkg/controller/backup_repository_controller.go index d41f547796..caf7d56bd7 100644 --- a/pkg/controller/backup_repository_controller.go +++ b/pkg/controller/backup_repository_controller.go @@ -62,7 +62,8 @@ type BackupRepoReconciler struct { } func NewBackupRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client, - maintenanceFrequency time.Duration, backupRepoConfig string, repositoryManager repomanager.Manager) *BackupRepoReconciler { + maintenanceFrequency time.Duration, backupRepoConfig string, repositoryManager 
repomanager.Manager, +) *BackupRepoReconciler { c := &BackupRepoReconciler{ client, namespace, diff --git a/pkg/controller/backup_storage_location_controller.go b/pkg/controller/backup_storage_location_controller.go index c8973abb69..6c09a1a5ec 100644 --- a/pkg/controller/backup_storage_location_controller.go +++ b/pkg/controller/backup_storage_location_controller.go @@ -64,7 +64,8 @@ func NewBackupStorageLocationReconciler( defaultBackupLocationInfo storage.DefaultBackupLocationInfo, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, backupStoreGetter persistence.ObjectBackupStoreGetter, - log logrus.FieldLogger) *backupStorageLocationReconciler { + log logrus.FieldLogger, +) *backupStorageLocationReconciler { return &backupStorageLocationReconciler{ ctx: ctx, client: client, diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index c9fb1ba665..4b538c2846 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -67,7 +67,8 @@ func NewBackupSyncReconciler( defaultBackupSyncPeriod time.Duration, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, backupStoreGetter persistence.ObjectBackupStoreGetter, - logger logrus.FieldLogger) *backupSyncReconciler { + logger logrus.FieldLogger, +) *backupSyncReconciler { return &backupSyncReconciler{ client: client, namespace: namespace, @@ -185,7 +186,7 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) } backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) - //check for the ownership references. If they do not exist, remove them. + // check for the ownership references. If they do not exist, remove them. backup.ObjectMeta.OwnerReferences = b.filterBackupOwnerReferences(ctx, backup, log) // attempt to create backup custom resource via API diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index 1bba441343..69da56be7e 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -460,7 +460,8 @@ var _ = Describe("Backup Sync Reconciler", func() { context.TODO(), types.NamespacedName{ Namespace: cloudBackupData.backup.Namespace, - Name: cloudBackupData.backup.Name}, + Name: cloudBackupData.backup.Name, + }, obj) if cloudBackupData.backupShouldSkipSync && (cloudBackupData.backup.Status.Expiration == nil || @@ -725,11 +726,9 @@ var _ = Describe("Backup Sync Reconciler", func() { locationList.Items = testObjList.(*velerov1api.BackupStorageLocationList).Items[1:] testObjList = backupSyncSourceOrderFunc(locationList) Expect(testObjList).To(BeEquivalentTo(locationList)) - }) When("testing validateOwnerReferences", func() { - testCases := []struct { name string backup *velerov1api.Backup @@ -886,7 +885,7 @@ var _ = Describe("Backup Sync Reconciler", func() { client: ctrlfake.NewClientBuilder().Build(), } - //create all required schedules as needed. + // create all required schedules as needed. 
for _, creatable := range test.toCreate { err := b.client.Create(context.Background(), creatable) Expect(err).ShouldNot(HaveOccurred()) diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index 3d5b2965a7..a4deb7d1e8 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -72,7 +72,8 @@ type DataDownloadReconciler struct { func NewDataDownloadReconciler(client client.Client, mgr manager.Manager, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager, restorePVCConfig nodeagent.RestorePVC, podResources v1.ResourceRequirements, nodeName string, preparingTimeout time.Duration, - logger logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataDownloadReconciler { + logger logrus.FieldLogger, metrics *metrics.ServerMetrics, +) *DataDownloadReconciler { return &DataDownloadReconciler{ client: client, kubeClient: kubeClient, @@ -586,7 +587,6 @@ func (r *DataDownloadReconciler) findSnapshotRestoreForPod(ctx context.Context, return true }) - if err != nil { log.WithError(err).Warn("failed to cancel datadownload, and it will wait for prepare timeout") return []reconcile.Request{} @@ -648,7 +648,6 @@ func (r *DataDownloadReconciler) acceptDataDownload(ctx context.Context, dd *vel } succeeded, err := r.exclusiveUpdateDataDownload(ctx, updated, updateFunc) - if err != nil { return false, err } @@ -671,7 +670,6 @@ func (r *DataDownloadReconciler) onPrepareTimeout(ctx context.Context, dd *veler dd.Status.Phase = velerov2alpha1api.DataDownloadPhaseFailed dd.Status.Message = "timeout on preparing data download" }) - if err != nil { log.WithError(err).Warn("Failed to update datadownload") return @@ -695,7 +693,8 @@ func (r *DataDownloadReconciler) onPrepareTimeout(ctx context.Context, dd *veler } func (r *DataDownloadReconciler) exclusiveUpdateDataDownload(ctx context.Context, dd *velerov2alpha1api.DataDownload, - updateFunc func(*velerov2alpha1api.DataDownload)) (bool, error) { + updateFunc func(*velerov2alpha1api.DataDownload), +) (bool, error) { updateFunc(dd) err := r.client.Update(ctx, dd) @@ -777,7 +776,6 @@ func findDataDownloadByPod(client client.Client, pod v1.Pod) (*velerov2alpha1api Namespace: pod.Namespace, Name: label, }, dd) - if err != nil { return nil, errors.Wrapf(err, "error to find DataDownload by pod %s/%s", pod.Namespace, pod.Name) } diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index 046103771e..b8bd2b317e 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -423,7 +423,8 @@ func TestDataDownloadReconcile(t *testing.T) { } datapath.MicroServiceBRWatcherCreator = func(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, - string, string, string, string, datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR { + string, string, string, string, datapath.Callbacks, logrus.FieldLogger, + ) datapath.AsyncBR { asyncBR := datapathmockes.NewAsyncBR(t) if test.mockInit { asyncBR.On("Init", mock.Anything, mock.Anything).Return(test.mockInitErr) @@ -721,7 +722,8 @@ func TestFindDataDownloadForPod(t *testing.T) { assert.Equal(t, du.Namespace, requests[0].Namespace) assert.Equal(t, du.Name, requests[0].Name) }, - }, { + }, + { name: "no selected label found for pod", du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Result(), @@ -729,7 
+731,8 @@ func TestFindDataDownloadForPod(t *testing.T) { // Assert that the function returns a single request assert.Empty(t, requests) }, - }, { + }, + { name: "no matched pod", du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Labels(map[string]string{velerov1api.DataDownloadLabel: "non-existing-datadownload"}).Result(), @@ -992,7 +995,8 @@ func (dt *ddResumeTestHelper) RebindVolume(context.Context, corev1.ObjectReferen func (dt *ddResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference) {} func (dt *ddResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string, - datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR { + datapath.Callbacks, logrus.FieldLogger, +) datapath.AsyncBR { return dt.asyncBR } diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index 3185373c14..b8ca0e48be 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -639,7 +639,6 @@ func (r *DataUploadReconciler) findDataUploadForPod(ctx context.Context, podObj return true }) - if err != nil { log.WithError(err).Warn("failed to cancel dataupload, and it will wait for prepare timeout") return []reconcile.Request{} @@ -712,7 +711,6 @@ func (r *DataUploadReconciler) acceptDataUpload(ctx context.Context, du *velerov } succeeded, err := r.exclusiveUpdateDataUpload(ctx, updated, updateFunc) - if err != nil { return false, err } @@ -736,7 +734,6 @@ func (r *DataUploadReconciler) onPrepareTimeout(ctx context.Context, du *velerov du.Status.Phase = velerov2alpha1api.DataUploadPhaseFailed du.Status.Message = "timeout on preparing data upload" }) - if err != nil { log.WithError(err).Warn("Failed to update dataupload") return @@ -771,7 +768,8 @@ func (r *DataUploadReconciler) onPrepareTimeout(ctx context.Context, du *velerov } func (r *DataUploadReconciler) exclusiveUpdateDataUpload(ctx context.Context, du *velerov2alpha1api.DataUpload, - updateFunc func(*velerov2alpha1api.DataUpload)) (bool, error) { + updateFunc func(*velerov2alpha1api.DataUpload), +) (bool, error) { updateFunc(du) err := r.client.Update(ctx, du) @@ -805,7 +803,6 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload Namespace: du.Spec.SourceNamespace, Name: du.Spec.SourcePVC, }, pvc) - if err != nil { return nil, errors.Wrapf(err, "failed to get PVC %s/%s", du.Spec.SourceNamespace, du.Spec.SourcePVC) } @@ -881,7 +878,6 @@ func findDataUploadByPod(client client.Client, pod corev1.Pod) (*velerov2alpha1a Namespace: pod.Namespace, Name: label, }, du) - if err != nil { return nil, errors.Wrapf(err, "error to find DataUpload by pod %s/%s", pod.Namespace, pod.Name) } diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index 0a02108692..2c2133dcb1 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -826,7 +826,8 @@ func TestFindDataUploadForPod(t *testing.T) { assert.Equal(t, du.Namespace, requests[0].Namespace) assert.Equal(t, du.Name, requests[0].Name) }, - }, { + }, + { name: "no selected label found for pod", du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Result(), @@ -834,7 +835,8 @@ func TestFindDataUploadForPod(t *testing.T) { 
// Assert that the function returns a single request assert.Empty(t, requests) }, - }, { + }, + { name: "no matched pod", du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Labels(map[string]string{velerov1api.DataUploadLabel: "non-existing-dataupload"}).Result(), @@ -1106,7 +1108,8 @@ func (dt *duResumeTestHelper) DiagnoseExpose(context.Context, corev1.ObjectRefer func (dt *duResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference, string, string) {} func (dt *duResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string, - datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR { + datapath.Callbacks, logrus.FieldLogger, +) datapath.AsyncBR { return dt.asyncBR } diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index dad1593032..e4527b7283 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -49,7 +49,8 @@ const pVBRRequestor string = "pod-volume-backup-restore" // NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance func NewPodVolumeBackupReconciler(client client.Client, dataPathMgr *datapath.Manager, ensurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter, - nodeName string, scheme *runtime.Scheme, metrics *metrics.ServerMetrics, logger logrus.FieldLogger) *PodVolumeBackupReconciler { + nodeName string, scheme *runtime.Scheme, metrics *metrics.ServerMetrics, logger logrus.FieldLogger, +) *PodVolumeBackupReconciler { return &PodVolumeBackupReconciler{ Client: client, logger: logger.WithField("controller", "PodVolumeBackup"), @@ -125,7 +126,6 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ } fsBackup, err := r.dataPathMgr.CreateFileSystemBR(pvb.Name, pVBRRequestor, ctx, r.Client, pvb.Namespace, callbacks, log) - if err != nil { if err == datapath.ConcurrentLimitExceed { return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index f4645657f2..3b02fa7d5a 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -50,7 +50,8 @@ import ( ) func NewPodVolumeRestoreReconciler(client client.Client, dataPathMgr *datapath.Manager, ensurer *repository.Ensurer, - credentialGetter *credentials.CredentialGetter, logger logrus.FieldLogger) *PodVolumeRestoreReconciler { + credentialGetter *credentials.CredentialGetter, logger logrus.FieldLogger, +) *PodVolumeRestoreReconciler { return &PodVolumeRestoreReconciler{ Client: client, logger: logger.WithField("controller", "PodVolumeRestore"), @@ -304,7 +305,7 @@ func (c *PodVolumeRestoreReconciler) OnDataPathCompleted(ctx context.Context, na // Create the .velero directory within the volume dir so we can write a done file // for this restore. - if err := os.MkdirAll(filepath.Join(volumePath, ".velero"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(volumePath, ".velero"), 0o755); err != nil { _, _ = c.errorOut(ctx, &pvr, err, "error creating .velero directory for done file", log) return } @@ -312,7 +313,7 @@ func (c *PodVolumeRestoreReconciler) OnDataPathCompleted(ctx context.Context, na // Write a done file with name= into the just-created .velero dir // within the volume. 
The velero init container on the pod is waiting // for this file to exist in each restored volume before completing. - if err := os.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec // Internal usage. No need to check. + if err := os.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0o644); err != nil { //nolint:gosec // Internal usage. No need to check. _, _ = c.errorOut(ctx, &pvr, err, "error writing done file", log) return } diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index fe30bab504..befa642274 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -579,7 +579,7 @@ func TestRestoreReconcile(t *testing.T) { backupStore.On("DeleteRestore", test.restore.Name).Return(nil) } - //err = r.processQueueItem(key) + // err = r.processQueueItem(key) _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{ Namespace: test.restore.Namespace, Name: test.restore.Name, diff --git a/pkg/controller/restore_finalizer_controller.go b/pkg/controller/restore_finalizer_controller.go index 2caec17880..3fd5b4e695 100644 --- a/pkg/controller/restore_finalizer_controller.go +++ b/pkg/controller/restore_finalizer_controller.go @@ -372,7 +372,6 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result scName := *pvc.Spec.StorageClassName sc := &storagev1api.StorageClass{} err = ctx.crClient.Get(context.Background(), client.ObjectKey{Name: scName}, sc) - if err != nil { errs.Add(restoredNamespace, err) return false, err @@ -426,7 +425,6 @@ func (ctx *finalizerContext) patchDynamicPVWithVolumeInfo() (errs results.Result return true, nil }) - if err != nil { err = fmt.Errorf("fail to patch dynamic PV, err: %s, PVC: %s, PV: %s", err, volInfo.PVCName, volInfo.PVName) ctx.logger.WithError(errors.WithStack((err))).Error("err patching dynamic PV using volume info") diff --git a/pkg/controller/restore_finalizer_controller_test.go b/pkg/controller/restore_finalizer_controller_test.go index 518b824241..e550f155ad 100644 --- a/pkg/controller/restore_finalizer_controller_test.go +++ b/pkg/controller/restore_finalizer_controller_test.go @@ -259,7 +259,8 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore").Result(), restoredPVCNames: map[string]struct{}{"ns1/pvc1": {}}, restoredPV: []*corev1api.PersistentVolume{ - builder.ForPersistentVolume("new-pv1").ObjectMeta(builder.WithLabels("label1", "label1-val")).ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result()}, + builder.ForPersistentVolume("new-pv1").ObjectMeta(builder.WithLabels("label1", "label1-val")).ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result(), + }, restoredPVC: []*corev1api.PersistentVolumeClaim{ builder.ForPersistentVolumeClaim("ns1", "pvc1").VolumeName("new-pv1").Phase(corev1api.ClaimBound).Result(), }, @@ -281,7 +282,8 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore").Result(), restoredPVCNames: map[string]struct{}{"ns1/pvc1": {}}, restoredPV: []*corev1api.PersistentVolume{ - builder.ForPersistentVolume("new-pv1").ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()}, + 
builder.ForPersistentVolume("new-pv1").ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + }, restoredPVC: []*corev1api.PersistentVolumeClaim{ builder.ForPersistentVolumeClaim("ns1", "pvc1").VolumeName("new-pv1").Phase(corev1api.ClaimBound).Result(), }, @@ -306,7 +308,8 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore").NamespaceMappings("ns2", "ns1").Result(), restoredPVCNames: map[string]struct{}{"ns1/pvc1": {}}, restoredPV: []*corev1api.PersistentVolume{ - builder.ForPersistentVolume("new-pv1").ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()}, + builder.ForPersistentVolume("new-pv1").ClaimRef("ns1", "pvc1").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + }, restoredPVC: []*corev1api.PersistentVolumeClaim{ builder.ForPersistentVolumeClaim("ns1", "pvc1").VolumeName("new-pv1").Phase(corev1api.ClaimBound).Result(), }, @@ -318,16 +321,17 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { }, { name: "two applicable pv patches", - volumeInfo: []*volume.BackupVolumeInfo{{ - BackupMethod: "PodVolumeBackup", - PVCName: "pvc1", - PVName: "pv1", - PVCNamespace: "ns1", - PVInfo: &volume.PVInfo{ - ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), - Labels: map[string]string{"label1": "label1-val"}, + volumeInfo: []*volume.BackupVolumeInfo{ + { + BackupMethod: "PodVolumeBackup", + PVCName: "pvc1", + PVName: "pv1", + PVCNamespace: "ns1", + PVInfo: &volume.PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"label1": "label1-val"}, + }, }, - }, { BackupMethod: "CSISnapshot", PVCName: "pvc2", @@ -379,7 +383,8 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { restore: builder.ForRestore(velerov1api.DefaultNamespace, "restore").Result(), restoredPVCNames: map[string]struct{}{"ns1/pvc1": {}}, restoredPV: []*corev1api.PersistentVolume{ - builder.ForPersistentVolume("new-pv1").ClaimRef("ns2", "pvc2").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result()}, + builder.ForPersistentVolume("new-pv1").ClaimRef("ns2", "pvc2").Phase(corev1api.VolumeBound).ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + }, restoredPVC: []*corev1api.PersistentVolumeClaim{ builder.ForPersistentVolumeClaim("ns1", "pvc1").VolumeName("new-pv1").Phase(corev1api.ClaimBound).Result(), }, @@ -387,16 +392,17 @@ func TestPatchDynamicPVWithVolumeInfo(t *testing.T) { }, { name: "two applicable pv patches with an error", - volumeInfo: []*volume.BackupVolumeInfo{{ - BackupMethod: "PodVolumeBackup", - PVCName: "pvc1", - PVName: "pv1", - PVCNamespace: "ns1", - PVInfo: &volume.PVInfo{ - ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), - Labels: map[string]string{"label1": "label1-val"}, + volumeInfo: []*volume.BackupVolumeInfo{ + { + BackupMethod: "PodVolumeBackup", + PVCName: "pvc1", + PVName: "pv1", + PVCNamespace: "ns1", + PVInfo: &volume.PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"label1": "label1-val"}, + }, }, - }, { BackupMethod: "CSISnapshot", PVCName: "pvc2", diff --git a/pkg/controller/restore_operations_controller.go b/pkg/controller/restore_operations_controller.go index 5f9b2ee565..7caab985e1 100644 --- a/pkg/controller/restore_operations_controller.go +++ 
b/pkg/controller/restore_operations_controller.go @@ -207,7 +207,8 @@ func (r *restoreOperationsReconciler) updateRestoreAndOperationsJSON( backupStore persistence.BackupStore, operations *itemoperationmap.OperationsForRestore, changes bool, - completionChanges bool) error { + completionChanges bool, +) error { if len(operations.ErrsSinceUpdate) > 0 { // FIXME: download/upload results r.logger.WithField("restore", restore.Name).Infof("Restore has %d errors", len(operations.ErrsSinceUpdate)) @@ -254,7 +255,8 @@ func (r *restoreOperationsReconciler) updateRestoreAndOperationsJSON( func getRestoreItemOperationProgress( restore *velerov1api.Restore, pluginManager clientmgmt.Manager, - operationsList []*itemoperation.RestoreOperation) (bool, bool, int, int, []string) { + operationsList []*itemoperation.RestoreOperation, +) (bool, bool, int, int, []string) { inProgressOperations := false changes := false var errs []string diff --git a/pkg/controller/server_status_request_controller.go b/pkg/controller/server_status_request_controller.go index 3fb1af80bd..08bfce5a79 100644 --- a/pkg/controller/server_status_request_controller.go +++ b/pkg/controller/server_status_request_controller.go @@ -63,7 +63,8 @@ func NewServerStatusRequestReconciler( client client.Client, pluginRegistry PluginLister, clock clocks.WithTickerAndDelayedExecution, - log logrus.FieldLogger) *serverStatusRequestReconciler { + log logrus.FieldLogger, +) *serverStatusRequestReconciler { return &serverStatusRequestReconciler{ client: client, ctx: ctx, diff --git a/pkg/controller/suite_test.go b/pkg/controller/suite_test.go index 1e5e90717e..2bca4e3085 100644 --- a/pkg/controller/suite_test.go +++ b/pkg/controller/suite_test.go @@ -134,8 +134,7 @@ func (t *testEnvironment) stop() error { return env.Stop() } -type fakeErrorBackupStoreGetter struct { -} +type fakeErrorBackupStoreGetter struct{} func (f *fakeErrorBackupStoreGetter) Get(*velerov1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { return nil, fmt.Errorf("some error") diff --git a/pkg/datamover/backup_micro_service.go b/pkg/datamover/backup_micro_service.go index de77204a84..afa2313177 100644 --- a/pkg/datamover/backup_micro_service.go +++ b/pkg/datamover/backup_micro_service.go @@ -76,7 +76,8 @@ type dataPathResult struct { func NewBackupMicroService(ctx context.Context, client client.Client, kubeClient kubernetes.Interface, dataUploadName string, namespace string, nodeName string, sourceTargetPath datapath.AccessPoint, dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, cred *credentials.CredentialGetter, - duInformer cache.Informer, log logrus.FieldLogger) *BackupMicroService { + duInformer cache.Informer, log logrus.FieldLogger, +) *BackupMicroService { return &BackupMicroService{ ctx: ctx, client: client, @@ -117,7 +118,6 @@ func (r *BackupMicroService) Init() error { }, }, ) - if err != nil { return errors.Wrap(err, "error adding du handler") } @@ -153,7 +153,6 @@ func (r *BackupMicroService) RunCancelableDataPath(ctx context.Context) (string, return false, nil } }) - if err != nil { log.WithError(err).Error("Failed to wait du") return "", errors.Wrap(err, "error waiting for du") diff --git a/pkg/datamover/restore_micro_service.go b/pkg/datamover/restore_micro_service.go index 1746366c9d..5f27ca4346 100644 --- a/pkg/datamover/restore_micro_service.go +++ b/pkg/datamover/restore_micro_service.go @@ -65,7 +65,8 @@ type RestoreMicroService struct { func NewRestoreMicroService(ctx context.Context, 
client client.Client, kubeClient kubernetes.Interface, dataDownloadName string, namespace string, nodeName string, sourceTargetPath datapath.AccessPoint, dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, cred *credentials.CredentialGetter, - ddInformer cache.Informer, log logrus.FieldLogger) *RestoreMicroService { + ddInformer cache.Informer, log logrus.FieldLogger, +) *RestoreMicroService { return &RestoreMicroService{ ctx: ctx, client: client, @@ -106,7 +107,6 @@ func (r *RestoreMicroService) Init() error { }, }, ) - if err != nil { return errors.Wrap(err, "error adding dd handler") } diff --git a/pkg/datapath/file_system_test.go b/pkg/datapath/file_system_test.go index fab33df1c0..68e5166e32 100644 --- a/pkg/datapath/file_system_test.go +++ b/pkg/datapath/file_system_test.go @@ -34,7 +34,7 @@ func TestAsyncBackup(t *testing.T) { var asyncErr error var asyncResult Result finish := make(chan struct{}) - var failErr = errors.New("fake-fail-error") + failErr := errors.New("fake-fail-error") tests := []struct { name string uploaderProv provider.Provider @@ -118,7 +118,7 @@ func TestAsyncRestore(t *testing.T) { var asyncErr error var asyncResult Result finish := make(chan struct{}) - var failErr = errors.New("fake-fail-error") + failErr := errors.New("fake-fail-error") tests := []struct { name string uploaderProv provider.Provider diff --git a/pkg/datapath/manager.go b/pkg/datapath/manager.go index 0b790a5cc9..f25941fe5f 100644 --- a/pkg/datapath/manager.go +++ b/pkg/datapath/manager.go @@ -27,9 +27,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ) -var ConcurrentLimitExceed error = errors.New("Concurrent number exceeds") -var FSBRCreator = newFileSystemBR -var MicroServiceBRWatcherCreator = newMicroServiceBRWatcher +var ( + ConcurrentLimitExceed error = errors.New("Concurrent number exceeds") + FSBRCreator = newFileSystemBR + MicroServiceBRWatcherCreator = newMicroServiceBRWatcher +) type Manager struct { cocurrentNum int @@ -61,7 +63,8 @@ func (m *Manager) CreateFileSystemBR(jobName string, requestorType string, ctx c // CreateMicroServiceBRWatcher creates a new micro service watcher instance func (m *Manager) CreateMicroServiceBRWatcher(ctx context.Context, client client.Client, kubeClient kubernetes.Interface, mgr manager.Manager, taskType string, - taskName string, namespace string, podName string, containerName string, associatedObject string, callbacks Callbacks, resume bool, log logrus.FieldLogger) (AsyncBR, error) { + taskName string, namespace string, podName string, containerName string, associatedObject string, callbacks Callbacks, resume bool, log logrus.FieldLogger, +) (AsyncBR, error) { m.trackerLock.Lock() defer m.trackerLock.Unlock() diff --git a/pkg/datapath/micro_service_watcher.go b/pkg/datapath/micro_service_watcher.go index 8d09275389..744c26a27a 100644 --- a/pkg/datapath/micro_service_watcher.go +++ b/pkg/datapath/micro_service_watcher.go @@ -83,7 +83,8 @@ type microServiceBRWatcher struct { } func newMicroServiceBRWatcher(client client.Client, kubeClient kubernetes.Interface, mgr manager.Manager, taskType string, taskName string, namespace string, - podName string, containerName string, associatedObject string, callbacks Callbacks, log logrus.FieldLogger) AsyncBR { + podName string, containerName string, associatedObject string, callbacks Callbacks, log logrus.FieldLogger, +) AsyncBR { ms := &microServiceBRWatcher{ mgr: mgr, client: client, @@ -246,10 +247,12 @@ func (ms *microServiceBRWatcher) reEnsureThisPod(ctx context.Context) error { return
nil } -var funcGetPodTerminationMessage = kube.GetPodContainerTerminateMessage -var funcRedirectLog = redirectDataMoverLogs -var funcGetResultFromMessage = getResultFromMessage -var funcGetProgressFromMessage = getProgressFromMessage +var ( + funcGetPodTerminationMessage = kube.GetPodContainerTerminateMessage + funcRedirectLog = redirectDataMoverLogs + funcGetResultFromMessage = getResultFromMessage + funcGetProgressFromMessage = getProgressFromMessage +) var eventWaitTimeout time.Duration = time.Minute @@ -394,8 +397,10 @@ func (ms *microServiceBRWatcher) Cancel() { ms.log.WithField("taskType", ms.taskType).WithField("taskName", ms.taskName).Info("MicroServiceBR is canceled") } -var funcCreateTemp = os.CreateTemp -var funcCollectPodLogs = kube.CollectPodLogs +var ( + funcCreateTemp = os.CreateTemp + funcCollectPodLogs = kube.CollectPodLogs +) func redirectDataMoverLogs(ctx context.Context, kubeClient kubernetes.Interface, namespace string, thisPod string, thisContainer string, logger logrus.FieldLogger) error { logger.Infof("Starting to collect data mover pod log for %s", thisPod) diff --git a/pkg/exposer/generic_restore.go b/pkg/exposer/generic_restore.go index 5dc39e5a90..380274431a 100644 --- a/pkg/exposer/generic_restore.go +++ b/pkg/exposer/generic_restore.go @@ -374,7 +374,8 @@ func (e *genericRestoreExposer) RebindVolume(ctx context.Context, ownerObject co } func (e *genericRestoreExposer) createRestorePod(ctx context.Context, ownerObject corev1.ObjectReference, targetPVC *corev1.PersistentVolumeClaim, - operationTimeout time.Duration, label map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeType string) (*corev1.Pod, error) { + operationTimeout time.Duration, label map[string]string, selectedNode string, resources corev1.ResourceRequirements, nodeType string, +) (*corev1.Pod, error) { restorePodName := ownerObject.Name restorePVCName := ownerObject.Name diff --git a/pkg/exposer/generic_restore_test.go b/pkg/exposer/generic_restore_test.go index 15f8c1615b..6bb61249d0 100644 --- a/pkg/exposer/generic_restore_test.go +++ b/pkg/exposer/generic_restore_test.go @@ -184,7 +184,8 @@ func TestRestoreExpose(t *testing.T) { TargetNamespace: test.targetNamespace, HostingPodLabels: map[string]string{}, Resources: corev1api.ResourceRequirements{}, - ExposeTimeout: time.Millisecond}) + ExposeTimeout: time.Millisecond, + }) assert.EqualError(t, err, test.err) }) } diff --git a/pkg/exposer/host_path.go b/pkg/exposer/host_path.go index 94dc4503c3..d108d50a13 100644 --- a/pkg/exposer/host_path.go +++ b/pkg/exposer/host_path.go @@ -31,13 +31,16 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/kube" ) -var getVolumeDirectory = kube.GetVolumeDirectory -var getVolumeMode = kube.GetVolumeMode -var singlePathMatch = kube.SinglePathMatch +var ( + getVolumeDirectory = kube.GetVolumeDirectory + getVolumeMode = kube.GetVolumeMode + singlePathMatch = kube.SinglePathMatch +) // GetPodVolumeHostPath returns a path that can be accessed from the host for a given volume of a pod func GetPodVolumeHostPath(ctx context.Context, pod *corev1.Pod, volumeName string, - cli ctrlclient.Client, fs filesystem.Interface, log logrus.FieldLogger) (datapath.AccessPoint, error) { + cli ctrlclient.Client, fs filesystem.Interface, log logrus.FieldLogger, +) (datapath.AccessPoint, error) { logger := log.WithField("pod name", pod.Name).WithField("pod UID", pod.GetUID()).WithField("volume", volumeName) volDir, err := getVolumeDirectory(ctx, logger, pod, volumeName, cli) diff --git 
a/pkg/exposer/host_path_test.go b/pkg/exposer/host_path_test.go index 1022dffd30..7bb9e42879 100644 --- a/pkg/exposer/host_path_test.go +++ b/pkg/exposer/host_path_test.go @@ -70,7 +70,8 @@ func TestGetPodVolumeHostPath(t *testing.T) { { name: "get block volume dir success", getVolumeDirFunc: func(context.Context, logrus.FieldLogger, *corev1.Pod, string, ctrlclient.Client) ( - string, error) { + string, error, + ) { return "fake-pvc-1", nil }, pathMatchFunc: func(string, filesystem.Interface, logrus.FieldLogger) (string, error) { diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index 41fe474d0c..1030280314 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -208,6 +208,7 @@ func WithBackupRepoConfigMap(backupRepoConfigMap string) podTemplateOption { c.backupRepoConfigMap = backupRepoConfigMap } } + func WithRepoMaintenanceJobConfigMap(repoMaintenanceJobConfigMap string) podTemplateOption { return func(c *podTemplateConfig) { c.repoMaintenanceJobConfigMap = repoMaintenanceJobConfigMap diff --git a/pkg/install/install.go b/pkg/install/install.go index 391b97ec1b..ab448ffae2 100644 --- a/pkg/install/install.go +++ b/pkg/install/install.go @@ -339,7 +339,7 @@ func CreateClient(r *unstructured.Unstructured, factory client.DynamicFactory, w func Install(dynamicFactory client.DynamicFactory, kbClient kbclient.Client, resources *unstructured.UnstructuredList, w io.Writer) error { rg := GroupResources(resources) - //Install CRDs first + // Install CRDs first for _, r := range rg.CRDResources { if err := createResource(r, dynamicFactory, w); err != nil { return err diff --git a/pkg/itemoperationmap/backup_operation_map.go b/pkg/itemoperationmap/backup_operation_map.go index 47cdcac814..d4c97f4f9d 100644 --- a/pkg/itemoperationmap/backup_operation_map.go +++ b/pkg/itemoperationmap/backup_operation_map.go @@ -40,7 +40,8 @@ func NewBackupItemOperationsMap() *BackupItemOperationsMap { // returns a deep copy so we can minimize the time the map is locked func (m *BackupItemOperationsMap) GetOperationsForBackup( backupStore persistence.BackupStore, - backupName string) (*OperationsForBackup, error) { + backupName string, +) (*OperationsForBackup, error) { var err error // lock operations map m.opsLock.Lock() @@ -59,7 +60,8 @@ func (m *BackupItemOperationsMap) GetOperationsForBackup( func (m *BackupItemOperationsMap) PutOperationsForBackup( operations *OperationsForBackup, - backupName string) { + backupName string, +) { // lock operations map m.opsLock.Lock() defer m.opsLock.Unlock() @@ -80,7 +82,8 @@ func (m *BackupItemOperationsMap) DeleteOperationsForBackup(backupName string) { func (m *BackupItemOperationsMap) UploadProgressAndPutOperationsForBackup( backupStore persistence.BackupStore, operations *OperationsForBackup, - backupName string) error { + backupName string, +) error { m.opsLock.Lock() defer m.opsLock.Unlock() diff --git a/pkg/itemoperationmap/restore_operation_map.go b/pkg/itemoperationmap/restore_operation_map.go index 4256591bc5..5da9e17356 100644 --- a/pkg/itemoperationmap/restore_operation_map.go +++ b/pkg/itemoperationmap/restore_operation_map.go @@ -40,7 +40,8 @@ func NewRestoreItemOperationsMap() *RestoreItemOperationsMap { // returns a deep copy so we can minimize the time the map is locked func (m *RestoreItemOperationsMap) GetOperationsForRestore( backupStore persistence.BackupStore, - restoreName string) (*OperationsForRestore, error) { + restoreName string, +) (*OperationsForRestore, error) { var err error // lock operations map 
m.opsLock.Lock() @@ -59,7 +60,8 @@ func (m *RestoreItemOperationsMap) GetOperationsForRestore( func (m *RestoreItemOperationsMap) PutOperationsForRestore( operations *OperationsForRestore, - restoreName string) { + restoreName string, +) { // lock operations map m.opsLock.Lock() defer m.opsLock.Unlock() @@ -80,7 +82,8 @@ func (m *RestoreItemOperationsMap) DeleteOperationsForRestore(restoreName string func (m *RestoreItemOperationsMap) UploadProgressAndPutOperationsForRestore( backupStore persistence.BackupStore, operations *OperationsForRestore, - restoreName string) error { + restoreName string, +) error { m.opsLock.Lock() defer m.opsLock.Unlock() diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 7a477e5f0c..5cb7d1773c 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -30,7 +30,7 @@ type ServerMetrics struct { const ( metricNamespace = "velero" podVolumeMetricsNamespace = "podVolume" - //Velero metrics + // Velero metrics backupTarballSizeBytesGauge = "backup_tarball_size_bytes" backupTotal = "backup_total" backupAttemptTotal = "backup_attempt_total" diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index 5cdb36b6f0..9c4ae9a7cf 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -268,7 +268,7 @@ func (s *objectBackupStore) PutBackup(info BackupInfo) error { // Since the logic for all of these files is the exact same except for the name and the contents, // use a map literal to iterate through them and write them to the bucket. - var backupObjs = map[string]io.Reader{ + backupObjs := map[string]io.Reader{ s.layout.getPodVolumeBackupsKey(info.Name): info.PodVolumeBackups, s.layout.getBackupVolumeSnapshotsKey(info.Name): info.VolumeSnapshots, s.layout.getBackupItemOperationsKey(info.Name): info.BackupItemOperations, diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index e8eb3b9a0e..5ed60a9ef6 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -1145,6 +1145,7 @@ func TestGetBackupVolumeInfos(t *testing.T) { }) } } + func TestGetRestoreResults(t *testing.T) { harness := newObjectBackupStoreTestHarness("test-bucket", "") diff --git a/pkg/plugin/clientmgmt/process/process.go b/pkg/plugin/clientmgmt/process/process.go index 5ebb4a7a18..34da65800e 100644 --- a/pkg/plugin/clientmgmt/process/process.go +++ b/pkg/plugin/clientmgmt/process/process.go @@ -28,8 +28,7 @@ type Factory interface { newProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (Process, error) } -type processFactory struct { -} +type processFactory struct{} func newProcessFactory() Factory { return &processFactory{} diff --git a/pkg/plugin/clientmgmt/process/registry.go b/pkg/plugin/clientmgmt/process/registry.go index 744048690a..49b5281284 100644 --- a/pkg/plugin/clientmgmt/process/registry.go +++ b/pkg/plugin/clientmgmt/process/registry.go @@ -183,7 +183,7 @@ func executableLinux(info os.FileInfo) bool { the result will be 0 if and only if none of the executable bits is set. */ - return (info.Mode() & 0111) != 0 + return (info.Mode() & 0o111) != 0 } // listPlugins executes command, queries it for registered plugins, and returns the list of PluginIdentifiers. 
diff --git a/pkg/plugin/clientmgmt/process/registry_test.go b/pkg/plugin/clientmgmt/process/registry_test.go index e188419b2f..3e726880c7 100644 --- a/pkg/plugin/clientmgmt/process/registry_test.go +++ b/pkg/plugin/clientmgmt/process/registry_test.go @@ -66,34 +66,34 @@ func TestExecutable(t *testing.T) { }{ { name: "no perms", - mode: 0000, + mode: 0o000, }, { name: "r--r--r--", - mode: 0444, + mode: 0o444, }, { name: "rw-rw-rw-", - mode: 0666, + mode: 0o666, }, { name: "--x------", - mode: 0100, + mode: 0o100, expectExecutable: true, }, { name: "-----x---", - mode: 0010, + mode: 0o010, expectExecutable: true, }, { name: "--------x", - mode: 0001, + mode: 0o001, expectExecutable: true, }, { name: "rwxrwxrwx", - mode: 0777, + mode: 0o777, expectExecutable: true, }, { @@ -134,13 +134,13 @@ func TestReadPluginsDir(t *testing.T) { r := NewRegistry(dir, logger, logLevel).(*registry) r.fs = test.NewFakeFileSystem(). - WithFileAndMode("/plugins/executable1", []byte("plugin1"), 0755). - WithFileAndMode("/plugins/nonexecutable2", []byte("plugin2"), 0644). - WithFileAndMode("/plugins/executable3", []byte("plugin3"), 0755). - WithFileAndMode("/plugins/nested/executable4", []byte("plugin4"), 0755). - WithFileAndMode("/plugins/nested/nonexecutable5", []byte("plugin4"), 0644). - WithFileAndMode("/plugins/nested/win-exe1.exe", []byte("plugin4"), 0600). - WithFileAndMode("/plugins/nested/WIN-EXE2.EXE", []byte("plugin4"), 0600) + WithFileAndMode("/plugins/executable1", []byte("plugin1"), 0o755). + WithFileAndMode("/plugins/nonexecutable2", []byte("plugin2"), 0o644). + WithFileAndMode("/plugins/executable3", []byte("plugin3"), 0o755). + WithFileAndMode("/plugins/nested/executable4", []byte("plugin4"), 0o755). + WithFileAndMode("/plugins/nested/nonexecutable5", []byte("plugin4"), 0o644). + WithFileAndMode("/plugins/nested/win-exe1.exe", []byte("plugin4"), 0o600). 
+ WithFileAndMode("/plugins/nested/WIN-EXE2.EXE", []byte("plugin4"), 0o600) plugins, err := r.readPluginsDir(dir) require.NoError(t, err) diff --git a/pkg/plugin/clientmgmt/process/restartable_process.go b/pkg/plugin/clientmgmt/process/restartable_process.go index 21ed810225..af1b29a440 100644 --- a/pkg/plugin/clientmgmt/process/restartable_process.go +++ b/pkg/plugin/clientmgmt/process/restartable_process.go @@ -27,8 +27,7 @@ type RestartableProcessFactory interface { NewRestartableProcess(command string, logger logrus.FieldLogger, logLevel logrus.Level) (RestartableProcess, error) } -type restartableProcessFactory struct { -} +type restartableProcessFactory struct{} func NewRestartableProcessFactory() RestartableProcessFactory { return &restartableProcessFactory{} diff --git a/pkg/plugin/framework/action_resolver.go b/pkg/plugin/framework/action_resolver.go index ac8a0b1d09..d2dccb14b4 100644 --- a/pkg/plugin/framework/action_resolver.go +++ b/pkg/plugin/framework/action_resolver.go @@ -57,7 +57,8 @@ type resolvedAction struct { } func (recv resolvedAction) ShouldUse(groupResource schema.GroupResource, namespace string, metadata metav1.Object, - log logrus.FieldLogger) bool { + log logrus.FieldLogger, +) bool { if !recv.ResourceIncludesExcludes.ShouldInclude(groupResource.String()) { log.Debug("Skipping action because it does not apply to this resource") return false @@ -82,7 +83,8 @@ func (recv resolvedAction) ShouldUse(groupResource schema.GroupResource, namespa // resolveAction resolves the resources, namespaces and selector into fully-qualified versions func resolveAction(helper discovery.Helper, action velero.Applicable) (resources *collections.IncludesExcludes, - namespaces *collections.IncludesExcludes, selector labels.Selector, err error) { + namespaces *collections.IncludesExcludes, selector labels.Selector, err error, +) { resourceSelector, err := action.AppliesTo() if err != nil { return nil, nil, nil, err diff --git a/pkg/plugin/framework/backup_item_action_server.go b/pkg/plugin/framework/backup_item_action_server.go index 6511591a9f..a85185bd88 100644 --- a/pkg/plugin/framework/backup_item_action_server.go +++ b/pkg/plugin/framework/backup_item_action_server.go @@ -52,7 +52,8 @@ func (s *BackupItemActionGRPCServer) getImpl(name string) (biav1.BackupItemActio func (s *BackupItemActionGRPCServer) AppliesTo( ctx context.Context, req *proto.BackupItemActionAppliesToRequest) ( - response *proto.BackupItemActionAppliesToResponse, err error) { + response *proto.BackupItemActionAppliesToResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -81,7 +82,8 @@ func (s *BackupItemActionGRPCServer) AppliesTo( } func (s *BackupItemActionGRPCServer) Execute( - ctx context.Context, req *proto.ExecuteRequest) (response *proto.ExecuteResponse, err error) { + ctx context.Context, req *proto.ExecuteRequest, +) (response *proto.ExecuteResponse, err error) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr diff --git a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go index c622490e7d..df3a24639f 100644 --- a/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go +++ b/pkg/plugin/framework/backupitemaction/v2/backup_item_action_server.go @@ -56,7 +56,8 @@ func (s *BackupItemActionGRPCServer) getImpl(name string) (biav2.BackupItemActio func (s 
*BackupItemActionGRPCServer) AppliesTo( ctx context.Context, req *protobiav2.BackupItemActionAppliesToRequest) ( - response *protobiav2.BackupItemActionAppliesToResponse, err error) { + response *protobiav2.BackupItemActionAppliesToResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -85,7 +86,8 @@ func (s *BackupItemActionGRPCServer) AppliesTo( } func (s *BackupItemActionGRPCServer) Execute( - ctx context.Context, req *protobiav2.ExecuteRequest) (response *protobiav2.ExecuteResponse, err error) { + ctx context.Context, req *protobiav2.ExecuteRequest, +) (response *protobiav2.ExecuteResponse, err error) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -141,7 +143,8 @@ func (s *BackupItemActionGRPCServer) Execute( func (s *BackupItemActionGRPCServer) Progress( ctx context.Context, req *protobiav2.BackupItemActionProgressRequest) ( - response *protobiav2.BackupItemActionProgressResponse, err error) { + response *protobiav2.BackupItemActionProgressResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -180,7 +183,8 @@ func (s *BackupItemActionGRPCServer) Progress( func (s *BackupItemActionGRPCServer) Cancel( ctx context.Context, req *protobiav2.BackupItemActionCancelRequest) ( - response *emptypb.Empty, err error) { + response *emptypb.Empty, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr diff --git a/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go b/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go index ab9ad7485d..0362c361f5 100644 --- a/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go +++ b/pkg/plugin/framework/itemblockaction/v1/item_block_action_server.go @@ -53,7 +53,8 @@ func (s *ItemBlockActionGRPCServer) getImpl(name string) (ibav1.ItemBlockAction, func (s *ItemBlockActionGRPCServer) AppliesTo( ctx context.Context, req *protoibav1.ItemBlockActionAppliesToRequest) ( - response *protoibav1.ItemBlockActionAppliesToResponse, err error) { + response *protoibav1.ItemBlockActionAppliesToResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -82,7 +83,8 @@ func (s *ItemBlockActionGRPCServer) AppliesTo( } func (s *ItemBlockActionGRPCServer) GetRelatedItems( - ctx context.Context, req *protoibav1.ItemBlockActionGetRelatedItemsRequest) (response *protoibav1.ItemBlockActionGetRelatedItemsResponse, err error) { + ctx context.Context, req *protoibav1.ItemBlockActionGetRelatedItemsRequest, +) (response *protoibav1.ItemBlockActionGetRelatedItemsResponse, err error) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr diff --git a/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go b/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go index 2795d787df..ee49379696 100644 --- a/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go +++ b/pkg/plugin/framework/restoreitemaction/v2/restore_item_action_server.go @@ -150,7 +150,8 @@ func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *protoria } func (s *RestoreItemActionGRPCServer) Progress(ctx context.Context, req *protoriav2.RestoreItemActionProgressRequest) ( - response 
*protoriav2.RestoreItemActionProgressResponse, err error) { + response *protoriav2.RestoreItemActionProgressResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -189,7 +190,8 @@ func (s *RestoreItemActionGRPCServer) Progress(ctx context.Context, req *protori func (s *RestoreItemActionGRPCServer) Cancel( ctx context.Context, req *protoriav2.RestoreItemActionCancelRequest) ( - response *emptypb.Empty, err error) { + response *emptypb.Empty, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -215,7 +217,8 @@ func (s *RestoreItemActionGRPCServer) Cancel( } func (s *RestoreItemActionGRPCServer) AreAdditionalItemsReady(ctx context.Context, req *protoriav2.RestoreItemActionItemsReadyRequest) ( - response *protoriav2.RestoreItemActionItemsReadyResponse, err error) { + response *protoriav2.RestoreItemActionItemsReadyResponse, err error, +) { defer func() { if recoveredErr := common.HandlePanic(recover()); recoveredErr != nil { err = recoveredErr diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go index 1456bdf111..3d38c1d94e 100644 --- a/pkg/podvolume/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -193,7 +193,7 @@ func Test_backupper_BackupPodVolumes_log_test(t *testing.T) { ctx: context.Background(), } logOutput := bytes.Buffer{} - var log = logrus.New() + log := logrus.New() log.SetOutput(&logOutput) b.BackupPodVolumes(tt.args.backup, tt.args.pod, tt.args.volumesToBackup, tt.args.resPolicies, log) fmt.Println(logOutput.String()) @@ -712,6 +712,7 @@ type logHook struct { func (l *logHook) Levels() []logrus.Level { return []logrus.Level{logrus.ErrorLevel} } + func (l *logHook) Fire(entry *logrus.Entry) error { l.entry = entry return nil diff --git a/pkg/podvolume/restorer_factory.go b/pkg/podvolume/restorer_factory.go index 178d720c87..bd66717af5 100644 --- a/pkg/podvolume/restorer_factory.go +++ b/pkg/podvolume/restorer_factory.go @@ -41,7 +41,8 @@ func NewRestorerFactory(repoLocker *repository.RepoLocker, kubeClient kubernetes.Interface, crClient ctrlclient.Client, pvrInformer ctrlcache.Informer, - log logrus.FieldLogger) RestorerFactory { + log logrus.FieldLogger, +) RestorerFactory { return &restorerFactory{ repoLocker: repoLocker, repoEnsurer: repoEnsurer, diff --git a/pkg/repository/backup_repo_op.go b/pkg/repository/backup_repo_op.go index ff253e6038..744f709b57 100644 --- a/pkg/repository/backup_repo_op.go +++ b/pkg/repository/backup_repo_op.go @@ -51,7 +51,7 @@ func repoLabelsFromKey(key BackupRepositoryKey) labels.Set { // GetBackupRepository gets a backup repository through BackupRepositoryKey and ensure ready if required. 
func GetBackupRepository(ctx context.Context, cli client.Client, namespace string, key BackupRepositoryKey, options ...bool) (*velerov1api.BackupRepository, error) { - var ensureReady = true + ensureReady := true if len(options) > 0 { ensureReady = options[0] } @@ -63,7 +63,6 @@ func GetBackupRepository(ctx context.Context, cli client.Client, namespace strin Namespace: namespace, LabelSelector: selector, }) - if err != nil { return nil, errors.Wrap(err, "error getting backup repository list") } diff --git a/pkg/repository/backup_repo_op_test.go b/pkg/repository/backup_repo_op_test.go index a317e22c2b..c915ae8dcc 100644 --- a/pkg/repository/backup_repo_op_test.go +++ b/pkg/repository/backup_repo_op_test.go @@ -19,11 +19,10 @@ package repository import ( "context" "fmt" + "testing" "github.com/stretchr/testify/assert" - "testing" - "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -76,7 +75,8 @@ func TestGetBackupRepository(t *testing.T) { name: "found more than one repository", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, velerov1api.BackupRepositoryPhaseReady, "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, velerov1api.BackupRepositoryPhaseReady, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns", "fake-bsl", "fake-repository-type"}, expectedErr: "more than one BackupRepository found for workload namespace \"fake-volume-ns\", backup storage location \"fake-bsl\", repository type \"fake-repository-type\"", }, @@ -84,7 +84,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository not ready, not expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02"), }, @@ -92,7 +93,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository is new, not expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02"), }, @@ 
-100,7 +102,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository state is empty, not expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02"), }, @@ -108,7 +111,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository not ready, expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNotReady, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, ensureReady: true, expectedErr: "backup repository is not ready: ", @@ -117,7 +121,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository is new, expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseNew, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, ensureReady: true, expectedErr: "backup repository not provisioned", @@ -126,7 +131,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository state is empty, expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02")}, + buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, "", "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, ensureReady: true, expectedErr: "backup repository not provisioned", @@ -135,7 +141,8 @@ func TestGetBackupRepository(t *testing.T) { name: "repository ready, expect ready", backupRepositories: []velerov1api.BackupRepository{ buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-01", "fake-bsl-01", "fake-repository-type-01"}, velerov1api.BackupRepositoryPhaseNotReady, "01"), - buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseReady, "02")}, + 
buildBackupRepo(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseReady, "02"), + }, backupRepositoryKey: BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, ensureReady: true, expected: buildBackupRepoPointer(BackupRepositoryKey{"fake-volume-ns-02", "fake-bsl-02", "fake-repository-type-02"}, velerov1api.BackupRepositoryPhaseReady, "02"), diff --git a/pkg/repository/maintenance.go b/pkg/repository/maintenance.go index 5ea63979f0..95e8967614 100644 --- a/pkg/repository/maintenance.go +++ b/pkg/repository/maintenance.go @@ -141,7 +141,6 @@ func GetLatestMaintenanceJob(cli client.Client, ns string) (*batchv1.Job, error) }, &client.HasLabels{RepositoryNameLabel}, ) - if err != nil { return nil, err } diff --git a/pkg/repository/maintenance_test.go b/pkg/repository/maintenance_test.go index f6344b166a..047c85fcb2 100644 --- a/pkg/repository/maintenance_test.go +++ b/pkg/repository/maintenance_test.go @@ -70,6 +70,7 @@ func TestGenerateJobName1(t *testing.T) { }) } } + func TestDeleteOldMaintenanceJobs(t *testing.T) { // Set up test repo and keep value repo := "test-repo" diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 6191c44528..2cf55f1ae2 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -48,9 +48,11 @@ type unifiedRepoProvider struct { // this func is assigned to a package-level variable so it can be // replaced when unit-testing -var getS3Credentials = repoconfig.GetS3Credentials -var getGCPCredentials = repoconfig.GetGCPCredentials -var getS3BucketRegion = repoconfig.GetAWSBucketRegion +var ( + getS3Credentials = repoconfig.GetS3Credentials + getGCPCredentials = repoconfig.GetGCPCredentials + getS3BucketRegion = repoconfig.GetAWSBucketRegion +) type localFuncTable struct { getStorageVariables func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) @@ -107,7 +109,6 @@ func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) e udmrepo.WithStoreOptions(urp, param), udmrepo.WithDescription(repoConnectDesc), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -143,7 +144,6 @@ func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoPar udmrepo.WithStoreOptions(urp, param), udmrepo.WithDescription(repoConnectDesc), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -179,7 +179,6 @@ func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam udmrepo.WithStoreOptions(urp, param), udmrepo.WithDescription(repoConnectDesc), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -217,7 +216,6 @@ func (urp *unifiedRepoProvider) BoostRepoConnect(ctx context.Context, param Repo udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), udmrepo.WithDescription(repoConnectDesc), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -248,7 +246,6 @@ func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), udmrepo.WithDescription(repoOpDescMaintain), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -282,7 +279,6 @@ func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, p udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), 
udmrepo.WithDescription(repoOpDescForget), ) - if err != nil { return errors.Wrap(err, "error to get repo options") } @@ -329,7 +325,6 @@ func (urp *unifiedRepoProvider) BatchForget(ctx context.Context, snapshotIDs []s udmrepo.WithConfigFile(urp.workPath, string(param.BackupRepo.UID)), udmrepo.WithDescription(repoOpDescForget), ) - if err != nil { return []error{errors.Wrap(err, "error to get repo options")} } diff --git a/pkg/repository/udmrepo/kopialib/lib_repo.go b/pkg/repository/udmrepo/kopialib/lib_repo.go index d4e1f88133..e7f4e4aff1 100644 --- a/pkg/repository/udmrepo/kopialib/lib_repo.go +++ b/pkg/repository/udmrepo/kopialib/lib_repo.go @@ -136,7 +136,6 @@ func (ks *kopiaRepoService) Open(ctx context.Context, repoOption udmrepo.RepoOpt Purpose: repoOption.Description, OnUpload: kr.updateProgress, }) - if err != nil { if e := r.Close(repoCtx); e != nil { ks.logger.WithError(e).Error("Failed to close raw repository on error") @@ -199,7 +198,6 @@ func (ks *kopiaRepoService) Maintain(ctx context.Context, repoOption udmrepo.Rep }, func(ctx context.Context, dw repo.DirectRepositoryWriter) error { return km.runMaintenance(ctx, dw) }) - if err != nil { return errors.Wrap(err, "error to maintain repo") } @@ -609,7 +607,6 @@ func writeInitParameters(ctx context.Context, repoOption udmrepo.RepoOptions, lo return nil }) - if err != nil { return errors.Wrap(err, "error to init write repo parameters") } diff --git a/pkg/restic/exec_commands.go b/pkg/restic/exec_commands.go index 94c17c04a4..965738e7fa 100644 --- a/pkg/restic/exec_commands.go +++ b/pkg/restic/exec_commands.go @@ -31,8 +31,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) -const restoreProgressCheckInterval = 10 * time.Second -const backupProgressCheckInterval = 10 * time.Second +const ( + restoreProgressCheckInterval = 10 * time.Second + backupProgressCheckInterval = 10 * time.Second +) var fileSystem = filesystem.NewFileSystem() diff --git a/pkg/restore/actions/change_image_name_action.go b/pkg/restore/actions/change_image_name_action.go index 66fac1f186..5486721afe 100644 --- a/pkg/restore/actions/change_image_name_action.go +++ b/pkg/restore/actions/change_image_name_action.go @@ -114,27 +114,27 @@ func (a *ChangeImageNameAction) Execute(input *velero.RestoreItemActionExecuteIn return nil, errors.Wrap(err, "error getting item's spec.containers") } } else if obj.GetKind() == "CronJob" { - //handle containers + // handle containers err = a.replaceImageName(obj, config, "spec", "jobTemplate", "spec", "template", "spec", "containers") if err != nil { a.logger.Infof("replace image name meet error: %v", err) return nil, errors.Wrap(err, "error getting item's spec.containers") } - //handle initContainers + // handle initContainers err = a.replaceImageName(obj, config, "spec", "jobTemplate", "spec", "template", "spec", "initContainers") if err != nil { a.logger.Infof("replace image name meet error: %v", err) return nil, errors.Wrap(err, "error getting item's spec.containers") } } else { - //handle containers + // handle containers err = a.replaceImageName(obj, config, "spec", "template", "spec", "containers") if err != nil { a.logger.Infof("replace image name meet error: %v", err) return nil, errors.Wrap(err, "error getting item's spec.containers") } - //handle initContainers + // handle initContainers err = a.replaceImageName(obj, config, "spec", "template", "spec", "initContainers") if err != nil { a.logger.Infof("replace image name meet error: %v", err) diff --git a/pkg/restore/actions/csi/pvc_action.go 
b/pkg/restore/actions/csi/pvc_action.go index 0462bb74c9..b353183ca6 100644 --- a/pkg/restore/actions/csi/pvc_action.go +++ b/pkg/restore/actions/csi/pvc_action.go @@ -68,7 +68,7 @@ type pvcRestoreItemAction struct { func (p *pvcRestoreItemAction) AppliesTo() (velero.ResourceSelector, error) { return velero.ResourceSelector{ IncludedResources: []string{"persistentvolumeclaims"}, - //TODO: add label selector volumeSnapshotLabel + // TODO: add label selector volumeSnapshotLabel }, nil } @@ -188,7 +188,6 @@ func (p *pvcRestoreItemAction) Execute( }, backup, ) - if err != nil { logger.Error("Fail to get backup for restore.") return nil, fmt.Errorf("fail to get backup for restore: %s", err.Error()) @@ -312,7 +311,8 @@ func (p *pvcRestoreItemAction) Progress( } func (p *pvcRestoreItemAction) Cancel( - operationID string, restore *velerov1api.Restore) error { + operationID string, restore *velerov1api.Restore, +) error { if operationID == "" { return riav2.InvalidOperationIDError(operationID) } @@ -431,7 +431,8 @@ func getDataDownload( } func cancelDataDownload(ctx context.Context, crClient crclient.Client, - dataDownload *velerov2alpha1.DataDownload) error { + dataDownload *velerov2alpha1.DataDownload, +) error { updatedDataDownload := dataDownload.DeepCopy() updatedDataDownload.Spec.Cancel = true diff --git a/pkg/restore/actions/csi/volumesnapshot_action.go b/pkg/restore/actions/csi/volumesnapshot_action.go index 61af12c675..f0a5118a7c 100644 --- a/pkg/restore/actions/csi/volumesnapshot_action.go +++ b/pkg/restore/actions/csi/volumesnapshot_action.go @@ -48,14 +48,16 @@ type volumeSnapshotRestoreItemAction struct { // VolumeSnapshotRestoreItemAction should be invoked while // restoring volumesnapshots.snapshot.storage.k8s.io resources. func (p *volumeSnapshotRestoreItemAction) AppliesTo() ( - velero.ResourceSelector, error) { + velero.ResourceSelector, error, +) { return velero.ResourceSelector{ IncludedResources: []string{"volumesnapshots.snapshot.storage.k8s.io"}, }, nil } func resetVolumeSnapshotSpecForRestore( - vs *snapshotv1api.VolumeSnapshot, vscName *string) { + vs *snapshotv1api.VolumeSnapshot, vscName *string, +) { // Spec of the backed-up object used the PVC as the source // of the volumeSnapshot. 
Restore operation will however, // restore the VolumeSnapshot from the VolumeSnapshotContent @@ -64,8 +66,7 @@ func resetVolumeSnapshotSpecForRestore( } func resetVolumeSnapshotAnnotation(vs *snapshotv1api.VolumeSnapshot) { - vs.ObjectMeta.Annotations[velerov1api.VSCDeletionPolicyAnnotation] = - string(snapshotv1api.VolumeSnapshotContentRetain) + vs.ObjectMeta.Annotations[velerov1api.VSCDeletionPolicyAnnotation] = string(snapshotv1api.VolumeSnapshotContentRetain) } // Execute uses the data such as CSI driver name, storage @@ -206,7 +207,8 @@ func (p *volumeSnapshotRestoreItemAction) AreAdditionalItemsReady( } func NewVolumeSnapshotRestoreItemAction( - f client.Factory) plugincommon.HandlerInitializer { + f client.Factory, +) plugincommon.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { crClient, err := f.KubebuilderClient() if err != nil { diff --git a/pkg/restore/actions/csi/volumesnapshotclass_action.go b/pkg/restore/actions/csi/volumesnapshotclass_action.go index 577445a4bb..7f9a65eb49 100644 --- a/pkg/restore/actions/csi/volumesnapshotclass_action.go +++ b/pkg/restore/actions/csi/volumesnapshotclass_action.go @@ -106,6 +106,7 @@ func (p *volumeSnapshotClassRestoreItemAction) AreAdditionalItemsReady( } func NewVolumeSnapshotClassRestoreItemAction( - logger logrus.FieldLogger) (interface{}, error) { + logger logrus.FieldLogger, +) (interface{}, error) { return &volumeSnapshotClassRestoreItemAction{logger}, nil } diff --git a/pkg/restore/actions/init_restorehook_pod_action_test.go b/pkg/restore/actions/init_restorehook_pod_action_test.go index 259898cba7..3b09088712 100644 --- a/pkg/restore/actions/init_restorehook_pod_action_test.go +++ b/pkg/restore/actions/init_restorehook_pod_action_test.go @@ -58,7 +58,8 @@ func TestInitContainerRestoreHookPodActionExecute(t *testing.T) { builder.ForContainer("init-app-step2", "busy-box"). Command([]string{"init-step2"}).Result(), builder.ForContainer("init-app-step3", "busy-box"). - Command([]string{"init-step3"}).Result()}...).Result(), + Command([]string{"init-step3"}).Result(), + }...).Result(), expectedRes: builder.ForPod("default", "app1"). ObjectMeta(builder.WithAnnotations( "init.hook.restore.velero.io/container-image", "nginx", @@ -75,7 +76,8 @@ func TestInitContainerRestoreHookPodActionExecute(t *testing.T) { builder.ForContainer("init-app-step2", "busy-box"). Command([]string{"init-step2"}).Result(), builder.ForContainer("init-app-step3", "busy-box"). 
- Command([]string{"init-step3"}).Result()}...).Result(), + Command([]string{"init-step3"}).Result(), + }...).Result(), }, { name: "should run restore hook from restore spec", diff --git a/pkg/restore/actions/pod_action.go b/pkg/restore/actions/pod_action.go index 9a98aa88c6..cd263bc097 100644 --- a/pkg/restore/actions/pod_action.go +++ b/pkg/restore/actions/pod_action.go @@ -90,7 +90,8 @@ func (a *PodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*veler if pod.Spec.PriorityClassName != "" { a.logger.Infof("Adding priorityclass %s to AdditionalItems", pod.Spec.PriorityClassName) restoreExecuteOutput.AdditionalItems = []velero.ResourceIdentifier{ - {GroupResource: kuberesource.PriorityClasses, Name: pod.Spec.PriorityClassName}} + {GroupResource: kuberesource.PriorityClasses, Name: pod.Spec.PriorityClassName}, + } } return restoreExecuteOutput, nil } diff --git a/pkg/restore/actions/pod_action_test.go b/pkg/restore/actions/pod_action_test.go index 2c886d39cd..172279de05 100644 --- a/pkg/restore/actions/pod_action_test.go +++ b/pkg/restore/actions/pod_action_test.go @@ -215,8 +215,9 @@ func TestPodActionExecute(t *testing.T) { }, }, additionalItems: []velero.ResourceIdentifier{ - {GroupResource: kuberesource.PriorityClasses, - Name: "testPriorityClass", + { + GroupResource: kuberesource.PriorityClasses, + Name: "testPriorityClass", }, }, }, diff --git a/pkg/restore/actions/service_action.go b/pkg/restore/actions/service_action.go index ee392a3810..5bc44ac08f 100644 --- a/pkg/restore/actions/service_action.go +++ b/pkg/restore/actions/service_action.go @@ -163,7 +163,6 @@ func deleteNodePorts(service *corev1api.Service) error { } ports, bool, err := unstructured.NestedSlice(*appliedServiceUnstructured, "spec", "ports") - if err != nil { return errors.WithStack(err) } diff --git a/pkg/restore/merge_service_account_test.go b/pkg/restore/merge_service_account_test.go index 4322ae0033..34a0a74978 100644 --- a/pkg/restore/merge_service_account_test.go +++ b/pkg/restore/merge_service_account_test.go @@ -316,7 +316,7 @@ func stripWhitespace(s string) string { } func TestMergeMaps(t *testing.T) { - var testCases = []struct { + testCases := []struct { name string source map[string]string destination map[string]string diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 619db5b87e..29ecf3a5d1 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -989,7 +989,6 @@ func (ctx *restoreContext) itemsAvailable(action framework.RestoreItemResolvedAc err := wait.PollUntilContextTimeout(go_context.Background(), time.Second, timeout, true, func(go_context.Context) (bool, error) { var err error available, err = action.AreAdditionalItemsReady(restoreItemOut.AdditionalItems, ctx.restore) - if err != nil { return true, err } @@ -1016,6 +1015,7 @@ func getResourceClientKey(groupResource schema.GroupResource, version, namespace namespace: namespace, } } + func (ctx *restoreContext) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) { key := getResourceClientKey(groupResource, obj.GroupVersionKind().Version, namespace) @@ -1145,7 +1145,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } // Make a copy of object retrieved from backup to make it available unchanged - //inside restore actions. + // inside restore actions. 
itemFromBackup := obj.DeepCopy() complete, err := isCompleted(obj, groupResource) @@ -1588,7 +1588,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso if len(ctx.restore.Spec.ExistingResourcePolicy) > 0 && ctx.restore.Spec.ExistingResourcePolicy == velerov1api.PolicyTypeUpdate { // remove restore labels so that we apply the latest backup/restore names on the object via patch removeRestoreLabels(fromCluster) - //try patching just the backup/restore labels + // try patching just the backup/restore labels warningsFromUpdate, errsFromUpdate := ctx.updateBackupRestoreLabels(fromCluster, fromClusterWithLabels, namespace, resourceClient) warnings.Merge(&warningsFromUpdate) errs.Merge(&errsFromUpdate) @@ -1630,7 +1630,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso return warnings, errs, itemExists } - //update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update + // update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update if ctx.restore.Spec.ExistingResourcePolicy == velerov1api.PolicyTypeUpdate { resourcePolicy := ctx.restore.Spec.ExistingResourcePolicy ctx.log.Infof("restore API has resource policy defined %s , executing restore workflow accordingly for unchanged resource %s %s ", resourcePolicy, obj.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster)) @@ -2324,8 +2324,8 @@ func (ctx *restoreContext) getSelectedRestoreableItems(resource string, original // Processing OrLabelSelectors when specified in the restore request. LabelSelectors as well as OrLabelSelectors // cannot co-exist, only one of them can be specified - var skipItem = false - var skip = 0 + skipItem := false + skip := 0 ctx.log.Debugf("orSelectors specified: %s for item: %s", ctx.OrSelectors, item) for _, s := range ctx.OrSelectors { if !s.Matches(labels.Set(obj.GetLabels())) { @@ -2350,8 +2350,7 @@ func (ctx *restoreContext) getSelectedRestoreableItems(resource string, original targetNamespace: targetNamespace, version: obj.GroupVersionKind().Version, } - restorable.selectedItemsByNamespace[originalNamespace] = - append(restorable.selectedItemsByNamespace[originalNamespace], selectedItem) + restorable.selectedItemsByNamespace[originalNamespace] = append(restorable.selectedItemsByNamespace[originalNamespace], selectedItem) restorable.totalItems++ } return restorable, warnings, errs diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 065d23cbe5..206a6395e7 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -449,8 +449,12 @@ func TestRestoreResourceFiltering(t *testing.T) { }, { name: "OrLabelSelectors only restores matching resources", - restore: defaultRestore().OrLabelSelector([]*metav1.LabelSelector{{MatchLabels: map[string]string{"a1": "b1"}}, {MatchLabels: map[string]string{"a2": "b2"}}, - {MatchLabels: map[string]string{"a3": "b3"}}, {MatchLabels: map[string]string{"a4": "b4"}}}).Result(), + restore: defaultRestore().OrLabelSelector([]*metav1.LabelSelector{ + {MatchLabels: map[string]string{"a1": "b1"}}, + {MatchLabels: map[string]string{"a2": "b2"}}, + {MatchLabels: map[string]string{"a3": "b3"}}, + {MatchLabels: map[string]string{"a4": "b4"}}, + }).Result(), backup: defaultBackup().Result(), tarball: test.NewTarWriter(t). 
AddItems("pods", @@ -1925,7 +1929,8 @@ func TestRestoreWithAsyncOperations(t *testing.T) { ResourceIdentifier: velero.ResourceIdentifier{ GroupResource: kuberesource.Pods, Namespace: "ns-1", - Name: "pod-1"}, + Name: "pod-1", + }, OperationID: "pod-1-1", }, Status: itemoperation.OperationStatus{ @@ -1950,7 +1955,8 @@ func TestRestoreWithAsyncOperations(t *testing.T) { ResourceIdentifier: velero.ResourceIdentifier{ GroupResource: kuberesource.Pods, Namespace: "ns-1", - Name: "pod-2"}, + Name: "pod-2", + }, OperationID: "pod-2-1", }, Status: itemoperation.OperationStatus{ @@ -2852,7 +2858,7 @@ func TestRestorePersistentVolumes(t *testing.T) { AddItems( "persistentvolumes", builder.ForPersistentVolume("source-pv"). - //ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + // ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). AWSEBSVolumeID("source-volume"). ClaimRef("source-ns", "pvc-1"). Result(), @@ -2865,7 +2871,7 @@ func TestRestorePersistentVolumes(t *testing.T) { apiResources: []*test.APIResource{ test.PVs( builder.ForPersistentVolume("source-pv"). - //ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + // ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). AWSEBSVolumeID("source-volume"). ClaimRef("source-ns", "pvc-1"). Result(), @@ -2914,7 +2920,7 @@ func TestRestorePersistentVolumes(t *testing.T) { want: []*test.APIResource{ test.PVs( builder.ForPersistentVolume("source-pv"). - //ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + // ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). ObjectMeta( builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), ). @@ -3765,12 +3771,14 @@ func Test_resetVolumeBindingInfo(t *testing.T) { "namespace": "ns-1", "name": "pvc-1", "uid": "abc", - "resourceVersion": "1"}).Unstructured, + "resourceVersion": "1", + }).Unstructured, expected: newTestUnstructured().WithMetadataField("kind", "persistentVolume"). WithName("pv-1"). WithAnnotations(kubeutil.KubeAnnDynamicallyProvisioned). 
WithSpecField("claimRef", map[string]interface{}{ - "namespace": "ns-1", "name": "pvc-1"}).Unstructured, + "namespace": "ns-1", "name": "pvc-1", + }).Unstructured, }, { name: "PVCs that are bound have their binding annotations removed, but the volume name stays", diff --git a/pkg/test/fake_file_system.go b/pkg/test/fake_file_system.go index 7a744647b4..98b8387104 100644 --- a/pkg/test/fake_file_system.go +++ b/pkg/test/fake_file_system.go @@ -96,7 +96,7 @@ func (fs *FakeFileSystem) WithFileAndMode(path string, data []byte, mode os.File } func (fs *FakeFileSystem) WithDirectory(path string) *FakeFileSystem { - _ = fs.fs.MkdirAll(path, 0755) + _ = fs.fs.MkdirAll(path, 0o755) return fs } diff --git a/pkg/test/fake_mapper.go b/pkg/test/fake_mapper.go index 1686af8156..3a80001212 100644 --- a/pkg/test/fake_mapper.go +++ b/pkg/test/fake_mapper.go @@ -77,7 +77,7 @@ func (m *FakeMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta } for _, gvk := range potentialGVK { - //Ensure we have a REST mapping + // Ensure we have a REST mapping res, ok := m.KindToPluralResource[gvk] if !ok { continue diff --git a/pkg/test/tar_writer.go b/pkg/test/tar_writer.go index 6cfcd8bd7c..3ce307073f 100644 --- a/pkg/test/tar_writer.go +++ b/pkg/test/tar_writer.go @@ -87,7 +87,7 @@ func (tw *TarWriter) Add(name string, obj interface{}) *TarWriter { Name: name, Size: int64(len(data)), Typeflag: tar.TypeReg, - Mode: 0755, + Mode: 0o755, ModTime: time.Now(), })) diff --git a/pkg/uploader/kopia/progress.go b/pkg/uploader/kopia/progress.go index 9f2498379c..c1cc6ff40f 100644 --- a/pkg/uploader/kopia/progress.go +++ b/pkg/uploader/kopia/progress.go @@ -47,21 +47,21 @@ func (t *Throttle) ShouldOutput() bool { type Progress struct { // all int64 must precede all int32 due to alignment requirements on ARM // +checkatomic - uploadedBytes int64 //the total bytes has uploaded - cachedBytes int64 //the total bytes has cached - hashededBytes int64 //the total bytes has hashed + uploadedBytes int64 // the total bytes has uploaded + cachedBytes int64 // the total bytes has cached + hashededBytes int64 // the total bytes has hashed // +checkatomic - uploadedFiles int32 //the total files has ignored + uploadedFiles int32 // the total files has ignored // +checkatomic - ignoredErrorCount int32 //the total errors has ignored + ignoredErrorCount int32 // the total errors has ignored // +checkatomic - fatalErrorCount int32 //the total errors has occurred + fatalErrorCount int32 // the total errors has occurred estimatedFileCount int64 // +checklocksignore the total count of files to be processed estimatedTotalBytes int64 // +checklocksignore the total size of files to be processed // +checkatomic processedBytes int64 // which statistic all bytes has been processed currently outputThrottle Throttle // which control the frequency of update progress - updater uploader.ProgressUpdater //which kopia progress will call the UpdateProgress interface, the third party will implement the interface to do the progress update + updater uploader.ProgressUpdater // which kopia progress will call the UpdateProgress interface, the third party will implement the interface to do the progress update log logrus.FieldLogger // output info into log when backup estimationParam snapshotfs.EstimationParameters } diff --git a/pkg/uploader/kopia/snapshot.go b/pkg/uploader/kopia/snapshot.go index fce620eb73..40c11959d2 100644 --- a/pkg/uploader/kopia/snapshot.go +++ b/pkg/uploader/kopia/snapshot.go @@ -45,17 +45,21 @@ import ( ) // All function mainly 
used to make testing more convenient -var applyRetentionPolicyFunc = policy.ApplyRetentionPolicy -var treeForSourceFunc = policy.TreeForSource -var setPolicyFunc = policy.SetPolicy -var saveSnapshotFunc = snapshot.SaveSnapshot -var loadSnapshotFunc = snapshot.LoadSnapshot -var listSnapshotsFunc = snapshot.ListSnapshots -var filesystemEntryFunc = snapshotfs.FilesystemEntryFromIDWithPath -var restoreEntryFunc = restore.Entry - -const UploaderConfigMultipartKey = "uploader-multipart" -const MaxErrorReported = 10 +var ( + applyRetentionPolicyFunc = policy.ApplyRetentionPolicy + treeForSourceFunc = policy.TreeForSource + setPolicyFunc = policy.SetPolicy + saveSnapshotFunc = snapshot.SaveSnapshot + loadSnapshotFunc = snapshot.LoadSnapshot + listSnapshotsFunc = snapshot.ListSnapshots + filesystemEntryFunc = snapshotfs.FilesystemEntryFromIDWithPath + restoreEntryFunc = restore.Entry +) + +const ( + UploaderConfigMultipartKey = "uploader-multipart" + MaxErrorReported = 10 +) // SnapshotUploader which mainly used for UT test that could overwrite Upload interface type SnapshotUploader interface { @@ -152,7 +156,8 @@ func setupPolicy(ctx context.Context, rep repo.RepositoryWriter, sourceInfo snap // Backup backup specific sourcePath and update progress func Backup(ctx context.Context, fsUploader SnapshotUploader, repoWriter repo.RepositoryWriter, sourcePath string, realSource string, - forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger) (*uploader.SnapshotInfo, bool, error) { + forceFull bool, parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, tags map[string]string, log logrus.FieldLogger, +) (*uploader.SnapshotInfo, bool, error) { if fsUploader == nil { return nil, false, errors.New("get empty kopia uploader") } @@ -377,7 +382,8 @@ func findPreviousSnapshotManifest(ctx context.Context, rep repo.Repository, sour // Restore restore specific sourcePath with given snapshotID and update progress func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, snapshotID, dest string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, - log logrus.FieldLogger, cancleCh chan struct{}) (int64, int32, error) { + log logrus.FieldLogger, cancleCh chan struct{}, +) (int64, int32, error) { log.Info("Start to restore...") kopiaCtx := kopia.SetupKopiaLog(ctx, log) @@ -449,7 +455,6 @@ func Restore(ctx context.Context, rep repo.RepositoryWriter, progress *Progress, progress.ProgressBytes(stats.RestoredTotalFileSize, stats.EnqueuedTotalFileSize) }, }) - if err != nil { return 0, 0, errors.Wrapf(err, "Failed to copy snapshot data to the target") } diff --git a/pkg/uploader/provider/kopia.go b/pkg/uploader/provider/kopia.go index eafac59dc0..167be26e54 100644 --- a/pkg/uploader/provider/kopia.go +++ b/pkg/uploader/provider/kopia.go @@ -37,9 +37,11 @@ import ( ) // BackupFunc mainly used to make testing more convenient -var BackupFunc = kopia.Backup -var RestoreFunc = kopia.Restore -var BackupRepoServiceCreateFunc = service.Create +var ( + BackupFunc = kopia.Backup + RestoreFunc = kopia.Restore + BackupRepoServiceCreateFunc = service.Create +) // kopiaProvider recorded info related with kopiaProvider type kopiaProvider struct { @@ -63,7 +65,7 @@ func NewKopiaUploaderProvider( log: log, credGetter: credGetter, } - //repoUID which is used to generate kopia repository config with unique directory path + // repoUID which is used to generate 
kopia repository config with unique directory path repoUID := string(backupRepo.GetUID()) repoOpt, err := udmrepo.NewRepoOptions( udmrepo.WithPassword(kp, ""), @@ -120,7 +122,8 @@ func (kp *kopiaProvider) RunBackup( parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, - updater uploader.ProgressUpdater) (string, bool, error) { + updater uploader.ProgressUpdater, +) (string, bool, error) { if updater == nil { return "", false, errors.New("Need to initial backup progress updater first") } @@ -211,7 +214,8 @@ func (kp *kopiaProvider) RunRestore( volumePath string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, - updater uploader.ProgressUpdater) error { + updater uploader.ProgressUpdater, +) error { log := kp.log.WithFields(logrus.Fields{ "snapshotID": snapshotID, "volumePath": volumePath, @@ -233,7 +237,6 @@ func (kp *kopiaProvider) RunRestore( // Otherwise, Kopia restore will not response to the cancel control but return an arbitrary error. // Kopia restore cancel is not designed as well as Kopia backup which uses the context to control backup cancel all the way. size, fileCount, err := RestoreFunc(context.Background(), repoWriter, progress, snapshotID, volumePath, volMode, uploaderCfg, log, restoreCancel) - if err != nil { return errors.Wrapf(err, "Failed to run kopia restore") } diff --git a/pkg/uploader/provider/provider.go b/pkg/uploader/provider/provider.go index 20a3dc4368..2347f3e1f0 100644 --- a/pkg/uploader/provider/provider.go +++ b/pkg/uploader/provider/provider.go @@ -32,8 +32,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/uploader" ) -const restoreProgressCheckInterval = 10 * time.Second -const backupProgressCheckInterval = 10 * time.Second +const ( + restoreProgressCheckInterval = 10 * time.Second + backupProgressCheckInterval = 10 * time.Second +) var ErrorCanceled error = errors.New("uploader is canceled") diff --git a/pkg/uploader/provider/restic.go b/pkg/uploader/provider/restic.go index 5878461f49..32e81b5602 100644 --- a/pkg/uploader/provider/restic.go +++ b/pkg/uploader/provider/restic.go @@ -35,13 +35,15 @@ import ( ) // resticBackupCMDFunc and resticRestoreCMDFunc are mainly used to make testing more convenient -var resticBackupCMDFunc = restic.BackupCommand -var resticBackupFunc = restic.RunBackup -var resticGetSnapshotFunc = restic.GetSnapshotCommand -var resticGetSnapshotIDFunc = restic.GetSnapshotID -var resticRestoreCMDFunc = restic.RestoreCommand -var resticTempCACertFileFunc = restic.TempCACertFile -var resticCmdEnvFunc = restic.CmdEnv +var ( + resticBackupCMDFunc = restic.BackupCommand + resticBackupFunc = restic.RunBackup + resticGetSnapshotFunc = restic.GetSnapshotCommand + resticGetSnapshotIDFunc = restic.GetSnapshotID + resticRestoreCMDFunc = restic.RestoreCommand + resticTempCACertFileFunc = restic.TempCACertFile + resticCmdEnvFunc = restic.CmdEnv +) type resticProvider struct { repoIdentifier string @@ -124,7 +126,8 @@ func (rp *resticProvider) RunBackup( parentSnapshot string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, - updater uploader.ProgressUpdater) (string, bool, error) { + updater uploader.ProgressUpdater, +) (string, bool, error) { if updater == nil { return "", false, errors.New("Need to initial backup progress updater first") } @@ -198,7 +201,8 @@ func (rp *resticProvider) RunRestore( volumePath string, volMode uploader.PersistentVolumeMode, uploaderCfg map[string]string, - updater uploader.ProgressUpdater) error { + updater uploader.ProgressUpdater, 
+) error { if updater == nil { return errors.New("Need to initial backup progress updater first") } diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index 2ef72b134b..c3a7853460 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -73,7 +73,8 @@ func TestResticRunBackup(t *testing.T) { errorHandleFunc: func(err error) bool { return strings.Contains(err.Error(), "error running") }, - }, { + }, + { name: "has parent snapshot", rp: &resticProvider{log: logrus.New()}, parentSnapshot: "parentSnapshot", @@ -315,7 +316,8 @@ func TestNewResticUploaderProvider(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, provider) }, - }, { + }, + { name: "Error in creating temp credentials file", mockCredFunc: func(credGetter *MockCredentialGetter, repoKeySelector *v1.SecretKeySelector) { credGetter.On("Path", repoKeySelector).Return("", errors.New("error creating temp credentials file")) @@ -324,7 +326,8 @@ func TestNewResticUploaderProvider(t *testing.T) { assert.Error(t, err) assert.Nil(t, provider) }, - }, { + }, + { name: "ObjectStorage with CACert present and creating CACert file failed", mockCredFunc: func(credGetter *MockCredentialGetter, repoKeySelector *v1.SecretKeySelector) { credGetter.On("Path", repoKeySelector).Return("temp-credentials", nil) @@ -336,7 +339,8 @@ func TestNewResticUploaderProvider(t *testing.T) { assert.Error(t, err) assert.Nil(t, provider) }, - }, { + }, + { name: "Generating repository cmd failed", mockCredFunc: func(credGetter *MockCredentialGetter, repoKeySelector *v1.SecretKeySelector) { credGetter.On("Path", repoKeySelector).Return("temp-credentials", nil) @@ -351,7 +355,8 @@ func TestNewResticUploaderProvider(t *testing.T) { assert.Error(t, err) assert.Nil(t, provider) }, - }, { + }, + { name: "New provider with not nil bsl", mockCredFunc: func(credGetter *MockCredentialGetter, repoKeySelector *v1.SecretKeySelector) { credGetter.On("Path", repoKeySelector).Return("temp-credentials", nil) diff --git a/pkg/util/actionhelpers/rbac.go b/pkg/util/actionhelpers/rbac.go index b763ec8589..533964111b 100644 --- a/pkg/util/actionhelpers/rbac.go +++ b/pkg/util/actionhelpers/rbac.go @@ -35,8 +35,7 @@ type ClusterRoleBindingLister interface { } // noopClusterRoleBindingLister exists to handle clusters where RBAC is disabled. -type NoopClusterRoleBindingLister struct { -} +type NoopClusterRoleBindingLister struct{} func (noop NoopClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) { return []ClusterRoleBinding{}, nil diff --git a/pkg/util/csi/volume_snapshot.go b/pkg/util/csi/volume_snapshot.go index 53796ab972..52317ca8aa 100644 --- a/pkg/util/csi/volume_snapshot.go +++ b/pkg/util/csi/volume_snapshot.go @@ -131,7 +131,8 @@ func GetVolumeSnapshotContentForVolumeSnapshot( // RetainVSC updates the VSC's deletion policy to Retain and then return the update VSC func RetainVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, - vsc *snapshotv1api.VolumeSnapshotContent) (*snapshotv1api.VolumeSnapshotContent, error) { + vsc *snapshotv1api.VolumeSnapshotContent, +) (*snapshotv1api.VolumeSnapshotContent, error) { if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentRetain { return vsc, nil } @@ -161,7 +162,8 @@ func DeleteVolumeSnapshotContentIfAny( // EnsureDeleteVS asserts the existence of a VS by name, deletes it and waits for its // disappearance and returns errors on any failure. 
func EnsureDeleteVS(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, - vsName string, vsNamespace string, timeout time.Duration) error { + vsName string, vsNamespace string, timeout time.Duration, +) error { err := snapshotClient.VolumeSnapshots(vsNamespace).Delete(ctx, vsName, metav1.DeleteOptions{}) if err != nil { return errors.Wrap(err, "error to delete volume snapshot") @@ -181,7 +183,6 @@ func EnsureDeleteVS(ctx context.Context, snapshotClient snapshotter.SnapshotV1In updated = vs return false, nil }) - if err != nil { if errors.Is(err, context.DeadlineExceeded) { return errors.Errorf("timeout to assure VolumeSnapshot %s is deleted, finalizers in VS %v", vsName, updated.Finalizers) @@ -220,7 +221,8 @@ func RemoveVSCProtect(ctx context.Context, snapshotClient snapshotter.SnapshotV1 // EnsureDeleteVSC asserts the existence of a VSC by name, deletes it and waits for its // disappearance and returns errors on any failure. func EnsureDeleteVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, - vscName string, timeout time.Duration) error { + vscName string, timeout time.Duration, +) error { err := snapshotClient.VolumeSnapshotContents().Delete(ctx, vscName, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "error to delete volume snapshot content") @@ -240,7 +242,6 @@ func EnsureDeleteVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1I updated = vsc return false, nil }) - if err != nil { if errors.Is(err, context.DeadlineExceeded) { return errors.Errorf("timeout to assure VolumeSnapshotContent %s is deleted, finalizers in VSC %v", vscName, updated.Finalizers) @@ -763,7 +764,6 @@ func WaitUntilVSCHandleIsReady( return true, nil }, ) - if err != nil { if wait.Interrupted(err) { if vsc != nil && diff --git a/pkg/util/csi/volume_snapshot_test.go b/pkg/util/csi/volume_snapshot_test.go index e5a87c2bdd..6e355b714d 100644 --- a/pkg/util/csi/volume_snapshot_test.go +++ b/pkg/util/csi/volume_snapshot_test.go @@ -1095,7 +1095,8 @@ func TestGetVolumeSnapshotClassForStorageClass(t *testing.T) { snapshotClasses := &snapshotv1api.VolumeSnapshotClassList{ Items: []snapshotv1api.VolumeSnapshotClass{ - *hostpathClass, *fooClass, *barClass, *bazClass, *ambClass1, *ambClass2}, + *hostpathClass, *fooClass, *barClass, *bazClass, *ambClass1, *ambClass2, + }, } testCases := []struct { diff --git a/pkg/util/kube/periodical_enqueue_source.go b/pkg/util/kube/periodical_enqueue_source.go index 4713343576..3b7b3013eb 100644 --- a/pkg/util/kube/periodical_enqueue_source.go +++ b/pkg/util/kube/periodical_enqueue_source.go @@ -39,7 +39,8 @@ func NewPeriodicalEnqueueSource( client client.Client, objList client.ObjectList, period time.Duration, - option PeriodicalEnqueueSourceOption) *PeriodicalEnqueueSource { + option PeriodicalEnqueueSourceOption, +) *PeriodicalEnqueueSource { return &PeriodicalEnqueueSource{ logger: logger.WithField("resource", reflect.TypeOf(objList).String()), Client: client, diff --git a/pkg/util/kube/pod.go b/pkg/util/kube/pod.go index 04457f0d4b..3e9bfe6051 100644 --- a/pkg/util/kube/pod.go +++ b/pkg/util/kube/pod.go @@ -119,7 +119,6 @@ func EnsureDeletePod(ctx context.Context, podGetter corev1client.CoreV1Interface updated = po return false, nil }) - if err != nil { if errors.Is(err, context.DeadlineExceeded) { return errors.Errorf("timeout to assure pod %s is deleted, finalizers in pod %v", pod, updated.Finalizers) diff --git a/pkg/util/kube/pvc_pv.go b/pkg/util/kube/pvc_pv.go index 
a80405874b..04b14b0f68 100644 --- a/pkg/util/kube/pvc_pv.go +++ b/pkg/util/kube/pvc_pv.go @@ -78,7 +78,8 @@ func DeletePVAndPVCIfAny(ctx context.Context, client corev1client.CoreV1Interfac // WaitPVCBound wait for binding of a PVC specified by name and returns the bound PV object func WaitPVCBound(ctx context.Context, pvcGetter corev1client.CoreV1Interface, - pvGetter corev1client.CoreV1Interface, pvc string, namespace string, timeout time.Duration) (*corev1api.PersistentVolume, error) { + pvGetter corev1client.CoreV1Interface, pvc string, namespace string, timeout time.Duration, +) (*corev1api.PersistentVolume, error) { var updated *corev1api.PersistentVolumeClaim err := wait.PollUntilContextTimeout(ctx, waitInternal, timeout, true, func(ctx context.Context) (bool, error) { tmpPVC, err := pvcGetter.PersistentVolumeClaims(namespace).Get(ctx, pvc, metav1.GetOptions{}) @@ -94,7 +95,6 @@ func WaitPVCBound(ctx context.Context, pvcGetter corev1client.CoreV1Interface, return true, nil }) - if err != nil { return nil, errors.Wrap(err, "error to wait for rediness of PVC") } @@ -145,7 +145,6 @@ func EnsureDeletePVC(ctx context.Context, pvcGetter corev1client.CoreV1Interface updated = pvc return false, nil }) - if err != nil { if errors.Is(err, context.DeadlineExceeded) { return errors.Errorf("timeout to assure pvc %s is deleted, finalizers in pvc %v", pvcName, updated.Finalizers) @@ -159,7 +158,8 @@ func EnsureDeletePVC(ctx context.Context, pvcGetter corev1client.CoreV1Interface // RebindPVC rebinds a PVC by modifying its VolumeName to the specific PV func RebindPVC(ctx context.Context, pvcGetter corev1client.CoreV1Interface, - pvc *corev1api.PersistentVolumeClaim, pv string) (*corev1api.PersistentVolumeClaim, error) { + pvc *corev1api.PersistentVolumeClaim, pv string, +) (*corev1api.PersistentVolumeClaim, error) { origBytes, err := json.Marshal(pvc) if err != nil { return nil, errors.Wrap(err, "error marshaling original PVC") @@ -190,7 +190,8 @@ func RebindPVC(ctx context.Context, pvcGetter corev1client.CoreV1Interface, // ResetPVBinding resets the binding info of a PV and adds the required labels so as to make it ready for binding func ResetPVBinding(ctx context.Context, pvGetter corev1client.CoreV1Interface, pv *corev1api.PersistentVolume, - labels map[string]string, pvc *corev1api.PersistentVolumeClaim) (*corev1api.PersistentVolume, error) { + labels map[string]string, pvc *corev1api.PersistentVolumeClaim, +) (*corev1api.PersistentVolume, error) { origBytes, err := json.Marshal(pv) if err != nil { return nil, errors.Wrap(err, "error marshaling original PV") @@ -236,7 +237,8 @@ func ResetPVBinding(ctx context.Context, pvGetter corev1client.CoreV1Interface, // SetPVReclaimPolicy sets the specified reclaim policy to a PV func SetPVReclaimPolicy(ctx context.Context, pvGetter corev1client.CoreV1Interface, pv *corev1api.PersistentVolume, - policy corev1api.PersistentVolumeReclaimPolicy) (*corev1api.PersistentVolume, error) { + policy corev1api.PersistentVolumeReclaimPolicy, +) (*corev1api.PersistentVolume, error) { if pv.Spec.PersistentVolumeReclaimPolicy == policy { return nil, nil } @@ -271,7 +273,8 @@ func SetPVReclaimPolicy(ctx context.Context, pvGetter corev1client.CoreV1Interfa // nothing if the consuming doesn't affect the PV provision. // The latest PVC and the selected node will be returned. 
func WaitPVCConsumed(ctx context.Context, pvcGetter corev1client.CoreV1Interface, pvc string, namespace string, - storageClient storagev1.StorageV1Interface, timeout time.Duration, ignoreConsume bool) (string, *corev1api.PersistentVolumeClaim, error) { + storageClient storagev1.StorageV1Interface, timeout time.Duration, ignoreConsume bool, +) (string, *corev1api.PersistentVolumeClaim, error) { selectedNode := "" var updated *corev1api.PersistentVolumeClaim var storageClass *storagev1api.StorageClass @@ -304,7 +307,6 @@ func WaitPVCConsumed(ctx context.Context, pvcGetter corev1client.CoreV1Interface return true, nil }) - if err != nil { return "", nil, errors.Wrap(err, "error to wait for PVC") } @@ -432,7 +434,8 @@ func DiagnosePV(pv *corev1api.PersistentVolume) string { } func GetPVCAttachingNodeOS(pvc *corev1api.PersistentVolumeClaim, nodeClient corev1client.CoreV1Interface, - storageClient storagev1.StorageV1Interface, log logrus.FieldLogger) (string, error) { + storageClient storagev1.StorageV1Interface, log logrus.FieldLogger, +) (string, error) { var nodeOS string var scFsType string diff --git a/pkg/util/kube/pvc_pv_test.go b/pkg/util/kube/pvc_pv_test.go index 59dfc5788c..faca2ef0f7 100644 --- a/pkg/util/kube/pvc_pv_test.go +++ b/pkg/util/kube/pvc_pv_test.go @@ -1159,9 +1159,7 @@ func TestIsPVCBound(t *testing.T) { } } -var ( - csiStorageClass = "csi-hostpath-sc" -) +var csiStorageClass = "csi-hostpath-sc" func TestGetPVForPVC(t *testing.T) { boundPVC := &corev1api.PersistentVolumeClaim{ diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 5d64f117ba..2f8d79994d 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -110,7 +110,6 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core ready = true return true, nil }) - // err will be set if we timed out or encountered issues retrieving the namespace, if err != nil { if terminatingNamespace { @@ -180,9 +179,9 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1 // GetVolumeMode gets the uploader.PersistentVolumeMode of the volume. func GetVolumeMode(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) ( - uploader.PersistentVolumeMode, error) { + uploader.PersistentVolumeMode, error, +) { _, pv, _, err := GetPodPVCVolume(ctx, log, pod, volumeName, cli) - if err != nil { if err == ErrorPodVolumeIsNotPVC { return uploader.PersistentVolumeFilesystem, nil @@ -199,7 +198,8 @@ func GetVolumeMode(ctx context.Context, log logrus.FieldLogger, pod *corev1api.P // GetPodPVCVolume gets the PVC, PV and volume for a pod volume name. 
// Returns pod volume in case of ErrorPodVolumeIsNotPVC error func GetPodPVCVolume(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) ( - *corev1api.PersistentVolumeClaim, *corev1api.PersistentVolume, *corev1api.Volume, error) { + *corev1api.PersistentVolumeClaim, *corev1api.PersistentVolume, *corev1api.Volume, error, +) { var volume *corev1api.Volume for i := range pod.Spec.Volumes { diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index 35792c5272..823730c160 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -42,7 +42,7 @@ import ( ) func TestNamespaceAndName(t *testing.T) { - //TODO + // TODO } func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { @@ -489,8 +489,8 @@ func TestIsCRDReady(t *testing.T) { func TestSinglePathMatch(t *testing.T) { fakeFS := velerotest.NewFakeFileSystem() - fakeFS.MkdirAll("testDir1/subpath", 0755) - fakeFS.MkdirAll("testDir2/subpath", 0755) + fakeFS.MkdirAll("testDir1/subpath", 0o755) + fakeFS.MkdirAll("testDir2/subpath", 0o755) _, err := SinglePathMatch("./*/subpath", fakeFS, logrus.StandardLogger()) assert.Error(t, err) diff --git a/pkg/util/logging/log_merge_hook.go b/pkg/util/logging/log_merge_hook.go index b993cb38ab..b775608beb 100644 --- a/pkg/util/logging/log_merge_hook.go +++ b/pkg/util/logging/log_merge_hook.go @@ -35,8 +35,7 @@ const ( // It hooks a log with ListeningMessage message, once the message is hit it replaces // the logger's output to HookWriter so that HookWriter retrieves the logs from a file indicated // by LogSourceKey field. -type MergeHook struct { -} +type MergeHook struct{} type hookWriter struct { orgWriter io.Writer @@ -80,7 +79,7 @@ func (w *hookWriter) Write(p []byte) (n int, err error) { w.logger.Out = w.orgWriter }() - sourceFile, err := os.OpenFile(w.source, os.O_RDONLY, 0400) + sourceFile, err := os.OpenFile(w.source, os.O_RDONLY, 0o400) if err != nil { return 0, err } diff --git a/pkg/util/podvolume/pod_volume_test.go b/pkg/util/podvolume/pod_volume_test.go index 9f6738a652..baeee1c727 100644 --- a/pkg/util/podvolume/pod_volume_test.go +++ b/pkg/util/podvolume/pod_volume_test.go @@ -134,9 +134,13 @@ func TestGetVolumesByPod(t *testing.T) { Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ // PVB Volumes - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, /// Excluded from PVB through annotation - {Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"}, + {Name: "nonPvbPV1"}, + {Name: "nonPvbPV2"}, + {Name: "nonPvbPV3"}, }, }, }, @@ -155,7 +159,9 @@ func TestGetVolumesByPod(t *testing.T) { Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ // PVB Volumes - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, /// Excluded from PVB because colume mounting default service account token {Name: "default-token-5xq45"}, }, @@ -181,9 +187,13 @@ func TestGetVolumesByPod(t *testing.T) { Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ // PVB Volumes - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, /// Excluded from pod volume backup through annotation - {Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"}, + {Name: "nonPvbPV1"}, + {Name: "nonPvbPV2"}, + {Name: "nonPvbPV3"}, // Excluded from pod volume backup because hostpath {Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: 
&corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}}, }, @@ -209,9 +219,13 @@ func TestGetVolumesByPod(t *testing.T) { Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ // PVB Volumes - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, /// Excluded from pod volume backup through annotation - {Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"}, + {Name: "nonPvbPV1"}, + {Name: "nonPvbPV2"}, + {Name: "nonPvbPV3"}, // Excluded from pod volume backup because hostpath {Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}}, }, @@ -237,9 +251,13 @@ func TestGetVolumesByPod(t *testing.T) { Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ // PVB Volumes - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, /// Excluded from pod volume backup through annotation - {Name: "nonPvbPV1"}, {Name: "nonPvbPV2"}, {Name: "nonPvbPV3"}, + {Name: "nonPvbPV1"}, + {Name: "nonPvbPV2"}, + {Name: "nonPvbPV3"}, // Excluded from pod volume backup because hostpath {Name: "appCOnfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}}, }, @@ -264,7 +282,9 @@ func TestGetVolumesByPod(t *testing.T) { }, Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, { Name: "projected", VolumeSource: corev1api.VolumeSource{ @@ -305,7 +325,9 @@ func TestGetVolumesByPod(t *testing.T) { }, Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, { Name: "downwardAPI", VolumeSource: corev1api.VolumeSource{ @@ -345,7 +367,9 @@ func TestGetVolumesByPod(t *testing.T) { }, Spec: corev1api.PodSpec{ Volumes: []corev1api.Volume{ - {Name: "pvbPV1"}, {Name: "pvbPV2"}, {Name: "pvbPV3"}, + {Name: "pvbPV1"}, + {Name: "pvbPV2"}, + {Name: "pvbPV3"}, { Name: "downwardAPI", VolumeSource: corev1api.VolumeSource{