From 6eead06d688f7336eab681aa269347d869d01c1b Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Thu, 3 Nov 2022 17:14:50 -0500 Subject: [PATCH 01/41] WIP: Check PVC access mode --- pkg/migrate/migrate.go | 190 ++++++++++++++++++++++++++++++++++------- 1 file changed, 159 insertions(+), 31 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 9fd5514..d378b43 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -22,12 +22,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" ) -const baseAnnotation = "kurl.sh/pvcmigrate" -const scaleAnnotation = baseAnnotation + "-scale" -const kindAnnotation = baseAnnotation + "-kind" -const sourceNsAnnotation = baseAnnotation + "-sourcens" -const sourcePvcAnnotation = baseAnnotation + "-sourcepvc" -const desiredReclaimAnnotation = baseAnnotation + "-reclaim" +const ( + baseAnnotation = "kurl.sh/pvcmigrate" + scaleAnnotation = baseAnnotation + "-scale" + kindAnnotation = baseAnnotation + "-kind" + sourceNsAnnotation = baseAnnotation + "-sourcens" + sourcePvcAnnotation = baseAnnotation + "-sourcepvc" + desiredReclaimAnnotation = baseAnnotation + "-reclaim" +) // IsDefaultStorageClassAnnotation - this is also exported by https://github.com/kubernetes/kubernetes/blob/v1.21.3/pkg/apis/storage/v1/util/helpers.go#L25 // but that would require adding the k8s import overrides to our go.mod @@ -48,6 +50,7 @@ type Options struct { SetDefaults bool VerboseCopy bool SkipSourceValidation bool + PvcCopyTimeout int } // Cli uses CLI options to run Migrate @@ -61,6 +64,7 @@ func Cli() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass 
name, even if that StorageClass does not exist") + flag.IntVar(&options.PvcCopyTimeout, "timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.Parse() @@ -98,12 +102,21 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return err } + // validate pvc access modes + unsupportedPVcs, err := validatePVCs(ctx, w, clientset, matchingPVCs) + if err != nil { + return err + } + if unsupportedPVcs != nil { + // TODO: format and return error + } + updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5) if err != nil { return fmt.Errorf("failed to scale down pods: %w", err) } - err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Second) + err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Duration(options.PvcCopyTimeout)) if err != nil { return err } @@ -212,7 +225,7 @@ func swapDefaultStorageClasses(ctx context.Context, w *log.Logger, clientset k8s return nil } -func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, waitTime time.Duration) error { +func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, timeout time.Duration) error { // create a pod for each PVC migration, and wait for it to finish w.Printf("\nCopying data from %s PVCs to %s PVCs\n", sourceSCName, destSCName) for ns, nsPvcs := range matchingPVCs { @@ -220,7 +233,10 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa sourcePvcName, destPvcName := nsPvc.claim.Name, 
newPvcName(nsPvc.claim.Name) w.Printf("Copying data from %s (%s) to %s in %s\n", sourcePvcName, nsPvc.claim.Spec.VolumeName, destPvcName, ns) - err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, waitTime, nsPvc.getNodeNameRef()) + // setup timeout + timeoutCtx, cancelCtx := context.WithTimeout(ctx, timeout) + defer cancelCtx() + err := copyOnePVC(timeoutCtx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, nsPvc.getNodeNameRef()) if err != nil { return fmt.Errorf("failed to copy PVC %s in %s: %w", nsPvc.claim.Name, ns, err) } @@ -229,7 +245,7 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa return nil } -func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, waitTime time.Duration, nodeName string) error { +func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, nodeName string) error { w.Printf("Creating pvc migrator pod on node %s\n", nodeName) createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName) if err != nil { @@ -245,8 +261,10 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac } }() - // wait for the pod to be created - time.Sleep(waitTime) + // initial wait for the pod to be created + waitInterval := time.Duration(time.Second * 1) + time.Sleep(waitInterval) + for { gotPod, err := clientset.CoreV1().Pods(ns).Get(ctx, createdPod.Name, metav1.GetOptions{}) if err != nil { @@ -255,7 +273,7 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac } if gotPod.Status.Phase == corev1.PodPending { - time.Sleep(waitTime) + time.Sleep(waitInterval) continue } @@ -265,6 +283,13 @@ func copyOnePVC(ctx context.Context, w 
*log.Logger, clientset k8sclient.Interfac } w.Printf("got status %s for pod %s, this is likely an error\n", gotPod.Status.Phase, gotPod.Name) + + select { + case <-ctx.Done(): + err = getPvcError(ctx, w, clientset, destPvcName, ns) + w.Printf("ERROR: Copy operation from PVC %s to PVC %s timed out\n", sourcePvcName, destPvcName) + return fmt.Errorf("context deadline exceeded waiting for migration pod %s to go into Running phase: %w", createdPod.Name, err) + } } w.Printf("migrating PVC %s:\n", sourcePvcName) @@ -291,8 +316,8 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac break } - //if the pod is running, wait to see if getting logs works in a few seconds - time.Sleep(waitTime) + // if the pod is running, wait to see if getting logs works in a few seconds + time.Sleep(waitInterval) } } @@ -340,7 +365,7 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac return fmt.Errorf("logs for the migration pod %s in %s ended, but the status was %s and not succeeded", createdPod.Name, ns, gotPod.Status.Phase) } - time.Sleep(waitTime) + time.Sleep(waitInterval) } w.Printf("finished migrating PVC %s\n", sourcePvcName) @@ -348,7 +373,6 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac } func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, nodeName string) (*corev1.Pod, error) { - // apply nodeAffinity when migrating to a local volume provisioner var nodeAffinity *corev1.Affinity if isDestScLocalVolumeProvisioner && nodeName != "" { @@ -435,7 +459,6 @@ func createMigrationPod(ctx context.Context, clientset k8sclient.Interface, ns s }, metav1.CreateOptions{}) if err != nil { return nil, fmt.Errorf("failed to create pod to migrate PVC %s to %s in %s: %w", sourcePvcName, destPvcName, ns, err) - } return createdPod, nil } @@ -709,7 +732,6 @@ func mutateSC(ctx context.Context, w *log.Logger, 
clientset k8sclient.Interface, // if waitForCleanup is true, after scaling down deployments/statefulsets it will wait for all pods to be deleted. // It returns a map of namespace to PVCs and any errors encountered. func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, matchingPVCs map[string][]pvcCtx, checkInterval time.Duration) (map[string][]pvcCtx, error) { - // build new map with complete pvcCtx updatedPVCs := matchingPVCs @@ -808,11 +830,11 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter ss.Spec.Replicas = &int320 // add an annotation with the current scale (if it does not already exist) - if ss.ObjectMeta.Annotations == nil { - ss.ObjectMeta.Annotations = map[string]string{} + if ss.Annotations == nil { + ss.Annotations = map[string]string{} } - if _, ok := ss.ObjectMeta.Annotations[scaleAnnotation]; !ok { - ss.ObjectMeta.Annotations[scaleAnnotation] = fmt.Sprintf("%d", formerScale) + if _, ok := ss.Annotations[scaleAnnotation]; !ok { + ss.Annotations[scaleAnnotation] = fmt.Sprintf("%d", formerScale) } w.Printf("scaling StatefulSet %s from %d to 0 in %s\n", ownerName, formerScale, ns) @@ -846,11 +868,11 @@ func scaleDownPods(ctx context.Context, w *log.Logger, clientset k8sclient.Inter dep.Spec.Replicas = &int320 // add an annotation with the current scale (if it does not already exist) - if dep.ObjectMeta.Annotations == nil { - dep.ObjectMeta.Annotations = map[string]string{} + if dep.Annotations == nil { + dep.Annotations = map[string]string{} } - if _, ok := dep.ObjectMeta.Annotations[scaleAnnotation]; !ok { - dep.ObjectMeta.Annotations[scaleAnnotation] = fmt.Sprintf("%d", formerScale) + if _, ok := dep.Annotations[scaleAnnotation]; !ok { + dep.Annotations[scaleAnnotation] = fmt.Sprintf("%d", formerScale) } w.Printf("scaling Deployment %s from %d to 0 in %s\n", ownerName, formerScale, ns) @@ -910,12 +932,12 @@ func scaleUpPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa 
return fmt.Errorf("failed to get statefulsets in %s: %w", ns, err) } for _, ss := range sses.Items { - if desiredScale, ok := ss.ObjectMeta.Annotations[scaleAnnotation]; ok { + if desiredScale, ok := ss.Annotations[scaleAnnotation]; ok { desiredScaleInt, err := strconv.Atoi(desiredScale) if err != nil { return fmt.Errorf("failed to parse scale %q for StatefulSet %s in %s: %w", desiredScale, ss.Name, ns, err) } - delete(ss.ObjectMeta.Annotations, scaleAnnotation) + delete(ss.Annotations, scaleAnnotation) desiredScaleInt32 := int32(desiredScaleInt) ss.Spec.Replicas = &desiredScaleInt32 @@ -934,12 +956,12 @@ func scaleUpPods(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa return fmt.Errorf("failed to get deployments in %s: %w", ns, err) } for _, dep := range deps.Items { - if desiredScale, ok := dep.ObjectMeta.Annotations[scaleAnnotation]; ok { + if desiredScale, ok := dep.Annotations[scaleAnnotation]; ok { desiredScaleInt, err := strconv.Atoi(desiredScale) if err != nil { return fmt.Errorf("failed to parse scale %q for Deployment %s in %s: %w", desiredScale, dep.Name, ns, err) } - delete(dep.ObjectMeta.Annotations, scaleAnnotation) + delete(dep.Annotations, scaleAnnotation) desiredScaleInt32 := int32(desiredScaleInt) dep.Spec.Replicas = &desiredScaleInt32 @@ -1146,3 +1168,109 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. 
return nil } + +func validatePVCs(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcs map[string][]pvcCtx) ([]corev1.PersistentVolumeClaim, error) { + var invalidPVclaims []corev1.PersistentVolumeClaim + + for _, pvcCtxs := range pvcs { + for _, pvcCtx := range pvcCtxs { + validPVC, err := isVolumeAccessModeValid(ctx, l, clientset, *pvcCtx.claim) + if err != nil { + l.Printf("could not validate access mode of %+q for PVC %s", pvcCtx.claim.Spec.AccessModes, pvcCtx.claim.Name) + } + if !validPVC { + invalidPVclaims = append(invalidPVclaims, *pvcCtx.claim) + } + } + } + + if len(invalidPVclaims) > 0 { + return invalidPVclaims, nil + } + return nil, nil +} + +func isVolumeAccessModeValid(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (bool, error) { + pvcConsumerPod := getPvcConsumerPod(pvc) + // TODO: Determine if pod was created + + // cleanup pod after completion + defer func() { + err = clientset.CoreV1().Pods(pvc.Namespace).Delete(context.TODO(), pvcConsumerPod.Name, metav1.DeleteOptions{}) + if err != nil { + l.Printf("failed to delete PVC consumer pod %s: %v", pvcConsumerPod.Name, err) + } + }() + + return true, nil +} + +func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-access-mode-pvc-" + pvc.Name, + Namespace: pvc.Namespace, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: *pvc.Spec.StorageClassName + "-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/dest", + Name: 
*pvc.Spec.StorageClassName + "-volume", + }, + }, + }, + }, + }, + } +} + +// getPvcError returns the reason for why a PVC is in Pending status +// returns nil if PVC is not pending +func getPvcError(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcName string, namespace string) error { + pvc, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to check access mode for PVC %s: %w", pvcName, err) + } + + // no need to inspect pvc + if pvc.Status.Phase != corev1.ClaimPending { + return nil + } + + eventSelector := clientset.CoreV1().Events(namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) + pvcEvents, err := clientset.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) + if err != nil { + return fmt.Errorf("failed to list events for PVC %s", pvcName) + } + + // get pending reason + for _, event := range pvcEvents.Items { + if event.Reason == "ProvisioningFailed" { + return fmt.Errorf("PVC %s could not be bound: %s", pvcName, event.Message) + } + } + return fmt.Errorf("Could not determine reason for why PVC %s is in Pending phase", pvcName) +} From 3ac07a8d217db7e96dc3127f4627c0793ccb5972 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 4 Nov 2022 18:07:07 -0500 Subject: [PATCH 02/41] Still WIP --- go.mod | 1 + go.sum | 2 + pkg/migrate/migrate.go | 87 +++++++++++++++++++++++++----------------- 3 files changed, 55 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index a271bb8..f9234a6 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( k8s.io/api v0.25.4 k8s.io/apimachinery v0.25.4 k8s.io/client-go v0.25.3 + k8s.io/kubernetes v1.25.3 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed sigs.k8s.io/controller-runtime v0.13.1 ) diff --git a/go.sum b/go.sum index 9b5c7b2..b9c559f 100644 --- a/go.sum +++ b/go.sum @@ 
-674,6 +674,8 @@ k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kubernetes v1.25.3 h1:Ljx/Ew9+dt7rN9ob3V+N/aoDy7nDSbmr35IbYGRTyqE= +k8s.io/kubernetes v1.25.3/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index d378b43..94db36c 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -19,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" + k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" "sigs.k8s.io/controller-runtime/pkg/client/config" ) @@ -103,12 +104,10 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, } // validate pvc access modes - unsupportedPVcs, err := validatePVCs(ctx, w, clientset, matchingPVCs) - if err != nil { - return err - } + unsupportedPVcs := validatePVCs(ctx, w, clientset, matchingPVCs) if unsupportedPVcs != nil { // TODO: format and return error + return fmt.Errorf("Some PVCs are not supported for storage class %s", options.DestSCName) } updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5) @@ -159,6 +158,11 @@ func (pvc pvcCtx) getNodeNameRef() string { return pvc.usedByPod.Spec.NodeName } +type pvcValidation struct { + valid bool + reason string +} + // swapDefaultStorageClasses attempts to set newDefaultSC as 
the default StorageClass // if oldDefaultSC was set as the default, then it will be unset first // if another StorageClass besides these two is currently the default, it will return an error @@ -286,9 +290,10 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac select { case <-ctx.Done(): - err = getPvcError(ctx, w, clientset, destPvcName, ns) + // TODO: revisit this w.Printf("ERROR: Copy operation from PVC %s to PVC %s timed out\n", sourcePvcName, destPvcName) return fmt.Errorf("context deadline exceeded waiting for migration pod %s to go into Running phase: %w", createdPod.Name, err) + default: } } @@ -1169,40 +1174,57 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. return nil } -func validatePVCs(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcs map[string][]pvcCtx) ([]corev1.PersistentVolumeClaim, error) { - var invalidPVclaims []corev1.PersistentVolumeClaim +func validatePVCs(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcs map[string][]pvcCtx) map[string]map[string]string { + validationErrors := make(map[string]map[string]string) - for _, pvcCtxs := range pvcs { + for ns, pvcCtxs := range pvcs { for _, pvcCtx := range pvcCtxs { - validPVC, err := isVolumeAccessModeValid(ctx, l, clientset, *pvcCtx.claim) + v, err := checkVolumeAccessModeValid(ctx, l, clientset, *pvcCtx.claim) if err != nil { - l.Printf("could not validate access mode of %+q for PVC %s", pvcCtx.claim.Spec.AccessModes, pvcCtx.claim.Name) - } - if !validPVC { - invalidPVclaims = append(invalidPVclaims, *pvcCtx.claim) + validationErrors[ns] = map[string]string{pvcCtx.claim.Name: fmt.Sprintf("Failed to validate volume access mode: %s", err)} + continue } + validationErrors[ns] = map[string]string{pvcCtx.claim.Name: v.reason} } } - - if len(invalidPVclaims) > 0 { - return invalidPVclaims, nil - } - return nil, nil + return validationErrors } -func isVolumeAccessModeValid(ctx 
context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (bool, error) { - pvcConsumerPod := getPvcConsumerPod(pvc) - // TODO: Determine if pod was created +func checkVolumeAccessModeValid(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (pvcValidation, error) { + pvcConsumerPodSpec := getPvcConsumerPod(pvc) + pvcConsumerPod, err := clientset.CoreV1().Pods(pvc.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) + if err != nil { + return pvcValidation{}, err + } // cleanup pod after completion defer func() { err = clientset.CoreV1().Pods(pvc.Namespace).Delete(context.TODO(), pvcConsumerPod.Name, metav1.DeleteOptions{}) if err != nil { - l.Printf("failed to delete PVC consumer pod %s: %v", pvcConsumerPod.Name, err) + l.Printf("Failed to delete PVC consumer pod %s: %v", pvcConsumerPod.Name, err) } }() - return true, nil + podReadyCh := make(chan bool, 1) + go func() { + for { + if k8spodutils.IsPodReady(pvcConsumerPod) { + podReadyCh <- true + } + } + }() + select { + case <-podReadyCh: + case <-time.After(time.Second * 10): + // check pvc status and get error + pvcPendingError, err := getPvcError(ctx, l, clientset, pvc) + if err != nil { + return pvcValidation{false, fmt.Sprintf("Failed to get PVC error: %s", err)}, nil + } + return pvcValidation{false, fmt.Sprintf("PVC Error: %s", pvcPendingError)}, nil + } + + return pvcValidation{true, ""}, nil } func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { @@ -1249,28 +1271,23 @@ func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { // getPvcError returns the reason for why a PVC is in Pending status // returns nil if PVC is not pending -func getPvcError(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcName string, namespace string) error { - pvc, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) - if err != nil 
{ - return fmt.Errorf("failed to check access mode for PVC %s: %w", pvcName, err) - } - +func getPvcError(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (error, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { - return nil + return nil, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } - eventSelector := clientset.CoreV1().Events(namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) - pvcEvents, err := clientset.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) + eventSelector := clientset.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) + pvcEvents, err := clientset.CoreV1().Events(pvc.Namespace).List(ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) if err != nil { - return fmt.Errorf("failed to list events for PVC %s", pvcName) + return nil, fmt.Errorf("failed to list events for PVC %s", pvc.Name) } // get pending reason for _, event := range pvcEvents.Items { if event.Reason == "ProvisioningFailed" { - return fmt.Errorf("PVC %s could not be bound: %s", pvcName, event.Message) + return errors.New(event.Message), nil } } - return fmt.Errorf("Could not determine reason for why PVC %s is in Pending phase", pvcName) + return nil, fmt.Errorf("Could not determine reason for why PVC %s is in Pending phase", pvc.Name) } From 3ef7e7d430622d93430cd21a73f5619d06754445 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 16 Nov 2022 16:26:56 -0600 Subject: [PATCH 03/41] More WIP --- go.mod | 34 +-- go.sum | 508 ++++------------------------------------- pkg/migrate/migrate.go | 304 +++++++++++++++++++----- 3 files changed, 316 insertions(+), 530 deletions(-) diff --git a/go.mod b/go.mod index f9234a6..c966805 100644 --- a/go.mod +++ b/go.mod @@ -3,17 +3,20 @@ module 
github.com/replicatedhq/pvmigrate go 1.19 require ( + github.com/google/uuid v1.3.0 + github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867 github.com/stretchr/testify v1.8.1 k8s.io/api v0.25.4 k8s.io/apimachinery v0.25.4 - k8s.io/client-go v0.25.3 + k8s.io/client-go v0.25.4 k8s.io/kubernetes v1.25.3 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed sigs.k8s.io/controller-runtime v0.13.1 ) require ( - cloud.google.com/go v0.97.0 // indirect + cloud.google.com/go/compute v1.12.1 // indirect + cloud.google.com/go/compute/metadata v0.2.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.27 // indirect github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect @@ -27,36 +30,37 @@ require ( github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/swag v0.21.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.2.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 
// indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect - golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect + golang.org/x/net v0.1.0 // indirect + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect + golang.org/x/sys v0.1.0 // indirect + golang.org/x/term v0.1.0 // indirect + golang.org/x/text v0.4.0 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.70.1 // indirect + k8s.io/klog/v2 v2.80.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index b9c559f..2f49c0a 100644 --- a/go.sum +++ b/go.sum @@ -1,49 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod 
h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= 
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= @@ -61,45 +20,26 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -108,104 +48,50 @@ github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod 
h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= 
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -216,8 +102,11 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -229,16 +118,15 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= +github.com/onsi/ginkgo/v2 v2.3.0 h1:kUMoxMoQG3ogk/QWyKh3zibV7BKZ+xBpWil1cTylVqc= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867 h1:2nu0z28pXfM6J7qXQiqXsc143aAHggB5WldO7FGqMC8= +github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867/go.mod h1:oBUm8d7up1EZbU4EdvtjOMUeYFeslgkionv0CRbf52Q= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -247,440 +135,142 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= 
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 h1:a5Yg6ylndHHYJqIPrdq0AhvR6KTvDTAvgBtaidhEevY= +golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= 
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile 
v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f 
h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.4.0 
h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools 
v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs= k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ= k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc= k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= +k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= +k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod 
h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kubernetes v1.25.3 h1:Ljx/Ew9+dt7rN9ob3V+N/aoDy7nDSbmr35IbYGRTyqE= k8s.io/kubernetes v1.25.3/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 94db36c..c79206f 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -13,12 +13,15 @@ import ( "text/tabwriter" "time" + "github.com/google/uuid" + kurlutils "github.com/replicatedhq/kurl/pkg/k8sutil" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" "sigs.k8s.io/controller-runtime/pkg/client/config" ) @@ -54,6 +57,16 @@ type Options struct { PvcCopyTimeout int } +type PVMigrator struct { + ctx context.Context + log *log.Logger + k8scli k8sclient.Interface + srcSc string + dstSc string + deletePVTimeout time.Duration + podTimeout time.Duration +} + // Cli uses CLI options to run Migrate func Cli() { var options Options @@ -103,11 +116,19 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return 
err } - // validate pvc access modes - unsupportedPVcs := validatePVCs(ctx, w, clientset, matchingPVCs) - if unsupportedPVcs != nil { - // TODO: format and return error - return fmt.Errorf("Some PVCs are not supported for storage class %s", options.DestSCName) + srcPVs, err := kurlutils.PVSByStorageClass(ctx, clientset, options.SourceSCName) + if err != nil { + return fmt.Errorf("failed to get volumes using storage class %s: %w", options.SourceSCName, err) + } + pvMigrator := PVMigrator{ctx, w, clientset, options.SourceSCName, options.DestSCName, 5 * time.Minute, 10 * time.Second} + unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) + if err != nil { + return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) + } + + if unsupportedPVCs != nil { + // TODO: print error + return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) } updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5) @@ -158,9 +179,14 @@ func (pvc pvcCtx) getNodeNameRef() string { return pvc.usedByPod.Spec.NodeName } -type pvcValidation struct { - valid bool - reason string +type pvcError struct { + reason string + from string + message string +} + +func (e *pvcError) Error() string { + return fmt.Sprintf("volume claim error from %s during %s: %s", e.from, e.reason, e.message) } // swapDefaultStorageClasses attempts to set newDefaultSC as the default StorageClass @@ -1174,77 +1200,119 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. return nil } -func validatePVCs(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvcs map[string][]pvcCtx) map[string]map[string]string { +// ValidateVolumeAccessModes checks whether the provided persistent volumes support the access modes +// of a given storage class. 
+// returns a map of pvc errors indexed by namespace +func (pvm *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]string, error) { validationErrors := make(map[string]map[string]string) - for ns, pvcCtxs := range pvcs { - for _, pvcCtx := range pvcCtxs { - v, err := checkVolumeAccessModeValid(ctx, l, clientset, *pvcCtx.claim) - if err != nil { - validationErrors[ns] = map[string]string{pvcCtx.claim.Name: fmt.Sprintf("Failed to validate volume access mode: %s", err)} - continue - } - validationErrors[ns] = map[string]string{pvcCtx.claim.Name: v.reason} + if _, err := pvm.k8scli.StorageV1().StorageClasses().Get(pvm.ctx, pvm.dstSc, metav1.GetOptions{}); err != nil { + return nil, fmt.Errorf("failed to get destination storage class %s: %w", pvm.dstSc, err) + } + + pvcs, err := kurlutils.PVCSForPVs(pvm.ctx, pvm.k8scli, pvs) + if err != nil { + return nil, fmt.Errorf("failed to get pv to pvc mapping: %w", err) + } + + for pv, pvc := range pvcs { + v, err := pvm.checkVolumeAccessModes(pvc) + if err != nil { + pvm.log.Printf("failed to validate volume access mode for claim %s (%s): %s", pvc.Name, pv, err) + continue } + validationErrors[pvc.Namespace] = map[string]string{pvc.Name: v.reason} } - return validationErrors + return validationErrors, nil } -func checkVolumeAccessModeValid(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (pvcValidation, error) { - pvcConsumerPodSpec := getPvcConsumerPod(pvc) - pvcConsumerPod, err := clientset.CoreV1().Pods(pvc.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) +// checkVolumeAccessModeValid checks if the access modes of pvc are supported by storage class sc. 
+func (pvm *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (pvcError, error) { + var err error + + // create temp pvc for storage class + tmpPVC := buildTmpPVC(pvc, pvm.dstSc) + if tmpPVC, err = pvm.k8scli.CoreV1().PersistentVolumeClaims("default").Create( + pvm.ctx, tmpPVC, metav1.CreateOptions{}, + ); err != nil { + return pvcError{}, fmt.Errorf("failed to create temporary pvc: %w", err) + } + + // consume pvc to determine any access mode errors + pvConsumerPodSpec := buildPVConsumerPod(pvc.Name) + pvConsumerPod, err := pvm.k8scli.CoreV1().Pods(pvc.Namespace).Create(pvm.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) if err != nil { - return pvcValidation{}, err + return pvcError{}, err } - // cleanup pod after completion + // cleanup pvc and pod at the end defer func() { - err = clientset.CoreV1().Pods(pvc.Namespace).Delete(context.TODO(), pvcConsumerPod.Name, metav1.DeleteOptions{}) - if err != nil { - l.Printf("Failed to delete PVC consumer pod %s: %v", pvcConsumerPod.Name, err) + if err = pvm.deleteTmpPVC(tmpPVC); err != nil { + pvm.log.Printf("failed to delete tmp claim: %s", err) } }() - - podReadyCh := make(chan bool, 1) - go func() { - for { - if k8spodutils.IsPodReady(pvcConsumerPod) { - podReadyCh <- true - } + defer func() { + if err = pvm.deletePVConsumerPod(pvConsumerPod); err != nil { + pvm.log.Printf("failed to delete pv consumer pod %s: %s", pvConsumerPod.Name, err) } }() - select { - case <-podReadyCh: - case <-time.After(time.Second * 10): - // check pvc status and get error - pvcPendingError, err := getPvcError(ctx, l, clientset, pvc) + + podReadyTimeoutEnd := time.Now().Add(pvm.podTimeout) + for { + gotPod, err := pvm.k8scli.CoreV1().Pods(pvConsumerPod.Namespace).Get(pvm.ctx, pvConsumerPod.Name, metav1.GetOptions{}) if err != nil { - return pvcValidation{false, fmt.Sprintf("Failed to get PVC error: %s", err)}, nil + return pvcError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) + } + + switch { + 
case k8spodutils.IsPodReady(gotPod): + return pvcError{}, nil + default: + time.Sleep(time.Second) } - return pvcValidation{false, fmt.Sprintf("PVC Error: %s", pvcPendingError)}, nil - } - return pvcValidation{true, ""}, nil + if time.Now().After(podReadyTimeoutEnd) { + // The volume consumer pod never went into running phase which means it's probably an error + // with provisioning the volume. + // A pod in Pending phase means the API Server has created the resource and stored it in etcd, + // but the pod has not been scheduled yet, nor have container images been pulled from the registry. + if gotPod.Status.Phase == corev1.PodPending { + // check pvc status and get error + pvcPendingError, err := pvm.getPvcError(tmpPVC) + if err != nil { + return pvcError{}, fmt.Errorf("failed to get PVC error: %s", err) + } + return pvcPendingError, nil + } + // pod failed for other reason(s) + return pvcError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) + } + } } -func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { +func buildPVConsumerPod(pvcName string) *corev1.Pod { + tmp := uuid.New().String()[:5] + podName := fmt.Sprintf("pv-access-modes-checker-%s-%s", pvcName, tmp) + if len(podName) > 63 { + podName = podName[0:31] + podName[len(podName)-32:] + } return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-access-mode-pvc-" + pvc.Name, - Namespace: pvc.Namespace, + Name: podName, + Namespace: "default", }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, Volumes: []corev1.Volume{ { - Name: *pvc.Spec.StorageClassName + "-volume", + Name: "tmp", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.Name, + ClaimName: pvcName, }, }, }, @@ -1259,8 +1327,8 @@ func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { }, VolumeMounts: []corev1.VolumeMount{ { - 
MountPath: "/dest", - Name: *pvc.Spec.StorageClassName + "-volume", + MountPath: "/tmpmount", + Name: "tmp", }, }, }, @@ -1269,25 +1337,149 @@ func getPvcConsumerPod(pvc corev1.PersistentVolumeClaim) *corev1.Pod { } } +// buildPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. +func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { + tmp := uuid.New().String()[:5] + pvcName := fmt.Sprintf("pvmigrate-%s-accessmode-test-%s", sc, tmp) + if len(pvcName) > 63 { + pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] + } + + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &sc, + AccessModes: pvc.Spec.AccessModes, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + } +} + +// deleteTmpPVC deletes the provided pvc from the default namespace and waits until the +// backing pv dissapear as well (this is mandatory so we don't leave any orphan pv as this would +// make the pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is +// returned. 
+func (pvm *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { + // Cleanup should use background context so as not to fail if context has already been canceled + ctx := context.Background() + + pvs, err := pvm.k8scli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list persistent volumes: %w", err) + } + + pvsByPVCName := map[string]corev1.PersistentVolume{} + for _, pv := range pvs.Items { + if pv.Spec.ClaimRef == nil { + continue + } + pvsByPVCName[pv.Spec.ClaimRef.Name] = pv + } + + var waitFor []string + propagation := metav1.DeletePropagationForeground + delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} + if err := pvm.k8scli.CoreV1().PersistentVolumeClaims("default").Delete( + ctx, pvc.Name, delopts, + ); err != nil { + log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) + } + waitFor = append(waitFor, pvc.Name) + + timeout := time.NewTicker(5 * time.Minute) + interval := time.NewTicker(5 * time.Second) + defer timeout.Stop() + defer interval.Stop() + for _, pvc := range waitFor { + pv, ok := pvsByPVCName[pvc] + if !ok { + log.Printf("failed to find pv for temp pvc %s", pvc) + continue + } + + for { + // break the loop as soon as we can't find the pv anymore. 
+ if _, err := pvm.k8scli.CoreV1().PersistentVolumes().Get( + ctx, pv.Name, metav1.GetOptions{}, + ); err != nil && !k8serrors.IsNotFound(err) { + log.Printf("failed to get pv for temp pvc %s: %s", pvc, err) + } else if err != nil && k8serrors.IsNotFound(err) { + break + } + + select { + case <-interval.C: + continue + case <-timeout.C: + return fmt.Errorf("failed to delete pvs: timeout") + } + } + } + return nil +} + +func (pvm *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { + propagation := metav1.DeletePropagationForeground + delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} + if err := pvm.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { + return err + } + return nil +} + // getPvcError returns the reason for why a PVC is in Pending status // returns nil if PVC is not pending -func getPvcError(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, pvc corev1.PersistentVolumeClaim) (error, error) { +func (pvm *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { - return nil, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) + return pvcError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } - eventSelector := clientset.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) - pvcEvents, err := clientset.CoreV1().Events(pvc.Namespace).List(ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) + eventSelector := pvm.k8scli.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) + pvcEvents, err := pvm.k8scli.CoreV1().Events(pvc.Namespace).List(pvm.ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) if err != nil { - return nil, fmt.Errorf("failed to list events for PVC %s", pvc.Name) + return pvcError{}, fmt.Errorf("failed to list events for PVC 
%s", pvc.Name) } // get pending reason for _, event := range pvcEvents.Items { if event.Reason == "ProvisioningFailed" { - return errors.New(event.Message), nil + return pvcError{event.Reason, event.Source.Component, event.Type}, nil } } - return nil, fmt.Errorf("Could not determine reason for why PVC %s is in Pending phase", pvc.Name) + return pvcError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) +} + +func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVMigrator, error) { + k8scli, err := k8sclient.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create kubernetes client: %w", err) + } + + if srcSC == "" { + return nil, fmt.Errorf("empty source storage class") + } + if dstSC == "" { + return nil, fmt.Errorf("empty destination storage class") + } + if log == nil { + return nil, fmt.Errorf("no logger provided") + } + + return &PVMigrator{ + ctx: context.Background(), + log: log, + k8scli: k8scli, + srcSc: srcSC, + dstSc: dstSC, + deletePVTimeout: 5 * time.Minute, + podTimeout: 10 * time.Second, + }, nil } From aa01da425a950d4caef2abae9bf8bd7135991b34 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Thu, 17 Nov 2022 02:35:55 -0600 Subject: [PATCH 04/41] Adding tests --- pkg/migrate/migrate.go | 331 +++++++++++++++++++----------------- pkg/migrate/migrate_test.go | 155 ++++++++++++++++- 2 files changed, 324 insertions(+), 162 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index c79206f..703a66d 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -57,6 +57,8 @@ type Options struct { PvcCopyTimeout int } +// PVMigrator represents a migration context for migrating data from all srcSC volumes to +// dstSc volumes. 
type PVMigrator struct { ctx context.Context log *log.Logger @@ -127,7 +129,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, } if unsupportedPVCs != nil { - // TODO: print error + PrintPVAccessModeErrors(unsupportedPVCs) return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) } @@ -167,6 +169,74 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return nil } +// NewPVMigrator returns a PV migration context +func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVMigrator, error) { + k8scli, err := k8sclient.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create kubernetes client: %w", err) + } + + if srcSC == "" { + return nil, fmt.Errorf("empty source storage class") + } + if dstSC == "" { + return nil, fmt.Errorf("empty destination storage class") + } + if log == nil { + return nil, fmt.Errorf("no logger provided") + } + + return &PVMigrator{ + ctx: context.Background(), + log: log, + k8scli: k8scli, + srcSc: srcSC, + dstSc: dstSC, + deletePVTimeout: 5 * time.Minute, + podTimeout: 10 * time.Second, + }, nil +} + +// PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors +func PrintPVAccessModeErrors(pvcErrors map[string]map[string]pvcError) { + tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, '\t', 0) + fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") + fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tREASON\tMESSAGE") + fmt.Fprintf(tw, "---------\t---\t------\t-------\t------\n") + for ns, pvcErrs := range pvcErrors { + for pvc, pvcErr := range pvcErrs { + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", ns, pvc, pvcErr.from, pvcErr.reason, pvcErr.message) + } + } + tw.Flush() +} + +// ValidateVolumeAccessModes checks whether the provided persistent volumes support the access modes +// of the destination storage class. 
+// returns a map of pvc errors indexed by namespace +func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]pvcError, error) { + validationErrors := make(map[string]map[string]pvcError) + + if _, err := p.k8scli.StorageV1().StorageClasses().Get(p.ctx, p.dstSc, metav1.GetOptions{}); err != nil { + return nil, fmt.Errorf("failed to get destination storage class %s: %w", p.dstSc, err) + } + + pvcs, err := kurlutils.PVCSForPVs(p.ctx, p.k8scli, pvs) + if err != nil { + return nil, fmt.Errorf("failed to get pv to pvc mapping: %w", err) + } + + for pv, pvc := range pvcs { + v, err := p.checkVolumeAccessModes(pvc) + if err != nil { + p.log.Printf("failed to check volume access mode for claim %s (%s): %s", pvc.Name, pv, err) + continue + } + validationErrors[pvc.Namespace] = map[string]pvcError{pvc.Name: v} + } + return nil, nil +} + type pvcCtx struct { claim *corev1.PersistentVolumeClaim usedByPod *corev1.Pod @@ -266,7 +336,7 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa // setup timeout timeoutCtx, cancelCtx := context.WithTimeout(ctx, timeout) defer cancelCtx() - err := copyOnePVC(timeoutCtx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, nsPvc.getNodeNameRef()) + err := copyOnePVC(timeoutCtx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, nsPvc.getNodeNameRef(), timeout) if err != nil { return fmt.Errorf("failed to copy PVC %s in %s: %w", nsPvc.claim.Name, ns, err) } @@ -275,7 +345,7 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa return nil } -func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, nodeName string) error { +func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, 
verboseCopy bool, nodeName string, timeout time.Duration) error { w.Printf("Creating pvc migrator pod on node %s\n", nodeName) createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName) if err != nil { @@ -291,23 +361,15 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac } }() - // initial wait for the pod to be created - waitInterval := time.Duration(time.Second * 1) - time.Sleep(waitInterval) - + migrationTimeout := time.NewTicker(timeout) + waitInterval := time.NewTicker(1 * time.Second) + defer migrationTimeout.Stop() + defer waitInterval.Stop() for { gotPod, err := clientset.CoreV1().Pods(ns).Get(ctx, createdPod.Name, metav1.GetOptions{}) if err != nil { w.Printf("failed to get newly created migration pod %s: %v\n", createdPod.Name, err) - continue - } - - if gotPod.Status.Phase == corev1.PodPending { - time.Sleep(waitInterval) - continue - } - - if gotPod.Status.Phase == corev1.PodRunning || gotPod.Status.Phase == corev1.PodSucceeded { + } else if gotPod.Status.Phase == corev1.PodRunning || gotPod.Status.Phase == corev1.PodSucceeded { // time to get logs break } @@ -315,11 +377,10 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac w.Printf("got status %s for pod %s, this is likely an error\n", gotPod.Status.Phase, gotPod.Name) select { - case <-ctx.Done(): - // TODO: revisit this - w.Printf("ERROR: Copy operation from PVC %s to PVC %s timed out\n", sourcePvcName, destPvcName) - return fmt.Errorf("context deadline exceeded waiting for migration pod %s to go into Running phase: %w", createdPod.Name, err) - default: + case <-waitInterval.C: + continue + case <-migrationTimeout.C: + return fmt.Errorf("migration pod %s failed to go into Running phase: timedout", createdPod.Name) } } @@ -339,16 +400,16 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac gotPod, err := clientset.CoreV1().Pods(ns).Get(ctx, createdPod.Name, 
metav1.GetOptions{}) if err != nil { w.Printf("failed to check status of newly created migration pod %s: %v\n", createdPod.Name, err) - continue - } - - if gotPod.Status.Phase != corev1.PodRunning { + } else if gotPod.Status.Phase != corev1.PodRunning { // if the pod is not running, go to the "validate success" section break } - - // if the pod is running, wait to see if getting logs works in a few seconds - time.Sleep(waitInterval) + } + select { + case <-waitInterval.C: + continue + case <-migrationTimeout.C: + return fmt.Errorf("failed to get logs for migration container %s: timedout", pvMigrateContainerName) } } @@ -396,7 +457,12 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac return fmt.Errorf("logs for the migration pod %s in %s ended, but the status was %s and not succeeded", createdPod.Name, ns, gotPod.Status.Phase) } - time.Sleep(waitInterval) + select { + case <-waitInterval.C: + continue + case <-migrationTimeout.C: + return fmt.Errorf("could not determine if migration pod %s succeeded: timedout", createdPod.Name) + } } w.Printf("finished migrating PVC %s\n", sourcePvcName) @@ -1200,96 +1266,7 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. return nil } -// ValidateVolumeAccessModes checks whether the provided persistent volumes support the access modes -// of a given storage class. 
-// returns a map of pvc errors indexed by namespace -func (pvm *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]string, error) { - validationErrors := make(map[string]map[string]string) - - if _, err := pvm.k8scli.StorageV1().StorageClasses().Get(pvm.ctx, pvm.dstSc, metav1.GetOptions{}); err != nil { - return nil, fmt.Errorf("failed to get destination storage class %s: %w", pvm.dstSc, err) - } - - pvcs, err := kurlutils.PVCSForPVs(pvm.ctx, pvm.k8scli, pvs) - if err != nil { - return nil, fmt.Errorf("failed to get pv to pvc mapping: %w", err) - } - - for pv, pvc := range pvcs { - v, err := pvm.checkVolumeAccessModes(pvc) - if err != nil { - pvm.log.Printf("failed to validate volume access mode for claim %s (%s): %s", pvc.Name, pv, err) - continue - } - validationErrors[pvc.Namespace] = map[string]string{pvc.Name: v.reason} - } - return validationErrors, nil -} - -// checkVolumeAccessModeValid checks if the access modes of pvc are supported by storage class sc. 
-func (pvm *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (pvcError, error) { - var err error - - // create temp pvc for storage class - tmpPVC := buildTmpPVC(pvc, pvm.dstSc) - if tmpPVC, err = pvm.k8scli.CoreV1().PersistentVolumeClaims("default").Create( - pvm.ctx, tmpPVC, metav1.CreateOptions{}, - ); err != nil { - return pvcError{}, fmt.Errorf("failed to create temporary pvc: %w", err) - } - - // consume pvc to determine any access mode errors - pvConsumerPodSpec := buildPVConsumerPod(pvc.Name) - pvConsumerPod, err := pvm.k8scli.CoreV1().Pods(pvc.Namespace).Create(pvm.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) - if err != nil { - return pvcError{}, err - } - - // cleanup pvc and pod at the end - defer func() { - if err = pvm.deleteTmpPVC(tmpPVC); err != nil { - pvm.log.Printf("failed to delete tmp claim: %s", err) - } - }() - defer func() { - if err = pvm.deletePVConsumerPod(pvConsumerPod); err != nil { - pvm.log.Printf("failed to delete pv consumer pod %s: %s", pvConsumerPod.Name, err) - } - }() - - podReadyTimeoutEnd := time.Now().Add(pvm.podTimeout) - for { - gotPod, err := pvm.k8scli.CoreV1().Pods(pvConsumerPod.Namespace).Get(pvm.ctx, pvConsumerPod.Name, metav1.GetOptions{}) - if err != nil { - return pvcError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) - } - - switch { - case k8spodutils.IsPodReady(gotPod): - return pvcError{}, nil - default: - time.Sleep(time.Second) - } - - if time.Now().After(podReadyTimeoutEnd) { - // The volume consumer pod never went into running phase which means it's probably an error - // with provisioning the volume. - // A pod in Pending phase means the API Server has created the resource and stored it in etcd, - // but the pod has not been scheduled yet, nor have container images been pulled from the registry. 
- if gotPod.Status.Phase == corev1.PodPending { - // check pvc status and get error - pvcPendingError, err := pvm.getPvcError(tmpPVC) - if err != nil { - return pvcError{}, fmt.Errorf("failed to get PVC error: %s", err) - } - return pvcPendingError, nil - } - // pod failed for other reason(s) - return pvcError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) - } - } -} - +// buildPVConsumerPod creates a pod spec for consuming a pvc func buildPVConsumerPod(pvcName string) *corev1.Pod { tmp := uuid.New().String()[:5] podName := fmt.Sprintf("pv-access-modes-checker-%s-%s", pvcName, tmp) @@ -1362,15 +1339,80 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent } } +// checkVolumeAccessModeValid checks if the access modes of a pv are supported by the +// destination storage class. +func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (pvcError, error) { + var err error + + // create temp pvc for storage class + tmpPVC := buildTmpPVC(pvc, p.dstSc) + if tmpPVC, err = p.k8scli.CoreV1().PersistentVolumeClaims("default").Create( + p.ctx, tmpPVC, metav1.CreateOptions{}, + ); err != nil { + return pvcError{}, fmt.Errorf("failed to create temporary pvc: %w", err) + } + + // consume pvc to determine any access mode errors + pvConsumerPodSpec := buildPVConsumerPod(pvc.Name) + pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvc.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) + if err != nil { + return pvcError{}, err + } + + // cleanup pvc and pod at the end + defer func() { + if err = p.deleteTmpPVC(tmpPVC); err != nil { + p.log.Printf("failed to delete tmp claim: %s", err) + } + }() + defer func() { + if err = p.deletePVConsumerPod(pvConsumerPod); err != nil { + p.log.Printf("failed to delete pv consumer pod %s: %s", pvConsumerPod.Name, err) + } + }() + + podReadyTimeoutEnd := time.Now().Add(p.podTimeout) + for { + gotPod, err := 
p.k8scli.CoreV1().Pods(pvConsumerPod.Namespace).Get(p.ctx, pvConsumerPod.Name, metav1.GetOptions{}) + if err != nil { + return pvcError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) + } + + switch { + case k8spodutils.IsPodReady(gotPod): + return pvcError{}, nil + default: + time.Sleep(time.Second) + } + + if time.Now().After(podReadyTimeoutEnd) { + // The volume consumer pod never went into running phase which means it's probably an error + // with provisioning the volume. + // A pod in Pending phase means the API Server has created the resource and stored it in etcd, + // but the pod has not been scheduled yet, nor have container images been pulled from the registry. + if gotPod.Status.Phase == corev1.PodPending { + // check pvc status and get error + pvcPendingError, err := p.getPvcError(tmpPVC) + if err != nil { + return pvcError{}, fmt.Errorf("failed to get PVC error: %s", err) + } + return pvcPendingError, nil + } + // pod failed for other reason(s) + return pvcError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) + } + } +} + // deleteTmpPVC deletes the provided pvc from the default namespace and waits until the // backing pv dissapear as well (this is mandatory so we don't leave any orphan pv as this would -// make the pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is +// cause pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is // returned. 
-func (pvm *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { +func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { // Cleanup should use background context so as not to fail if context has already been canceled ctx := context.Background() - pvs, err := pvm.k8scli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + pvs, err := p.k8scli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) if err != nil { return fmt.Errorf("failed to list persistent volumes: %w", err) } @@ -1386,7 +1428,7 @@ func (pvm *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { var waitFor []string propagation := metav1.DeletePropagationForeground delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := pvm.k8scli.CoreV1().PersistentVolumeClaims("default").Delete( + if err := p.k8scli.CoreV1().PersistentVolumeClaims("default").Delete( ctx, pvc.Name, delopts, ); err != nil { log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) @@ -1406,7 +1448,7 @@ func (pvm *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { for { // break the loop as soon as we can't find the pv anymore. 
- if _, err := pvm.k8scli.CoreV1().PersistentVolumes().Get( + if _, err := p.k8scli.CoreV1().PersistentVolumes().Get( ctx, pv.Name, metav1.GetOptions{}, ); err != nil && !k8serrors.IsNotFound(err) { log.Printf("failed to get pv for temp pvc %s: %s", pvc, err) @@ -1425,25 +1467,25 @@ func (pvm *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { return nil } -func (pvm *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { +// deletePVConsumerPod removes the pod resource from the api servere +func (p *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { propagation := metav1.DeletePropagationForeground delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := pvm.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { + if err := p.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { return err } return nil } -// getPvcError returns the reason for why a PVC is in Pending status -// returns nil if PVC is not pending -func (pvm *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, error) { +// getPvcError returns the error event for why a PVC is in Pending status +func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { return pvcError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } - eventSelector := pvm.k8scli.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) - pvcEvents, err := pvm.k8scli.CoreV1().Events(pvc.Namespace).List(pvm.ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) + eventSelector := p.k8scli.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) + pvcEvents, err := p.k8scli.CoreV1().Events(pvc.Namespace).List(p.ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) 
if err != nil { return pvcError{}, fmt.Errorf("failed to list events for PVC %s", pvc.Name) } @@ -1456,30 +1498,3 @@ func (pvm *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, } return pvcError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) } - -func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVMigrator, error) { - k8scli, err := k8sclient.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("failed to create kubernetes client: %w", err) - } - - if srcSC == "" { - return nil, fmt.Errorf("empty source storage class") - } - if dstSC == "" { - return nil, fmt.Errorf("empty destination storage class") - } - if log == nil { - return nil, fmt.Errorf("no logger provided") - } - - return &PVMigrator{ - ctx: context.Background(), - log: log, - k8scli: k8scli, - srcSc: srcSC, - dstSc: dstSC, - deletePVTimeout: 5 * time.Minute, - podTimeout: 10 * time.Second, - }, nil -} diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 329df6e..29188a9 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -349,7 +349,6 @@ func TestValidateStorageClasses(t *testing.T) { } else { req.Error(err) } - }) } } @@ -782,7 +781,6 @@ func TestGetPVCs(t *testing.T) { require.NoError(t, err) require.Equalf(t, dscString, *pvc2.Spec.StorageClassName, "storage class name was %q not dsc", *pvc2.Spec.StorageClassName) require.Equalf(t, "1Gi", pvc2.Spec.Resources.Requests.Storage().String(), "PVC size was %q not 1Gi", pvc2.Spec.Resources.Requests.Storage().String()) - }, originalPVCs: map[string][]pvcCtx{ "ns1": { @@ -884,7 +882,6 @@ func TestGetPVCs(t *testing.T) { pvc2, err := clientset.CoreV1().PersistentVolumeClaims("ns2").Get(context.TODO(), "pvc2", metav1.GetOptions{}) require.NoError(t, err) require.Equalf(t, "sc1", *pvc2.Spec.StorageClassName, "storage class name was %q not sc1", *pvc2.Spec.StorageClassName) - }, originalPVCs: map[string][]pvcCtx{ 
"ns1": { @@ -3058,7 +3055,7 @@ func Test_copyAllPVCs(t *testing.T) { } }(testCtx, testlog, clientset, tt.events) - err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Millisecond*10) + err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Second*4) if tt.wantErr { req.Error(err) testlog.Printf("got expected error %q", err.Error()) @@ -3068,3 +3065,153 @@ func Test_copyAllPVCs(t *testing.T) { }) } } + +func Test_validateVolumeAccessModes(t *testing.T) { + for _, tt := range []struct { + name string + srcStorageClass string + dstStorageClass string + deletePVTimeout time.Duration + podTimeout time.Duration + wantErr bool + resources []runtime.Object + input map[string]corev1.PersistentVolume + expected map[string]map[string]pvcError + }{ + { + name: "no errors", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + }, + }, + }, + }, + expected: nil, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + }, + }, + }, + }, + }, + { + name: "error", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + }, + }, + 
}, + }, + expected: map[string]map[string]pvcError{ + "default": { + "pvc": pvcError{}, + }, + }, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + Provisioner: "kubernetes.io/no-provisioner", + VolumeBindingMode: (*storagev1.VolumeBindingMode)(pointer.String("WaitForFirstConsumer")), + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "ns1", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("srcSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteAll"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "ns1", + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) 
+ testlog := log.New(testWriter{t: t}, "", 0) + pvm := PVMigrator{ + ctx: context.Background(), + log: testlog, + k8scli: kcli, + srcSc: tt.srcStorageClass, + dstSc: tt.dstStorageClass, + deletePVTimeout: 1 * time.Millisecond, + podTimeout: 1 * time.Millisecond, + } + result, err := pvm.ValidateVolumeAccessModes(tt.input) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(result, tt.expected) + }) + } +} From 61bc196bc3cd86697b5d901d781b85355b5696ce Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Thu, 17 Nov 2022 17:20:30 -0600 Subject: [PATCH 05/41] Add more unit test --- pkg/migrate/migrate.go | 16 +- pkg/migrate/migrate_test.go | 371 ++++++++++++++++++++++++++++++++++-- 2 files changed, 371 insertions(+), 16 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 703a66d..bdb93c0 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -1269,7 +1269,7 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. // buildPVConsumerPod creates a pod spec for consuming a pvc func buildPVConsumerPod(pvcName string) *corev1.Pod { tmp := uuid.New().String()[:5] - podName := fmt.Sprintf("pv-access-modes-checker-%s-%s", pvcName, tmp) + podName := fmt.Sprintf("pv-access-modes-checker-for-%s-%s", pvcName, tmp) if len(podName) > 63 { podName = podName[0:31] + podName[len(podName)-32:] } @@ -1317,7 +1317,7 @@ func buildPVConsumerPod(pvcName string) *corev1.Pod { // buildPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. 
func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { tmp := uuid.New().String()[:5] - pvcName := fmt.Sprintf("pvmigrate-%s-accessmode-test-%s", sc, tmp) + pvcName := fmt.Sprintf("pvmigrate-%s-accessmode-test-claim-%s", pvc.Name, tmp) if len(pvcName) > 63 { pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] } @@ -1326,6 +1326,7 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: "default", + UID: pvc.UID, // for testing }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &sc, @@ -1336,6 +1337,8 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent }, }, }, + // for testing + Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, } } @@ -1354,7 +1357,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p // consume pvc to determine any access mode errors pvConsumerPodSpec := buildPVConsumerPod(pvc.Name) - pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvc.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) + pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) if err != nil { return pvcError{}, err } @@ -1431,7 +1434,10 @@ func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { if err := p.k8scli.CoreV1().PersistentVolumeClaims("default").Delete( ctx, pvc.Name, delopts, ); err != nil { - log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) + if !k8serrors.IsNotFound(err) { + log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) + return err + } } waitFor = append(waitFor, pvc.Name) @@ -1493,7 +1499,7 @@ func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, e // get pending reason for _, event := range pvcEvents.Items { if event.Reason == "ProvisioningFailed" { - return pvcError{event.Reason, 
event.Source.Component, event.Type}, nil + return pvcError{event.Reason, event.Source.Component, event.Message}, nil } } return pvcError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 29188a9..2d5aeea 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3,6 +3,7 @@ package migrate import ( "context" "fmt" + "io" "log" "strings" "testing" @@ -17,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" k8sclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" "k8s.io/utils/pointer" ) @@ -923,6 +925,34 @@ func TestGetPVCs(t *testing.T) { } } +func TestNewPVMigrator(t *testing.T) { + // test empty logger + _, err := NewPVMigrator(&rest.Config{}, nil, "src", "dst") + if err == nil || err.Error() != "no logger provided" { + t.Errorf("expected failure creating object: %v", err) + } + + logger := log.New(io.Discard, "", 0) + + // test src storage class + _, err = NewPVMigrator(&rest.Config{}, logger, "", "dst") + if err == nil || err.Error() != "empty source storage class" { + t.Errorf("expected failure creating object: %v", err) + } + + // test empty dst sc + _, err = NewPVMigrator(&rest.Config{}, logger, "src", "") + if err == nil || err.Error() != "empty destination storage class" { + t.Errorf("expected failure creating object: %v", err) + } + + // happy path + _, err = NewPVMigrator(&rest.Config{}, logger, "src", "dst") + if err != nil { + t.Errorf("unexpected failure creating object: %v", err) + } +} + func Test_createMigrationPod(t *testing.T) { type args struct { ns string @@ -3126,7 +3156,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { }, }, { - name: "error", + name: "should fail when there is no matching pvc for a pv", input: map[string]corev1.PersistentVolume{ "pv0": { ObjectMeta: metav1.ObjectMeta{ @@ -3134,17 +3164,15 @@ func Test_validateVolumeAccessModes(t 
*testing.T) { }, Spec: corev1.PersistentVolumeSpec{ StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ - Name: "pvc", + ClaimRef: &corev1.ObjectReference{ // this pvc will not be found + Name: "pvc", + Namespace: "default", }, }, }, }, - expected: map[string]map[string]pvcError{ - "default": { - "pvc": pvcError{}, - }, - }, + expected: nil, + wantErr: true, srcStorageClass: "srcSc", dstStorageClass: "dstSc", resources: []runtime.Object{ @@ -3157,12 +3185,12 @@ func Test_validateVolumeAccessModes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "dstSc", }, - Provisioner: "kubernetes.io/no-provisioner", + Provisioner: "kubernetes.io/no-provisioner", VolumeBindingMode: (*storagev1.VolumeBindingMode)(pointer.String("WaitForFirstConsumer")), }, &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", + Name: "pvc", Namespace: "ns1", }, Spec: corev1.PersistentVolumeClaimSpec{ @@ -3182,13 +3210,41 @@ func Test_validateVolumeAccessModes(t *testing.T) { Spec: corev1.PersistentVolumeSpec{ StorageClassName: "srcSc", ClaimRef: &corev1.ObjectReference{ - Name: "pvc", + Name: "pvc", Namespace: "ns1", }, }, }, }, }, + { + name: "should fail when destination storage class is not found", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + }, + expected: nil, + wantErr: true, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + }, + }, } { t.Run(tt.name, func(t *testing.T) { req := require.New(t) @@ -3215,3 +3271,296 @@ func Test_validateVolumeAccessModes(t *testing.T) { }) } } + +func Test_getPvcError(t *testing.T) { + for _, tt := range []struct { + name string + srcStorageClass string + dstStorageClass string + 
deletePVTimeout time.Duration + podTimeout time.Duration + wantErr bool + resources []runtime.Object + input *corev1.PersistentVolumeClaim + expected pvcError + }{ + { + name: "get pvc error from events", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: pvcError{ + reason: "ProvisioningFailed", + from: "kubernetes.io/no-provisioner", + message: "Only support ReadWriteOnce access mode", + }, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + Name: "pvc", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "ProvisioningFailed", + Message: "Only support ReadWriteOnce access mode", + }, + }, + }, + }, + }, + { + name: "no events that match the reason ProvisioningFailed", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: pvcError{}, + wantErr: true, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + 
&storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + Name: "pvc", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "Provisioning", + Message: "External provisioner is provisiong volume for claim pvc", + }, + }, + }, + }, + }, + { + name: "no events for pvc", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: pvcError{}, + wantErr: true, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + }, + }, + { + name: "pvc is not in Pending status", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound}, + }, + expected: pvcError{}, + wantErr: true, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := 
fake.NewSimpleClientset(tt.resources...) + testlog := log.New(testWriter{t: t}, "", 0) + pvm := PVMigrator{ + ctx: context.Background(), + log: testlog, + k8scli: kcli, + srcSc: tt.srcStorageClass, + dstSc: tt.dstStorageClass, + deletePVTimeout: 1 * time.Millisecond, + podTimeout: 1 * time.Millisecond, + } + result, err := pvm.getPvcError(tt.input) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(tt.expected, result) + }) + } +} + +func Test_checkVolumeAccessModes(t *testing.T) { + for _, tt := range []struct { + name string + srcStorageClass string + dstStorageClass string + deletePVTimeout time.Duration + podTimeout time.Duration + wantErr bool + resources []runtime.Object + input *corev1.PersistentVolumeClaim + expected pvcError + }{ + { + name: "access mode not supported", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("srcSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: pvcError{ + reason: "ProvisioningFailed", + from: "kubernetes.io/no-provisioner", + message: "Only support ReadWriteOnce access mode", + }, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "ProvisioningFailed", + 
Message: "Only support ReadWriteOnce access mode", + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) + testlog := log.New(testWriter{t: t}, "", 0) + pvm := PVMigrator{ + ctx: context.Background(), + log: testlog, + k8scli: kcli, + srcSc: tt.srcStorageClass, + dstSc: tt.dstStorageClass, + deletePVTimeout: 1 * time.Millisecond, + podTimeout: 1 * time.Millisecond, + } + result, err := pvm.getPvcError(tt.input) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(tt.expected, result) + req.Fail("failing") + }) + } +} From 00a0a868a22a05bcb1f38eb4a97eefe564bcd1a6 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Thu, 17 Nov 2022 23:42:53 -0600 Subject: [PATCH 06/41] Passing UT --- pkg/migrate/migrate.go | 34 ++++++++++++++++++++++++---------- pkg/migrate/migrate_test.go | 28 ++++++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 12 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index bdb93c0..5bdb6c2 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -67,6 +67,7 @@ type PVMigrator struct { dstSc string deletePVTimeout time.Duration podTimeout time.Duration + tmpPodName string } // Cli uses CLI options to run Migrate @@ -122,7 +123,15 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, if err != nil { return fmt.Errorf("failed to get volumes using storage class %s: %w", options.SourceSCName, err) } - pvMigrator := PVMigrator{ctx, w, clientset, options.SourceSCName, options.DestSCName, 5 * time.Minute, 10 * time.Second} + pvMigrator := PVMigrator{ + ctx: ctx, + log: w, + k8scli: clientset, + srcSc: options.SourceSCName, + dstSc: options.DestSCName, + deletePVTimeout: 5 * time.Minute, + podTimeout: 10 * time.Second, + } unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) if err != nil { 
return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) @@ -1267,9 +1276,12 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. } // buildPVConsumerPod creates a pod spec for consuming a pvc -func buildPVConsumerPod(pvcName string) *corev1.Pod { - tmp := uuid.New().String()[:5] - podName := fmt.Sprintf("pv-access-modes-checker-for-%s-%s", pvcName, tmp) +func buildPVConsumerPod(name, pvcName string) *corev1.Pod { + podName := name + if name == "" { + tmp := uuid.New().String()[:5] + podName = fmt.Sprintf("pvmigrate-vol-consumer-%s", tmp) + } if len(podName) > 63 { podName = podName[0:31] + podName[len(podName)-32:] } @@ -1317,7 +1329,7 @@ func buildPVConsumerPod(pvcName string) *corev1.Pod { // buildPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { tmp := uuid.New().String()[:5] - pvcName := fmt.Sprintf("pvmigrate-%s-accessmode-test-claim-%s", pvc.Name, tmp) + pvcName := fmt.Sprintf("pvmigrate-claim-%s-%s", pvc.Name, tmp) if len(pvcName) > 63 { pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] } @@ -1356,9 +1368,9 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p } // consume pvc to determine any access mode errors - pvConsumerPodSpec := buildPVConsumerPod(pvc.Name) + pvConsumerPodSpec := buildPVConsumerPod(p.tmpPodName, pvc.Name) pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) - if err != nil { + if err != nil && !k8serrors.IsAlreadyExists(err) { return pvcError{}, err } @@ -1376,7 +1388,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p podReadyTimeoutEnd := time.Now().Add(p.podTimeout) for { - gotPod, err := p.k8scli.CoreV1().Pods(pvConsumerPod.Namespace).Get(p.ctx, pvConsumerPod.Name, 
metav1.GetOptions{}) + gotPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Get(p.ctx, pvConsumerPodSpec.Name, metav1.GetOptions{}) if err != nil { return pvcError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) } @@ -1477,8 +1489,10 @@ func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { func (p *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { propagation := metav1.DeletePropagationForeground delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := p.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { - return err + if pod != nil { + if err := p.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { + return err + } } return nil } diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 2d5aeea..77d0b5b 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3535,6 +3535,30 @@ func Test_checkVolumeAccessModes(t *testing.T) { }, }, }, + &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "tmpPod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + }, + }, + }, + Status: corev1.PodStatus{Phase: corev1.PodPending}, + }, }, }, } { @@ -3550,8 +3574,9 @@ func Test_checkVolumeAccessModes(t *testing.T) { dstSc: tt.dstStorageClass, deletePVTimeout: 1 * time.Millisecond, podTimeout: 1 * time.Millisecond, + tmpPodName: "tmpPod", } - result, err := pvm.getPvcError(tt.input) + result, err := pvm.checkVolumeAccessModes(*tt.input) if err != nil { if tt.wantErr { req.Error(err) @@ -3560,7 +3585,6 @@ func Test_checkVolumeAccessModes(t *testing.T) { } } req.Equal(tt.expected, result) - req.Fail("failing") }) } } From 
7f3aabbe46f8efff9231bab99e373f2f1eaa0ad6 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 2022 01:05:01 -0600 Subject: [PATCH 07/41] Add option to skip validation --- pkg/migrate/migrate.go | 61 +++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 5bdb6c2..6428d88 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -47,14 +47,15 @@ var isDestScLocalVolumeProvisioner bool // Options is the set of options that should be provided to Migrate type Options struct { - SourceSCName string - DestSCName string - RsyncImage string - Namespace string - SetDefaults bool - VerboseCopy bool - SkipSourceValidation bool - PvcCopyTimeout int + SourceSCName string + DestSCName string + RsyncImage string + Namespace string + SetDefaults bool + VerboseCopy bool + SkipSourceValidation bool + SkipPVAccessModeValidation bool + PvcCopyTimeout int } // PVMigrator represents a migration context for migrating data from all srcSC volumes to @@ -81,6 +82,7 @@ func Cli() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") + flag.BoolVar(&options.SkipPVAccessModeValidation, "skip-pv-access-mode-validation", false, "skip the volume access modes validation on the destination storage provider") flag.IntVar(&options.PvcCopyTimeout, "timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.Parse() @@ -119,27 +121,30 @@ func Migrate(ctx context.Context, w *log.Logger, clientset 
k8sclient.Interface, return err } - srcPVs, err := kurlutils.PVSByStorageClass(ctx, clientset, options.SourceSCName) - if err != nil { - return fmt.Errorf("failed to get volumes using storage class %s: %w", options.SourceSCName, err) - } - pvMigrator := PVMigrator{ - ctx: ctx, - log: w, - k8scli: clientset, - srcSc: options.SourceSCName, - dstSc: options.DestSCName, - deletePVTimeout: 5 * time.Minute, - podTimeout: 10 * time.Second, - } - unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) - if err != nil { - return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) - } + // escape hatch - for DEV/TEST ONLY + if !options.SkipPVAccessModeValidation { + srcPVs, err := kurlutils.PVSByStorageClass(ctx, clientset, options.SourceSCName) + if err != nil { + return fmt.Errorf("failed to get volumes using storage class %s: %w", options.SourceSCName, err) + } + pvMigrator := PVMigrator{ + ctx: ctx, + log: w, + k8scli: clientset, + srcSc: options.SourceSCName, + dstSc: options.DestSCName, + deletePVTimeout: 5 * time.Minute, + podTimeout: 10 * time.Second, + } + unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) + if err != nil { + return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) + } - if unsupportedPVCs != nil { - PrintPVAccessModeErrors(unsupportedPVCs) - return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) + if unsupportedPVCs != nil { + PrintPVAccessModeErrors(unsupportedPVCs) + return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) + } } updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5) From d02b37cdd44da01e31238b491903900d5e801997 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 
2022 01:42:45 -0600 Subject: [PATCH 08/41] Export PVCError --- pkg/migrate/migrate.go | 36 ++++++++++++++++++------------------ pkg/migrate/migrate_test.go | 16 ++++++++-------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 6428d88..bafc561 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -212,7 +212,7 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVM } // PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors -func PrintPVAccessModeErrors(pvcErrors map[string]map[string]pvcError) { +func PrintPVAccessModeErrors(pvcErrors map[string]map[string]PVCError) { tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, '\t', 0) fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tREASON\tMESSAGE") @@ -228,8 +228,8 @@ func PrintPVAccessModeErrors(pvcErrors map[string]map[string]pvcError) { // ValidateVolumeAccessModes checks whether the provided persistent volumes support the access modes // of the destination storage class. 
// returns a map of pvc errors indexed by namespace -func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]pvcError, error) { - validationErrors := make(map[string]map[string]pvcError) +func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]PVCError, error) { + validationErrors := make(map[string]map[string]PVCError) if _, err := p.k8scli.StorageV1().StorageClasses().Get(p.ctx, p.dstSc, metav1.GetOptions{}); err != nil { return nil, fmt.Errorf("failed to get destination storage class %s: %w", p.dstSc, err) @@ -246,7 +246,7 @@ func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentV p.log.Printf("failed to check volume access mode for claim %s (%s): %s", pvc.Name, pv, err) continue } - validationErrors[pvc.Namespace] = map[string]pvcError{pvc.Name: v} + validationErrors[pvc.Namespace] = map[string]PVCError{pvc.Name: v} } return nil, nil } @@ -263,13 +263,13 @@ func (pvc pvcCtx) getNodeNameRef() string { return pvc.usedByPod.Spec.NodeName } -type pvcError struct { +type PVCError struct { reason string from string message string } -func (e *pvcError) Error() string { +func (e *PVCError) Error() string { return fmt.Sprintf("volume claim error from %s during %s: %s", e.from, e.reason, e.message) } @@ -1361,7 +1361,7 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent // checkVolumeAccessModeValid checks if the access modes of a pv are supported by the // destination storage class. 
-func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (pvcError, error) { +func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (PVCError, error) { var err error // create temp pvc for storage class @@ -1369,14 +1369,14 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p if tmpPVC, err = p.k8scli.CoreV1().PersistentVolumeClaims("default").Create( p.ctx, tmpPVC, metav1.CreateOptions{}, ); err != nil { - return pvcError{}, fmt.Errorf("failed to create temporary pvc: %w", err) + return PVCError{}, fmt.Errorf("failed to create temporary pvc: %w", err) } // consume pvc to determine any access mode errors pvConsumerPodSpec := buildPVConsumerPod(p.tmpPodName, pvc.Name) pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) if err != nil && !k8serrors.IsAlreadyExists(err) { - return pvcError{}, err + return PVCError{}, err } // cleanup pvc and pod at the end @@ -1395,12 +1395,12 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p for { gotPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Get(p.ctx, pvConsumerPodSpec.Name, metav1.GetOptions{}) if err != nil { - return pvcError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) + return PVCError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) } switch { case k8spodutils.IsPodReady(gotPod): - return pvcError{}, nil + return PVCError{}, nil default: time.Sleep(time.Second) } @@ -1414,12 +1414,12 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (p // check pvc status and get error pvcPendingError, err := p.getPvcError(tmpPVC) if err != nil { - return pvcError{}, fmt.Errorf("failed to get PVC error: %s", err) + return PVCError{}, fmt.Errorf("failed to get PVC error: %s", err) } return pvcPendingError, nil } // pod failed for other reason(s) - return 
pvcError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) + return PVCError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) } } } @@ -1503,23 +1503,23 @@ func (p *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { } // getPvcError returns the error event for why a PVC is in Pending status -func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (pvcError, error) { +func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (PVCError, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { - return pvcError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) + return PVCError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } eventSelector := p.k8scli.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) pvcEvents, err := p.k8scli.CoreV1().Events(pvc.Namespace).List(p.ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) if err != nil { - return pvcError{}, fmt.Errorf("failed to list events for PVC %s", pvc.Name) + return PVCError{}, fmt.Errorf("failed to list events for PVC %s", pvc.Name) } // get pending reason for _, event := range pvcEvents.Items { if event.Reason == "ProvisioningFailed" { - return pvcError{event.Reason, event.Source.Component, event.Message}, nil + return PVCError{event.Reason, event.Source.Component, event.Message}, nil } } - return pvcError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) + return PVCError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) } diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 77d0b5b..3ed2b01 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3106,7 +3106,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { wantErr bool resources []runtime.Object input 
map[string]corev1.PersistentVolume - expected map[string]map[string]pvcError + expected map[string]map[string]PVCError }{ { name: "no errors", @@ -3282,7 +3282,7 @@ func Test_getPvcError(t *testing.T) { wantErr bool resources []runtime.Object input *corev1.PersistentVolumeClaim - expected pvcError + expected PVCError }{ { name: "get pvc error from events", @@ -3294,7 +3294,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: pvcError{ + expected: PVCError{ reason: "ProvisioningFailed", from: "kubernetes.io/no-provisioner", message: "Only support ReadWriteOnce access mode", @@ -3351,7 +3351,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: pvcError{}, + expected: PVCError{}, wantErr: true, srcStorageClass: "srcSc", dstStorageClass: "dstSc", @@ -3405,7 +3405,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: pvcError{}, + expected: PVCError{}, wantErr: true, srcStorageClass: "srcSc", dstStorageClass: "dstSc", @@ -3440,7 +3440,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound}, }, - expected: pvcError{}, + expected: PVCError{}, wantErr: true, srcStorageClass: "srcSc", dstStorageClass: "dstSc", @@ -3483,7 +3483,7 @@ func Test_checkVolumeAccessModes(t *testing.T) { wantErr bool resources []runtime.Object input *corev1.PersistentVolumeClaim - expected pvcError + expected PVCError }{ { name: "access mode not supported", @@ -3499,7 +3499,7 @@ func Test_checkVolumeAccessModes(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: pvcError{ + expected: PVCError{ reason: "ProvisioningFailed", from: "kubernetes.io/no-provisioner", message: "Only support ReadWriteOnce access mode", From 
304e3d836b27017ec5e8e8e8416335a77d491ae0 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 2022 13:23:08 -0600 Subject: [PATCH 09/41] correctly return map --- pkg/migrate/migrate.go | 4 ++-- pkg/migrate/migrate_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index bafc561..ea127bc 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -141,7 +141,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) } - if unsupportedPVCs != nil { + if len(unsupportedPVCs) == 0 { PrintPVAccessModeErrors(unsupportedPVCs) return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) } @@ -248,7 +248,7 @@ func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentV } validationErrors[pvc.Namespace] = map[string]PVCError{pvc.Name: v} } - return nil, nil + return validationErrors, nil } type pvcCtx struct { diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 3ed2b01..a77c1b3 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3109,7 +3109,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { expected map[string]map[string]PVCError }{ { - name: "no errors", + name: "should pass due to no accessmode errors", input: map[string]corev1.PersistentVolume{ "pv0": { ObjectMeta: metav1.ObjectMeta{ @@ -3123,7 +3123,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { }, }, }, - expected: nil, + expected: make(map[string]map[string]PVCError), srcStorageClass: "srcSc", dstStorageClass: "dstSc", resources: []runtime.Object{ From 5f1f4a83f59bb1ab070595a1997495a6d177b970 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 
18 Nov 2022 13:25:23 -0600 Subject: [PATCH 10/41] print when there are validation errors --- pkg/migrate/migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index ea127bc..afa8c6c 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -141,7 +141,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) } - if len(unsupportedPVCs) == 0 { + if len(unsupportedPVCs) != 0 { PrintPVAccessModeErrors(unsupportedPVCs) return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) } From 0a40686ab37dadc0e24e0b703d6f560f3954aec8 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 2022 15:47:44 -0600 Subject: [PATCH 11/41] don't overwrite status for non-pending pvcs --- pkg/migrate/migrate.go | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index afa8c6c..8ace163 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -1339,11 +1339,32 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] } + // for testing purpose, this returns a pvc that is Pending to emulate a failure + if pvc.Status.Phase == corev1.ClaimPending { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: "default", + UID: pvc.UID, // for testing + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &sc, + AccessModes: pvc.Spec.AccessModes, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + // for testing + Status: 
corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, + } + } + return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: "default", - UID: pvc.UID, // for testing }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &sc, @@ -1354,8 +1375,6 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent }, }, }, - // for testing - Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, } } @@ -1452,7 +1471,7 @@ func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { ctx, pvc.Name, delopts, ); err != nil { if !k8serrors.IsNotFound(err) { - log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) + p.log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) return err } } @@ -1465,7 +1484,7 @@ func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { for _, pvc := range waitFor { pv, ok := pvsByPVCName[pvc] if !ok { - log.Printf("failed to find pv for temp pvc %s", pvc) + p.log.Printf("failed to find pv for temp pvc %s", pvc) continue } @@ -1474,7 +1493,7 @@ func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { if _, err := p.k8scli.CoreV1().PersistentVolumes().Get( ctx, pv.Name, metav1.GetOptions{}, ); err != nil && !k8serrors.IsNotFound(err) { - log.Printf("failed to get pv for temp pvc %s: %s", pvc, err) + p.log.Printf("failed to get pv for temp pvc %s: %s", pvc, err) } else if err != nil && k8serrors.IsNotFound(err) { break } From 24a161ead15482cfff560acfdffcc8ec511f8c14 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 2022 17:59:35 -0600 Subject: [PATCH 12/41] revert previous change --- pkg/migrate/migrate.go | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 8ace163..e284cbf 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -1339,32 
+1339,11 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] } - // for testing purpose, this returns a pvc that is Pending to emulate a failure - if pvc.Status.Phase == corev1.ClaimPending { - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: pvcName, - Namespace: "default", - UID: pvc.UID, // for testing - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: &sc, - AccessModes: pvc.Spec.AccessModes, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - // for testing - Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, - } - } - return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: "default", + UID: pvc.UID, // for testing }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &sc, @@ -1375,6 +1354,8 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent }, }, }, + // for testing + Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, } } @@ -1540,5 +1521,5 @@ func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (PVCError, e return PVCError{event.Reason, event.Source.Component, event.Message}, nil } } - return PVCError{}, fmt.Errorf("Could not determine reason for why PVC %s is in Pending status", pvc.Name) + return PVCError{}, fmt.Errorf("could not determine reason for why PVC %s is in Pending status", pvc.Name) } From 834ee8a83cd6a9e4701138dd61887142df4cdb12 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 18 Nov 2022 18:08:29 -0600 Subject: [PATCH 13/41] fix events and tab writer output --- pkg/migrate/migrate.go | 22 ++++++++++++---------- pkg/migrate/migrate_test.go | 8 ++++---- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/pkg/migrate/migrate.go 
b/pkg/migrate/migrate.go index e284cbf..493f4d9 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -67,7 +68,7 @@ type PVMigrator struct { srcSc string dstSc string deletePVTimeout time.Duration - podTimeout time.Duration + podReadyTimeout time.Duration tmpPodName string } @@ -134,7 +135,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, srcSc: options.SourceSCName, dstSc: options.DestSCName, deletePVTimeout: 5 * time.Minute, - podTimeout: 10 * time.Second, + podReadyTimeout: 30 * time.Second, } unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) if err != nil { @@ -207,16 +208,16 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVM srcSc: srcSC, dstSc: dstSC, deletePVTimeout: 5 * time.Minute, - podTimeout: 10 * time.Second, + podReadyTimeout: 30 * time.Second, }, nil } // PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors func PrintPVAccessModeErrors(pvcErrors map[string]map[string]PVCError) { - tw := tabwriter.NewWriter(os.Stdout, 0, 8, 2, '\t', 0) + tw := tabwriter.NewWriter(os.Stdout, 0, 8, 8, '\t', 0) fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tREASON\tMESSAGE") - fmt.Fprintf(tw, "---------\t---\t------\t-------\t------\n") + fmt.Fprintf(tw, "---------\t---\t------\t------\t-------\n") for ns, pvcErrs := range pvcErrors { for pvc, pvcErr := range pvcErrs { fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", ns, pvc, pvcErr.from, pvcErr.reason, pvcErr.message) @@ -246,7 +247,9 @@ func (p *PVMigrator) ValidateVolumeAccessModes(pvs 
map[string]corev1.PersistentV p.log.Printf("failed to check volume access mode for claim %s (%s): %s", pvc.Name, pv, err) continue } - validationErrors[pvc.Namespace] = map[string]PVCError{pvc.Name: v} + if v != (PVCError{}) { // test for empty struct + validationErrors[pvc.Namespace] = map[string]PVCError{pvc.Name: v} + } } return validationErrors, nil } @@ -1373,7 +1376,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (P } // consume pvc to determine any access mode errors - pvConsumerPodSpec := buildPVConsumerPod(p.tmpPodName, pvc.Name) + pvConsumerPodSpec := buildPVConsumerPod(p.tmpPodName, tmpPVC.Name) pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) if err != nil && !k8serrors.IsAlreadyExists(err) { return PVCError{}, err @@ -1391,7 +1394,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (P } }() - podReadyTimeoutEnd := time.Now().Add(p.podTimeout) + podReadyTimeoutEnd := time.Now().Add(p.podReadyTimeout) for { gotPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Get(p.ctx, pvConsumerPodSpec.Name, metav1.GetOptions{}) if err != nil { @@ -1509,8 +1512,7 @@ func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (PVCError, e return PVCError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } - eventSelector := p.k8scli.CoreV1().Events(pvc.Namespace).GetFieldSelector(&pvc.Name, &pvc.Namespace, &pvc.Kind, (*string)(&pvc.UID)) - pvcEvents, err := p.k8scli.CoreV1().Events(pvc.Namespace).List(p.ctx, metav1.ListOptions{FieldSelector: eventSelector.String()}) + pvcEvents, err := p.k8scli.CoreV1().Events(pvc.Namespace).Search(scheme.Scheme, pvc) if err != nil { return PVCError{}, fmt.Errorf("failed to list events for PVC %s", pvc.Name) } diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index a77c1b3..d1b9efd 100644 --- a/pkg/migrate/migrate_test.go +++ 
b/pkg/migrate/migrate_test.go @@ -3102,7 +3102,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { srcStorageClass string dstStorageClass string deletePVTimeout time.Duration - podTimeout time.Duration + podReadyTimeout time.Duration wantErr bool resources []runtime.Object input map[string]corev1.PersistentVolume @@ -3257,7 +3257,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { srcSc: tt.srcStorageClass, dstSc: tt.dstStorageClass, deletePVTimeout: 1 * time.Millisecond, - podTimeout: 1 * time.Millisecond, + podReadyTimeout: 1 * time.Millisecond, } result, err := pvm.ValidateVolumeAccessModes(tt.input) if err != nil { @@ -3458,7 +3458,7 @@ func Test_getPvcError(t *testing.T) { srcSc: tt.srcStorageClass, dstSc: tt.dstStorageClass, deletePVTimeout: 1 * time.Millisecond, - podTimeout: 1 * time.Millisecond, + podReadyTimeout: 1 * time.Millisecond, } result, err := pvm.getPvcError(tt.input) if err != nil { @@ -3573,7 +3573,7 @@ func Test_checkVolumeAccessModes(t *testing.T) { srcSc: tt.srcStorageClass, dstSc: tt.dstStorageClass, deletePVTimeout: 1 * time.Millisecond, - podTimeout: 1 * time.Millisecond, + podReadyTimeout: 1 * time.Millisecond, tmpPodName: "tmpPod", } result, err := pvm.checkVolumeAccessModes(*tt.input) From 8c6683d094951d72408746e4dfd544fda8c01f01 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Mon, 21 Nov 2022 16:10:45 -0600 Subject: [PATCH 14/41] Add more unit test --- pkg/migrate/migrate.go | 28 ++-- pkg/migrate/migrate_test.go | 272 +++++++++++++++++++++++++++++++++++- 2 files changed, 287 insertions(+), 13 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 493f4d9..545c959 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -69,7 +69,8 @@ type PVMigrator struct { dstSc string deletePVTimeout time.Duration podReadyTimeout time.Duration - tmpPodName string + podNameOverride string + pvcNameOverride string } // Cli uses CLI options to run Migrate @@ 
-1284,11 +1285,11 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. } // buildPVConsumerPod creates a pod spec for consuming a pvc -func buildPVConsumerPod(name, pvcName string) *corev1.Pod { - podName := name - if name == "" { +func (p *PVMigrator) buildPVConsumerPod(pvcName string) *corev1.Pod { + podName := p.podNameOverride + if podName == "" { tmp := uuid.New().String()[:5] - podName = fmt.Sprintf("pvmigrate-vol-consumer-%s", tmp) + podName = fmt.Sprintf("pvmigrate-vol-consumer-%s-%s", pvcName, tmp) } if len(podName) > 63 { podName = podName[0:31] + podName[len(podName)-32:] @@ -1335,9 +1336,12 @@ func buildPVConsumerPod(name, pvcName string) *corev1.Pod { } // buildPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. -func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { - tmp := uuid.New().String()[:5] - pvcName := fmt.Sprintf("pvmigrate-claim-%s-%s", pvc.Name, tmp) +func (p *PVMigrator) buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { + pvcName := p.pvcNameOverride + if pvcName == "" { + tmp := uuid.New().String()[:5] + pvcName = fmt.Sprintf("pvmigrate-claim-%s-%s", pvc.Name, tmp) + } if len(pvcName) > 63 { pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] } @@ -1346,7 +1350,6 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: "default", - UID: pvc.UID, // for testing }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: &sc, @@ -1357,7 +1360,8 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent }, }, }, - // for testing + // for testing/mocking + // This fields gets set and updated by the kube api server Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, } } @@ -1368,7 +1372,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (P 
var err error // create temp pvc for storage class - tmpPVC := buildTmpPVC(pvc, p.dstSc) + tmpPVC := p.buildTmpPVC(pvc, p.dstSc) if tmpPVC, err = p.k8scli.CoreV1().PersistentVolumeClaims("default").Create( p.ctx, tmpPVC, metav1.CreateOptions{}, ); err != nil { @@ -1376,7 +1380,7 @@ func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (P } // consume pvc to determine any access mode errors - pvConsumerPodSpec := buildPVConsumerPod(p.tmpPodName, tmpPVC.Name) + pvConsumerPodSpec := p.buildPVConsumerPod(tmpPVC.Name) pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) if err != nil && !k8serrors.IsAlreadyExists(err) { return PVCError{}, err diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index d1b9efd..1ffb674 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3574,7 +3574,7 @@ func Test_checkVolumeAccessModes(t *testing.T) { dstSc: tt.dstStorageClass, deletePVTimeout: 1 * time.Millisecond, podReadyTimeout: 1 * time.Millisecond, - tmpPodName: "tmpPod", + podNameOverride: "tmpPod", } result, err := pvm.checkVolumeAccessModes(*tt.input) if err != nil { @@ -3588,3 +3588,273 @@ func Test_checkVolumeAccessModes(t *testing.T) { }) } } + +func Test_buildTmpPVC(t *testing.T) { + for _, tt := range []struct { + name string + pvcNameOverride string + dstStorageClass string + input *corev1.PersistentVolumeClaim + expectedPVC *corev1.PersistentVolumeClaim + expectedName string + }{ + { + name: "generate unique temp pvc name", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: 
corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + expectedName: "pvmigrate-claim-test-pvc-", + dstStorageClass: "dstSc", + }, + { + name: "trim pvc name if longer than 63 chars", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "really-long-pvc-name-that-should-be-trimmed-to-avoid-an-error", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + expectedName: "pvmigrate-claim-really-long-pvc-trimmed-to-avoid-an-error-", + dstStorageClass: "dstSc", + }, + { + name: "override pvc name", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test-pvc", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: 
resource.MustParse("1Mi"), + }, + }, + }, + }, + pvcNameOverride: "pvc-name-override", + expectedName: "pvc-name-override", + dstStorageClass: "dstSc", + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + var pvcNameOverride string + if tt.pvcNameOverride != "" { + pvcNameOverride = tt.pvcNameOverride + } + pvm := PVMigrator{ + dstSc: tt.dstStorageClass, + pvcNameOverride: pvcNameOverride, + } + pvc := pvm.buildTmpPVC(*tt.input, tt.dstStorageClass) + req.True(strings.HasPrefix(pvc.Name, tt.expectedName)) + req.Equal(tt.expectedPVC.Spec, pvc.Spec) + }) + } +} + +func Test_buildPVConsumerPod(t *testing.T) { + for _, tt := range []struct { + name string + podNameOverride string + pvcName string + expectedPod *corev1.Pod + expectedName string + }{ + { + name: "generate unique temp pod name", + pvcName: "test-pvc", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + expectedName: "pvmigrate-vol-consumer-test-pvc-", + }, + { + name: "trim pod name if longer than 63 chars", + pvcName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: 
corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + expectedName: "pvmigrate-vol-consumer-pvc-namecause-it-will-cause-an-err-", + }, + { + name: "override temp pod name", + pvcName: "test-pvc", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + podNameOverride: "my pod name override", + expectedName: "my pod name override", + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + var podNameOverride string + if tt.podNameOverride != "" { + podNameOverride = tt.podNameOverride + } + pvm := PVMigrator{ + podNameOverride: podNameOverride, + } + pod := pvm.buildPVConsumerPod(tt.pvcName) + req.True(strings.HasPrefix(pod.Name, tt.expectedName)) + req.Equal(tt.expectedPod.Spec, pod.Spec) + }) + } +} From b44e0977134e7566e89d588ab940dc268d4d294d Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Mon, 21 Nov 2022 23:18:11 -0600 Subject: [PATCH 15/41] Add pod ready timeout out for volume validation pods --- pkg/migrate/migrate.go | 69 
++++++++++++++++++++++--------------- pkg/migrate/migrate_test.go | 8 ++--- 2 files changed, 45 insertions(+), 32 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 545c959..f40a97c 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -57,6 +57,7 @@ type Options struct { SkipSourceValidation bool SkipPVAccessModeValidation bool PvcCopyTimeout int + PodReadyTimeout int } // PVMigrator represents a migration context for migrating data from all srcSC volumes to @@ -86,6 +87,7 @@ func Cli() { flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") flag.BoolVar(&options.SkipPVAccessModeValidation, "skip-pv-access-mode-validation", false, "skip the volume access modes validation on the destination storage provider") flag.IntVar(&options.PvcCopyTimeout, "timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") + flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") flag.Parse() @@ -104,51 +106,56 @@ func Cli() { output := log.New(os.Stdout, "", 0) // this has no time prefix etc - err = Migrate(context.TODO(), output, clientset, options) - if err != nil { - fmt.Printf("%s\n", err.Error()) - os.Exit(1) - } -} - -// Migrate moves data and PVCs from one StorageClass to another -func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options Options) error { - err := validateStorageClasses(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.SkipSourceValidation) - if err != nil { - return err - } - - matchingPVCs, namespaces, err := getPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.Namespace) - if err != nil { - return err - } - // escape hatch - for DEV/TEST ONLY if 
!options.SkipPVAccessModeValidation { - srcPVs, err := kurlutils.PVSByStorageClass(ctx, clientset, options.SourceSCName) + srcPVs, err := kurlutils.PVSByStorageClass(context.TODO(), clientset, options.SourceSCName) if err != nil { - return fmt.Errorf("failed to get volumes using storage class %s: %w", options.SourceSCName, err) + fmt.Printf("failed to get volumes using storage class %s: %s", options.SourceSCName, err) + os.Exit(1) } + pvMigrator := PVMigrator{ - ctx: ctx, - log: w, + ctx: context.TODO(), + log: output, k8scli: clientset, srcSc: options.SourceSCName, dstSc: options.DestSCName, deletePVTimeout: 5 * time.Minute, - podReadyTimeout: 30 * time.Second, + podReadyTimeout: time.Duration(options.PodReadyTimeout), } unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) if err != nil { - return fmt.Errorf("failed to validate volume access modes for destination storage class %s", options.DestSCName) + fmt.Printf("failed to validate volume access modes for destination storage class %s", options.DestSCName) + os.Exit(1) } if len(unsupportedPVCs) != 0 { PrintPVAccessModeErrors(unsupportedPVCs) - return fmt.Errorf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) + fmt.Printf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) + os.Exit(0) } } + // start the migration + err = Migrate(context.TODO(), output, clientset, options) + if err != nil { + fmt.Printf("%s\n", err.Error()) + os.Exit(1) + } +} + +// Migrate moves data and PVCs from one StorageClass to another +func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options Options) error { + err := validateStorageClasses(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.SkipSourceValidation) + if err != nil { + return err + } + + matchingPVCs, namespaces, err := getPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, 
options.Namespace) + if err != nil { + return err + } + updatedMatchingPVCs, err := scaleDownPods(ctx, w, clientset, matchingPVCs, time.Second*5) if err != nil { return fmt.Errorf("failed to scale down pods: %w", err) @@ -186,7 +193,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, } // NewPVMigrator returns a PV migration context -func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVMigrator, error) { +func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podReadyTimeout time.Duration) (*PVMigrator, error) { k8scli, err := k8sclient.NewForConfig(cfg) if err != nil { return nil, fmt.Errorf("failed to create kubernetes client: %w", err) @@ -202,6 +209,12 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVM return nil, fmt.Errorf("no logger provided") } + podReadinessTimeout := podReadyTimeout + if podReadinessTimeout == 0 { + // default + podReadinessTimeout = 60 * time.Second + } + return &PVMigrator{ ctx: context.Background(), log: log, @@ -209,7 +222,7 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string) (*PVM srcSc: srcSC, dstSc: dstSC, deletePVTimeout: 5 * time.Minute, - podReadyTimeout: 30 * time.Second, + podReadyTimeout: podReadinessTimeout, }, nil } diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 1ffb674..4f316f5 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -927,7 +927,7 @@ func TestGetPVCs(t *testing.T) { func TestNewPVMigrator(t *testing.T) { // test empty logger - _, err := NewPVMigrator(&rest.Config{}, nil, "src", "dst") + _, err := NewPVMigrator(&rest.Config{}, nil, "src", "dst", 0) if err == nil || err.Error() != "no logger provided" { t.Errorf("expected failure creating object: %v", err) } @@ -935,19 +935,19 @@ func TestNewPVMigrator(t *testing.T) { logger := log.New(io.Discard, "", 0) // test src storage class - _, err = NewPVMigrator(&rest.Config{}, 
logger, "", "dst") + _, err = NewPVMigrator(&rest.Config{}, logger, "", "dst", 0) if err == nil || err.Error() != "empty source storage class" { t.Errorf("expected failure creating object: %v", err) } // test empty dst sc - _, err = NewPVMigrator(&rest.Config{}, logger, "src", "") + _, err = NewPVMigrator(&rest.Config{}, logger, "src", "", 0) if err == nil || err.Error() != "empty destination storage class" { t.Errorf("expected failure creating object: %v", err) } // happy path - _, err = NewPVMigrator(&rest.Config{}, logger, "src", "dst") + _, err = NewPVMigrator(&rest.Config{}, logger, "src", "dst", 0) if err != nil { t.Errorf("unexpected failure creating object: %v", err) } From bacfe886c54f2772521ecf0376b3e8213d584ab8 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Mon, 21 Nov 2022 23:48:31 -0600 Subject: [PATCH 16/41] convert pod ready timeout to second units --- pkg/migrate/migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index f40a97c..a9421ca 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -121,7 +121,7 @@ func Cli() { srcSc: options.SourceSCName, dstSc: options.DestSCName, deletePVTimeout: 5 * time.Minute, - podReadyTimeout: time.Duration(options.PodReadyTimeout), + podReadyTimeout: time.Duration(options.PodReadyTimeout) * time.Second, } unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) if err != nil { From 26ab28b6a8108a08a0f61ea504bfb3b2c2257571 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Tue, 22 Nov 2022 00:01:35 -0600 Subject: [PATCH 17/41] accept an int for timeout instead of duration --- pkg/migrate/migrate.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index a9421ca..3017cd9 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -193,7 +193,7 @@ 
func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, } // NewPVMigrator returns a PV migration context -func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podReadyTimeout time.Duration) (*PVMigrator, error) { +func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podReadyTimeout int) (*PVMigrator, error) { k8scli, err := k8sclient.NewForConfig(cfg) if err != nil { return nil, fmt.Errorf("failed to create kubernetes client: %w", err) @@ -209,7 +209,7 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podRe return nil, fmt.Errorf("no logger provided") } - podReadinessTimeout := podReadyTimeout + podReadinessTimeout := time.Duration(podReadyTimeout) if podReadinessTimeout == 0 { // default podReadinessTimeout = 60 * time.Second @@ -222,7 +222,7 @@ func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podRe srcSc: srcSC, dstSc: dstSC, deletePVTimeout: 5 * time.Minute, - podReadyTimeout: podReadinessTimeout, + podReadyTimeout: podReadinessTimeout * time.Second, }, nil } From 59f1f058578ed1bc2042fbf9f28da6395801acbd Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Tue, 22 Nov 2022 22:15:47 -0600 Subject: [PATCH 18/41] specify PVC copy operation timeout in seconds --- pkg/migrate/migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 3017cd9..68e328f 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -161,7 +161,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return fmt.Errorf("failed to scale down pods: %w", err) } - err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Duration(options.PvcCopyTimeout)) + err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, 
options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Duration(options.PvcCopyTimeout)*time.Second) if err != nil { return err } From a44c4ba927579081a82cb84430daf14430277454 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Tue, 22 Nov 2022 22:33:49 -0600 Subject: [PATCH 19/41] rename flag --- pkg/migrate/migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 68e328f..d0ffed0 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -86,7 +86,7 @@ func Cli() { flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") flag.BoolVar(&options.SkipPVAccessModeValidation, "skip-pv-access-mode-validation", false, "skip the volume access modes validation on the destination storage provider") - flag.IntVar(&options.PvcCopyTimeout, "timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") + flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") flag.Parse() From ee23c2cbe9187d906abb9b486a9281c0cc659f68 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 30 Nov 2022 17:05:59 -0600 Subject: [PATCH 20/41] Refactor WIP --- go.mod | 6 +- go.sum | 22 +-- pkg/k8sutil/pod.go | 24 +++ pkg/k8sutil/pod_test.go | 132 +++++++++++++ pkg/k8sutil/storage.go | 32 ++++ pkg/k8sutil/storage_test.go | 251 
++++++++++++++++++++++++ pkg/migrate/migrate.go | 370 ++---------------------------------- pkg/migrate/migrate_test.go | 28 +-- pkg/preflight/validate.go | 350 ++++++++++++++++++++++++++++++++++ 9 files changed, 828 insertions(+), 387 deletions(-) create mode 100644 pkg/k8sutil/pod.go create mode 100644 pkg/k8sutil/pod_test.go create mode 100644 pkg/k8sutil/storage.go create mode 100644 pkg/k8sutil/storage_test.go create mode 100644 pkg/preflight/validate.go diff --git a/go.mod b/go.mod index c966805..e25eec5 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,12 @@ module github.com/replicatedhq/pvmigrate go 1.19 require ( - github.com/google/uuid v1.3.0 - github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867 + github.com/google/go-cmp v0.5.9 github.com/stretchr/testify v1.8.1 k8s.io/api v0.25.4 k8s.io/apimachinery v0.25.4 k8s.io/client-go v0.25.4 - k8s.io/kubernetes v1.25.3 + k8s.io/kubernetes v1.25.4 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed sigs.k8s.io/controller-runtime v0.13.1 ) @@ -41,7 +40,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/go.sum b/go.sum index 2f49c0a..b6e6ee9 100644 --- a/go.sum +++ b/go.sum @@ -24,7 +24,6 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -32,14 +31,13 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -80,12 +78,10 @@ github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -105,8 +101,6 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -118,15 +112,13 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.3.0 h1:kUMoxMoQG3ogk/QWyKh3zibV7BKZ+xBpWil1cTylVqc= +github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867 h1:2nu0z28pXfM6J7qXQiqXsc143aAHggB5WldO7FGqMC8= -github.com/replicatedhq/kurl v0.0.0-20221116172021-8ac37b7b9867/go.mod h1:oBUm8d7up1EZbU4EdvtjOMUeYFeslgkionv0CRbf52Q= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -143,9 +135,9 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.uber.org/atomic 
v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -267,8 +259,8 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/kubernetes v1.25.3 h1:Ljx/Ew9+dt7rN9ob3V+N/aoDy7nDSbmr35IbYGRTyqE= -k8s.io/kubernetes v1.25.3/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= +k8s.io/kubernetes v1.25.4 h1:M1+MR8IxE64zHhSSDn30twChLaOI+p0Kt77pvyQMKwU= +k8s.io/kubernetes v1.25.4/go.mod h1:lvEY+3iJhh+sGIK1LorGkI56rW0eLGsfalnp68wQwYU= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg= diff --git a/pkg/k8sutil/pod.go b/pkg/k8sutil/pod.go new file mode 100644 index 0000000..359da25 --- /dev/null +++ b/pkg/k8sutil/pod.go @@ -0,0 +1,24 @@ +package k8sutil + +import ( + corev1 "k8s.io/api/core/v1" +) + +// HasPVC returs true if provided pod has provided pvc among its volumes. 
+func HasPVC(pod corev1.Pod, pvc corev1.PersistentVolumeClaim) bool { + if pod.Namespace != pvc.Namespace { + return false + } + + for _, vol := range pod.Spec.Volumes { + if vol.PersistentVolumeClaim == nil { + continue + } + if vol.PersistentVolumeClaim.ClaimName != pvc.Name { + continue + } + return true + } + + return false +} diff --git a/pkg/k8sutil/pod_test.go b/pkg/k8sutil/pod_test.go new file mode 100644 index 0000000..ca7e861 --- /dev/null +++ b/pkg/k8sutil/pod_test.go @@ -0,0 +1,132 @@ +package k8sutil + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestPodUsesPVC(t *testing.T) { + for _, tt := range []struct { + name string + expected bool + pod corev1.Pod + pvc corev1.PersistentVolumeClaim + }{ + { + name: "should find pvc in pod", + expected: true, + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "teste", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "vol", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc", + }, + }, + }, + }, + }, + }, + pvc: corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "teste", + }, + }, + }, + { + name: "should return false if no claim ref is present in the pv", + expected: false, + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "teste", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "vol", + }, + }, + }, + }, + pvc: corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "teste", + }, + }, + }, + { + name: "should return false if pvc and pod namespaces are different", + expected: false, + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "teste", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "vol", + VolumeSource: corev1.VolumeSource{ + 
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc", + }, + }, + }, + }, + }, + }, + pvc: corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "another-namespace", + }, + }, + }, + { + name: "should return false if pod does not use pvc", + expected: false, + pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "teste", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "vol", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "another-pvc", + }, + }, + }, + }, + }, + }, + pvc: corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "teste", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + result := HasPVC(tt.pod, tt.pvc) + if result != tt.expected { + t.Errorf("expected %v, received %v", tt.expected, result) + } + }) + } +} diff --git a/pkg/k8sutil/storage.go b/pkg/k8sutil/storage.go new file mode 100644 index 0000000..6ae71ab --- /dev/null +++ b/pkg/k8sutil/storage.go @@ -0,0 +1,32 @@ +package k8sutil + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// PVSByStorageClass returns a map of persistent volumes using the provided storage class name. +// returned pvs map is indexed by pv's name. 
+func PVsByStorageClass(ctx context.Context, cli kubernetes.Interface, scname string) (map[string]corev1.PersistentVolume, error) { + if _, err := cli.StorageV1().StorageClasses().Get(ctx, scname, metav1.GetOptions{}); err != nil { + return nil, fmt.Errorf("failed to get storage class %s: %w", scname, err) + } + + allpvs, err := cli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get persistent volumes: %w", err) + } + + pvs := map[string]corev1.PersistentVolume{} + for _, pv := range allpvs.Items { + if pv.Spec.StorageClassName != scname { + continue + } + pvs[pv.Name] = *pv.DeepCopy() + } + return pvs, nil +} diff --git a/pkg/k8sutil/storage_test.go b/pkg/k8sutil/storage_test.go new file mode 100644 index 0000000..3866634 --- /dev/null +++ b/pkg/k8sutil/storage_test.go @@ -0,0 +1,251 @@ +package k8sutil + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" +) + +func TestPVCSForPVs(t *testing.T) { + for _, tt := range []struct { + name string + err string + input map[string]corev1.PersistentVolume + expected map[string]corev1.PersistentVolumeClaim + objs []runtime.Object + }{ + { + name: "should fail if pvc does not havel a claimref", + err: "pv pv0 without associated PVC", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + }, + }, + { + name: "should fail if pvc is not found", + err: "failed to get pvc do-not-exist for pv pv0", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + 
ClaimRef: &corev1.ObjectReference{ + Name: "do-not-exist", + }, + }, + }, + }, + }, + { + name: "should be able to find space in detached pvc", + input: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + }, + }, + }, + }, + objs: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + }, + }, + }, + expected: map[string]corev1.PersistentVolumeClaim{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + kcli := fake.NewSimpleClientset(tt.objs...) + result, err := PVCSForPVs(context.Background(), kcli, tt.input) + if err != nil { + if len(tt.err) == 0 { + t.Errorf("unexpected error: %s", err) + } else if !strings.Contains(err.Error(), tt.err) { + t.Errorf("expecting %q, %q received instead", tt.err, err) + } + return + } + + if len(tt.err) > 0 { + t.Errorf("expecting error %q, nil received instead", tt.err) + } + + if diff := cmp.Diff(tt.expected, result); diff != "" { + t.Errorf("unexpected return: %s", diff) + } + }) + } +} + +func TestPVSByStorageClass(t *testing.T) { + for _, tt := range []struct { + name string + err string + scname string + expected map[string]corev1.PersistentVolume + objs []runtime.Object + }{ + { + name: "should fail if storage class was not found", + scname: "not-found", + err: "failed to get storage class", + }, + { + name: "should pass when multiple volumes are present", + scname: "default", + expected: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + "pv1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + }, + objs: []runtime.Object{ + 
&storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + }, + }, + { + name: "should pass when multiple volumes of different classes are present", + scname: "default", + expected: map[string]corev1.PersistentVolume{ + "pv0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + "pv1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + }, + objs: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv2", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "another", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv3", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "yet-other-class", + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + kcli := fake.NewSimpleClientset(tt.objs...) 
+ result, err := PVSByStorageClass(context.Background(), kcli, tt.scname) + if err != nil { + if len(tt.err) == 0 { + t.Errorf("unexpected error: %s", err) + } else if !strings.Contains(err.Error(), tt.err) { + t.Errorf("expecting %q, %q received instead", tt.err, err) + } + return + } + + if len(tt.err) > 0 { + t.Errorf("expecting error %q, nil received instead", tt.err) + } + + if diff := cmp.Diff(tt.expected, result); diff != "" { + t.Errorf("unexpected return: %s", diff) + } + }) + } +} diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index d0ffed0..481b2f3 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -13,17 +13,12 @@ import ( "text/tabwriter" "time" - "github.com/google/uuid" - kurlutils "github.com/replicatedhq/kurl/pkg/k8sutil" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" "sigs.k8s.io/controller-runtime/pkg/client/config" ) @@ -60,23 +55,10 @@ type Options struct { PodReadyTimeout int } -// PVMigrator represents a migration context for migrating data from all srcSC volumes to -// dstSc volumes. 
-type PVMigrator struct { - ctx context.Context - log *log.Logger - k8scli k8sclient.Interface - srcSc string - dstSc string - deletePVTimeout time.Duration - podReadyTimeout time.Duration - podNameOverride string - pvcNameOverride string -} - // Cli uses CLI options to run Migrate func Cli() { var options Options + var dryRun bool flag.StringVar(&options.SourceSCName, "source-sc", "", "storage provider name to migrate from") flag.StringVar(&options.DestSCName, "dest-sc", "", "storage provider name to migrate to") @@ -88,6 +70,7 @@ func Cli() { flag.BoolVar(&options.SkipPVAccessModeValidation, "skip-pv-access-mode-validation", false, "skip the volume access modes validation on the destination storage provider") flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") + flag.BoolVar(&dryRun, "dry-run", false, "run validation checks without running the migrations") flag.Parse() @@ -132,15 +115,17 @@ func Cli() { if len(unsupportedPVCs) != 0 { PrintPVAccessModeErrors(unsupportedPVCs) fmt.Printf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) - os.Exit(0) + os.Exit(2) } } // start the migration - err = Migrate(context.TODO(), output, clientset, options) - if err != nil { - fmt.Printf("%s\n", err.Error()) - os.Exit(1) + if !dryRun { + err = Migrate(context.TODO(), output, clientset, options) + if err != nil { + fmt.Printf("%s\n", err.Error()) + os.Exit(1) + } } } @@ -192,87 +177,19 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return nil } -// NewPVMigrator returns a PV migration context -func NewPVMigrator(cfg *rest.Config, log *log.Logger, srcSC, dstSC string, podReadyTimeout int) (*PVMigrator, error) { - 
k8scli, err := k8sclient.NewForConfig(cfg) - if err != nil { - return nil, fmt.Errorf("failed to create kubernetes client: %w", err) - } - - if srcSC == "" { - return nil, fmt.Errorf("empty source storage class") - } - if dstSC == "" { - return nil, fmt.Errorf("empty destination storage class") - } - if log == nil { - return nil, fmt.Errorf("no logger provided") - } - - podReadinessTimeout := time.Duration(podReadyTimeout) - if podReadinessTimeout == 0 { - // default - podReadinessTimeout = 60 * time.Second - } - - return &PVMigrator{ - ctx: context.Background(), - log: log, - k8scli: k8scli, - srcSc: srcSC, - dstSc: dstSC, - deletePVTimeout: 5 * time.Minute, - podReadyTimeout: podReadinessTimeout * time.Second, - }, nil -} - -// PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors -func PrintPVAccessModeErrors(pvcErrors map[string]map[string]PVCError) { - tw := tabwriter.NewWriter(os.Stdout, 0, 8, 8, '\t', 0) - fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") - fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tREASON\tMESSAGE") - fmt.Fprintf(tw, "---------\t---\t------\t------\t-------\n") - for ns, pvcErrs := range pvcErrors { - for pvc, pvcErr := range pvcErrs { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", ns, pvc, pvcErr.from, pvcErr.reason, pvcErr.message) - } - } - tw.Flush() -} - -// ValidateVolumeAccessModes checks whether the provided persistent volumes support the access modes -// of the destination storage class. 
-// returns a map of pvc errors indexed by namespace -func (p *PVMigrator) ValidateVolumeAccessModes(pvs map[string]corev1.PersistentVolume) (map[string]map[string]PVCError, error) { - validationErrors := make(map[string]map[string]PVCError) - - if _, err := p.k8scli.StorageV1().StorageClasses().Get(p.ctx, p.dstSc, metav1.GetOptions{}); err != nil { - return nil, fmt.Errorf("failed to get destination storage class %s: %w", p.dstSc, err) - } - - pvcs, err := kurlutils.PVCSForPVs(p.ctx, p.k8scli, pvs) - if err != nil { - return nil, fmt.Errorf("failed to get pv to pvc mapping: %w", err) - } - - for pv, pvc := range pvcs { - v, err := p.checkVolumeAccessModes(pvc) - if err != nil { - p.log.Printf("failed to check volume access mode for claim %s (%s): %s", pvc.Name, pv, err) - continue - } - if v != (PVCError{}) { // test for empty struct - validationErrors[pvc.Namespace] = map[string]PVCError{pvc.Name: v} - } - } - return validationErrors, nil -} type pvcCtx struct { claim *corev1.PersistentVolumeClaim usedByPod *corev1.Pod } +func (p *pvcCtx) Copy() *pvcCtx { + return &pvcCtx{ + p.claim.DeepCopy(), + p.usedByPod.DeepCopy(), + } +} + func (pvc pvcCtx) getNodeNameRef() string { if pvc.usedByPod == nil { return "" @@ -280,15 +197,6 @@ func (pvc pvcCtx) getNodeNameRef() string { return pvc.usedByPod.Spec.NodeName } -type PVCError struct { - reason string - from string - message string -} - -func (e *PVCError) Error() string { - return fmt.Sprintf("volume claim error from %s during %s: %s", e.from, e.reason, e.message) -} // swapDefaultStorageClasses attempts to set newDefaultSC as the default StorageClass // if oldDefaultSC was set as the default, then it will be unset first @@ -1296,249 +1204,3 @@ func resetReclaimPolicy(ctx context.Context, w *log.Logger, clientset k8sclient. 
return nil } - -// buildPVConsumerPod creates a pod spec for consuming a pvc -func (p *PVMigrator) buildPVConsumerPod(pvcName string) *corev1.Pod { - podName := p.podNameOverride - if podName == "" { - tmp := uuid.New().String()[:5] - podName = fmt.Sprintf("pvmigrate-vol-consumer-%s-%s", pvcName, tmp) - } - if len(podName) > 63 { - podName = podName[0:31] + podName[len(podName)-32:] - } - return &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvcName, - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - } -} - -// buildPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. 
-func (p *PVMigrator) buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { - pvcName := p.pvcNameOverride - if pvcName == "" { - tmp := uuid.New().String()[:5] - pvcName = fmt.Sprintf("pvmigrate-claim-%s-%s", pvc.Name, tmp) - } - if len(pvcName) > 63 { - pvcName = pvcName[0:31] + pvcName[len(pvcName)-32:] - } - - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: pvcName, - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: &sc, - AccessModes: pvc.Spec.AccessModes, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - // for testing/mocking - // This fields gets set and updated by the kube api server - Status: corev1.PersistentVolumeClaimStatus{Phase: pvc.Status.Phase}, - } -} - -// checkVolumeAccessModeValid checks if the access modes of a pv are supported by the -// destination storage class. -func (p *PVMigrator) checkVolumeAccessModes(pvc corev1.PersistentVolumeClaim) (PVCError, error) { - var err error - - // create temp pvc for storage class - tmpPVC := p.buildTmpPVC(pvc, p.dstSc) - if tmpPVC, err = p.k8scli.CoreV1().PersistentVolumeClaims("default").Create( - p.ctx, tmpPVC, metav1.CreateOptions{}, - ); err != nil { - return PVCError{}, fmt.Errorf("failed to create temporary pvc: %w", err) - } - - // consume pvc to determine any access mode errors - pvConsumerPodSpec := p.buildPVConsumerPod(tmpPVC.Name) - pvConsumerPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Create(p.ctx, pvConsumerPodSpec, metav1.CreateOptions{}) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return PVCError{}, err - } - - // cleanup pvc and pod at the end - defer func() { - if err = p.deleteTmpPVC(tmpPVC); err != nil { - p.log.Printf("failed to delete tmp claim: %s", err) - } - }() - defer func() { - if err = p.deletePVConsumerPod(pvConsumerPod); err != nil { - 
p.log.Printf("failed to delete pv consumer pod %s: %s", pvConsumerPod.Name, err) - } - }() - - podReadyTimeoutEnd := time.Now().Add(p.podReadyTimeout) - for { - gotPod, err := p.k8scli.CoreV1().Pods(pvConsumerPodSpec.Namespace).Get(p.ctx, pvConsumerPodSpec.Name, metav1.GetOptions{}) - if err != nil { - return PVCError{}, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) - } - - switch { - case k8spodutils.IsPodReady(gotPod): - return PVCError{}, nil - default: - time.Sleep(time.Second) - } - - if time.Now().After(podReadyTimeoutEnd) { - // The volume consumer pod never went into running phase which means it's probably an error - // with provisioning the volume. - // A pod in Pending phase means the API Server has created the resource and stored it in etcd, - // but the pod has not been scheduled yet, nor have container images been pulled from the registry. - if gotPod.Status.Phase == corev1.PodPending { - // check pvc status and get error - pvcPendingError, err := p.getPvcError(tmpPVC) - if err != nil { - return PVCError{}, fmt.Errorf("failed to get PVC error: %s", err) - } - return pvcPendingError, nil - } - // pod failed for other reason(s) - return PVCError{}, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) - } - } -} - -// deleteTmpPVC deletes the provided pvc from the default namespace and waits until the -// backing pv dissapear as well (this is mandatory so we don't leave any orphan pv as this would -// cause pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is -// returned. 
-func (p *PVMigrator) deleteTmpPVC(pvc *corev1.PersistentVolumeClaim) error { - // Cleanup should use background context so as not to fail if context has already been canceled - ctx := context.Background() - - pvs, err := p.k8scli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("failed to list persistent volumes: %w", err) - } - - pvsByPVCName := map[string]corev1.PersistentVolume{} - for _, pv := range pvs.Items { - if pv.Spec.ClaimRef == nil { - continue - } - pvsByPVCName[pv.Spec.ClaimRef.Name] = pv - } - - var waitFor []string - propagation := metav1.DeletePropagationForeground - delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} - if err := p.k8scli.CoreV1().PersistentVolumeClaims("default").Delete( - ctx, pvc.Name, delopts, - ); err != nil { - if !k8serrors.IsNotFound(err) { - p.log.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) - return err - } - } - waitFor = append(waitFor, pvc.Name) - - timeout := time.NewTicker(5 * time.Minute) - interval := time.NewTicker(5 * time.Second) - defer timeout.Stop() - defer interval.Stop() - for _, pvc := range waitFor { - pv, ok := pvsByPVCName[pvc] - if !ok { - p.log.Printf("failed to find pv for temp pvc %s", pvc) - continue - } - - for { - // break the loop as soon as we can't find the pv anymore. 
- if _, err := p.k8scli.CoreV1().PersistentVolumes().Get( - ctx, pv.Name, metav1.GetOptions{}, - ); err != nil && !k8serrors.IsNotFound(err) { - p.log.Printf("failed to get pv for temp pvc %s: %s", pvc, err) - } else if err != nil && k8serrors.IsNotFound(err) { - break - } - - select { - case <-interval.C: - continue - case <-timeout.C: - return fmt.Errorf("failed to delete pvs: timeout") - } - } - } - return nil -} - -// deletePVConsumerPod removes the pod resource from the api servere -func (p *PVMigrator) deletePVConsumerPod(pod *corev1.Pod) error { - propagation := metav1.DeletePropagationForeground - delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} - if pod != nil { - if err := p.k8scli.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { - return err - } - } - return nil -} - -// getPvcError returns the error event for why a PVC is in Pending status -func (p *PVMigrator) getPvcError(pvc *corev1.PersistentVolumeClaim) (PVCError, error) { - // no need to inspect pvc - if pvc.Status.Phase != corev1.ClaimPending { - return PVCError{}, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) - } - - pvcEvents, err := p.k8scli.CoreV1().Events(pvc.Namespace).Search(scheme.Scheme, pvc) - if err != nil { - return PVCError{}, fmt.Errorf("failed to list events for PVC %s", pvc.Name) - } - - // get pending reason - for _, event := range pvcEvents.Items { - if event.Reason == "ProvisioningFailed" { - return PVCError{event.Reason, event.Source.Component, event.Message}, nil - } - } - return PVCError{}, fmt.Errorf("could not determine reason for why PVC %s is in Pending status", pvc.Name) -} diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 4f316f5..a7bd9e9 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3109,7 +3109,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { expected map[string]map[string]PVCError }{ { - name: "should pass due to no accessmode 
errors", + name: "With compatible access modes, expect no errors", input: map[string]corev1.PersistentVolume{ "pv0": { ObjectMeta: metav1.ObjectMeta{ @@ -3156,7 +3156,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { }, }, { - name: "should fail when there is no matching pvc for a pv", + name: "When there is no matching pvc for a pv, expect error", input: map[string]corev1.PersistentVolume{ "pv0": { ObjectMeta: metav1.ObjectMeta{ @@ -3218,7 +3218,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { }, }, { - name: "should fail when destination storage class is not found", + name: "When destination storage class is not found, expect error", input: map[string]corev1.PersistentVolume{ "pv0": { ObjectMeta: metav1.ObjectMeta{ @@ -3285,7 +3285,7 @@ func Test_getPvcError(t *testing.T) { expected PVCError }{ { - name: "get pvc error from events", + name: "When there is a PVC error, expect ProvisioningFailed event", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -3342,7 +3342,7 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "no events that match the reason ProvisioningFailed", + name: "When PVC event reason is not ProvisioningFailed, expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -3396,7 +3396,7 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "no events for pvc", + name: "When PVC is pending due to an error but there are no events for it, expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -3431,7 +3431,7 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "pvc is not in Pending status", + name: "When PVC is not in Pending status, expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -3486,7 +3486,7 @@ func Test_checkVolumeAccessModes(t *testing.T) { expected PVCError }{ { - name: "access mode not supported", + name: "When the PVC access mode is not 
supported by destination storage provider, expect PVCError", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -3599,7 +3599,7 @@ func Test_buildTmpPVC(t *testing.T) { expectedName string }{ { - name: "generate unique temp pvc name", + name: "When PVC name is not overridden, expect unique temp pvc name", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pvc", @@ -3628,7 +3628,7 @@ func Test_buildTmpPVC(t *testing.T) { dstStorageClass: "dstSc", }, { - name: "trim pvc name if longer than 63 chars", + name: "When PVC name is longer than 63 chars, expect name to be trimmed", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "really-long-pvc-name-that-should-be-trimmed-to-avoid-an-error", @@ -3657,7 +3657,7 @@ func Test_buildTmpPVC(t *testing.T) { dstStorageClass: "dstSc", }, { - name: "override pvc name", + name: "When PVC name is overriden, expect non-UID generated name", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "my-test-pvc", @@ -3713,7 +3713,7 @@ func Test_buildPVConsumerPod(t *testing.T) { expectedName string }{ { - name: "generate unique temp pod name", + name: "When pod name not overriden, expect unique pod name", pvcName: "test-pvc", expectedPod: &corev1.Pod{ TypeMeta: metav1.TypeMeta{ @@ -3756,7 +3756,7 @@ func Test_buildPVConsumerPod(t *testing.T) { expectedName: "pvmigrate-vol-consumer-test-pvc-", }, { - name: "trim pod name if longer than 63 chars", + name: "When pod name is longer than 63 chars, expect pod name to be trimmed", pvcName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", expectedPod: &corev1.Pod{ TypeMeta: metav1.TypeMeta{ @@ -3799,7 +3799,7 @@ func Test_buildPVConsumerPod(t *testing.T) { expectedName: "pvmigrate-vol-consumer-pvc-namecause-it-will-cause-an-err-", }, { - name: "override temp pod name", + name: "When pod name is overriden, expect non-UID name", pvcName: "test-pvc", expectedPod: &corev1.Pod{ 
TypeMeta: metav1.TypeMeta{ diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go new file mode 100644 index 0000000..260a7ff --- /dev/null +++ b/pkg/preflight/validate.go @@ -0,0 +1,350 @@ +package preflight + +import ( + "context" + "fmt" + "io" + "log" + "text/tabwriter" + "time" + + "github.com/replicatedhq/pvmigrate/pkg/k8sutil" + "github.com/replicatedhq/pvmigrate/pkg/migrate" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" +) + +type pvcFailure struct { + reason string + from string + message string +} + +type ValidationFailure struct { + Namespace string + Resource string + Source string + Message string +} + +// Validate runs preflight check on storage volumes returning a list of failures +func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options migrate.Options) ([]ValidationFailure, error) { + // validate access modes for all PVCs using the d source storage class + pvcs, err := pvcsForStorageClass(ctx, w, clientset, options.SourceSCName, options.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to get PVCs for storage %s: %s", options.SourceSCName, err) + } + pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, time.Duration(options.PodReadyTimeout), pvcs) + if err != nil { + return nil, fmt.Errorf("failed validate PVC access modes: %s", err) + } + return toValidationFailures(pvcAccesModeFailures), nil +} + +// PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors +func PrintValidationFailures(stream io.Writer, failures []ValidationFailure) { + tw := tabwriter.NewWriter(stream, 0, 8, 8, '\t', 0) + fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") + 
fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tMESSAGE") + fmt.Fprintf(tw, "---------\t---\t------\t-------\n") + for _, failure := range failures { + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", failure.Namespace, failure.Resource, failure.Source, failure.Message) + } + tw.Flush() +} + +func toValidationFailures(pvcFailures map[string]map[string]pvcFailure) []ValidationFailure { + var vFailures []ValidationFailure + for ns, failures := range pvcFailures { + for name, pvcFailure := range failures { + vFailures = append(vFailures, ValidationFailure{ns, name, pvcFailure.from, pvcFailure.message}) + } + } + return vFailures +} + +// validateVolumeAccessModes checks whether the provided persistent volumes support the access modes +// of the destination storage class. +// returns a map of pvc validation failures indexed by namespace +func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, podReadyTimeout time.Duration, pvcs map[string]corev1.PersistentVolumeClaim) (map[string]map[string]pvcFailure, error) { + volAccessModeFailures := make(map[string]map[string]pvcFailure) + + if _, err := client.StorageV1().StorageClasses().Get(ctx, dstSC, metav1.GetOptions{}); err != nil { + return nil, fmt.Errorf("failed to get destination storage class %s: %w", dstSC, err) + } + + for _, pvc := range pvcs { + v, err := checkVolumeAccessModes(ctx, l, client, dstSC, pvc, podReadyTimeout) + if err != nil { + l.Printf("failed to check volume access mode for PVC %s: %s", pvc.Name, err) + continue + } + if v != nil { + volAccessModeFailures[pvc.Namespace] = map[string]pvcFailure{pvc.Name: *v} + } + } + return volAccessModeFailures, nil +} + +// buildPVCConsumerPod creates a pod spec for consuming a pvc +func buildPVCConsumerPod(pvcName, namespace string) *corev1.Pod { + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: newK8sName("pvmigrate-preflight-pvc-consumer", 
pvcName), + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvcName, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + } +} + +// buildTmpPVC creates a temporary PVC requesting for 1Mi of storage for a provided storage class name. +func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: newK8sName("pvmigrate-preflight", pvc.Name), + Namespace: pvc.Namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &sc, + AccessModes: pvc.Spec.AccessModes, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + } +} + +// checkVolumeAccessModeValid checks if the access modes of a pv are supported by the +// destination storage class. 
+func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout time.Duration) (*pvcFailure, error) { + var err error + + // create temp pvc for storage class + tmpPVC := buildTmpPVC(pvc, dstSC) + if tmpPVC, err = client.CoreV1().PersistentVolumeClaims("default").Create( + ctx, tmpPVC, metav1.CreateOptions{}, + ); err != nil { + return nil, fmt.Errorf("failed to create temporary pvc: %w", err) + } + + // consume pvc to determine any access mode errors + pvcConsumerPodSpec := buildPVCConsumerPod(tmpPVC.Name, tmpPVC.Namespace) + pvcConsumerPod, err := client.CoreV1().Pods(pvcConsumerPodSpec.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return nil, err + } + + // cleanup pvc and pod at the end + defer func() { + if err = deleteTmpPVC(l, client, tmpPVC); err != nil { + l.Printf("failed to cleanup tmp claim: %s", err) + } + if err = deletePVConsumerPod(client, pvcConsumerPod); err != nil { + l.Printf("failed to cleanup pv consumer pod %s: %s", pvcConsumerPod.Name, err) + } + }() + + podReadyTimeoutEnd := time.Now().Add(timeout) + for { + gotPod, err := client.CoreV1().Pods(pvcConsumerPodSpec.Namespace).Get(ctx, pvcConsumerPodSpec.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) + } + + switch { + case k8spodutils.IsPodReady(gotPod): + return nil, nil + default: + time.Sleep(time.Second) + } + + if time.Now().After(podReadyTimeoutEnd) { + // The volume consumer pod never went into running phase which means it's probably an error + // with provisioning the volume. + // A pod in Pending phase means the API Server has created the resource and stored it in etcd, + // but the pod has not been scheduled yet, nor have container images been pulled from the registry. 
+ if gotPod.Status.Phase == corev1.PodPending { + // check pvc status and get error + pvcPendingError, err := getPvcError(client, tmpPVC) + if err != nil { + return nil, fmt.Errorf("failed to get PVC error: %s", err) + } + return pvcPendingError, nil + } + // pod failed for other reason(s) + return nil, fmt.Errorf("unexpected status for pod %s: %s", gotPod.Name, gotPod.Status.Phase) + } + } +} + +// deleteTmpPVC deletes the provided pvc from the default namespace and waits until the +// backing pv dissapear as well (this is mandatory so we don't leave any orphan pv as this would +// cause pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is +// returned. +func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) error { + // Cleanup should use background context so as not to fail if context has already been canceled + ctx := context.Background() + + pvs, err := client.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list persistent volumes: %w", err) + } + + pvsByPVCName := map[string]corev1.PersistentVolume{} + for _, pv := range pvs.Items { + if pv.Spec.ClaimRef == nil { + continue + } + pvsByPVCName[pv.Spec.ClaimRef.Name] = pv + } + + var waitFor []string + propagation := metav1.DeletePropagationForeground + delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} + if err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete( + ctx, pvc.Name, delopts, + ); err != nil { + if !k8serrors.IsNotFound(err) { + l.Printf("failed to delete temp pvc %s: %s", pvc.Name, err) + return err + } + } + waitFor = append(waitFor, pvc.Name) + + timeout := time.NewTicker(5 * time.Minute) + interval := time.NewTicker(5 * time.Second) + defer timeout.Stop() + defer interval.Stop() + for _, pvc := range waitFor { + pv, ok := pvsByPVCName[pvc] + if !ok { + l.Printf("failed to find pv for temp pvc %s", pvc) + continue + } + + for { + // 
break the loop as soon as we can't find the pv anymore. + if _, err := client.CoreV1().PersistentVolumes().Get( + ctx, pv.Name, metav1.GetOptions{}, + ); err != nil && !k8serrors.IsNotFound(err) { + l.Printf("failed to get pv for temp pvc %s: %s", pvc, err) + } else if err != nil && k8serrors.IsNotFound(err) { + break + } + + select { + case <-interval.C: + continue + case <-timeout.C: + return fmt.Errorf("failed to delete pvs: timeout") + } + } + } + return nil +} + +// deletePVConsumerPod removes the pod resource from the api servere +func deletePVConsumerPod(client k8sclient.Interface, pod *corev1.Pod) error { + propagation := metav1.DeletePropagationForeground + delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} + if pod != nil { + if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { + return err + } + } + return nil +} + +// getPvcError returns the error event for why a PVC is in Pending status +func getPvcError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) (*pvcFailure, error) { + // no need to inspect pvc + if pvc.Status.Phase != corev1.ClaimPending { + return nil, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) + } + + pvcEvents, err := client.CoreV1().Events(pvc.Namespace).Search(scheme.Scheme, pvc) + if err != nil { + return nil, fmt.Errorf("failed to list events for PVC %s", pvc.Name) + } + + // get pending reason + for _, event := range pvcEvents.Items { + if event.Reason == "ProvisioningFailed" { + return &pvcFailure{event.Reason, event.Source.Component, event.Message}, nil + } + } + return nil, fmt.Errorf("could not determine reason for why PVC %s is in Pending status", pvc.Name) +} + +// pvcsForStorageClass returns all PersistentVolumeClaims, filtered by namespace, for a given +// storage class +func pvcsForStorageClass(ctx context.Context, l *log.Logger, client k8sclient.Interface, srcSC, namespace string) (map[string]corev1.PersistentVolumeClaim, error) { 
+ srcPVs, err := k8sutil.PVsByStorageClass(context.TODO(), client, srcSC) + if err != nil { + return nil, fmt.Errorf("failed to get PVs for storage class %s: %s", srcSC, err) + } + + // get PVCs using specified PVs + srcPVCs := map[string]corev1.PersistentVolumeClaim{} + for _, pv := range srcPVs { + if pv.Spec.ClaimRef != nil { + pvc, err := client.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get PVC for PV %s in %s: %w", pv.Spec.ClaimRef.Name, pv.Spec.ClaimRef.Namespace, err) + } + if pv.Spec.ClaimRef.Namespace == namespace || namespace == "" { + srcPVCs[pv.Spec.ClaimRef.Name] = *pvc.DeepCopy() + } + } else { + return nil, fmt.Errorf("PV %s does not have an associated PVC", pv.Name) + } + } + return srcPVCs, nil +} + +func newK8sName(prefix, original string) string { + newName := fmt.Sprintf("%s-%s", prefix, original) + if len(newName) > 63 { + newName = newName[0:31] + newName[len(newName)-32:] + } + return newName +} From efe96f31aaefb7bbe083b2d14b47b0dd3223cb33 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Thu, 1 Dec 2022 01:09:17 -0600 Subject: [PATCH 21/41] WIP: PR Review Refactor --- cmd/main.go | 62 ++- pkg/k8sutil/pod.go | 24 - pkg/k8sutil/pod_test.go | 132 ----- pkg/k8sutil/storage.go | 19 + pkg/k8sutil/storage_test.go | 5 +- pkg/migrate/migrate.go | 98 +--- pkg/migrate/migrate_test.go | 793 --------------------------- pkg/preflight/validate.go | 4 +- pkg/preflight/validate_test.go | 971 +++++++++++++++++++++++++++++++++ 9 files changed, 1064 insertions(+), 1044 deletions(-) delete mode 100644 pkg/k8sutil/pod.go delete mode 100644 pkg/k8sutil/pod_test.go create mode 100644 pkg/preflight/validate_test.go diff --git a/cmd/main.go b/cmd/main.go index 85a6ebb..284f421 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -1,16 +1,76 @@ package main import ( + "context" + "flag" "fmt" + "log" + 
"os" "github.com/replicatedhq/pvmigrate/pkg/migrate" + "github.com/replicatedhq/pvmigrate/pkg/preflight" "github.com/replicatedhq/pvmigrate/pkg/version" + k8sclient "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" // this allows accessing a larger array of cloud providers + "sigs.k8s.io/controller-runtime/pkg/client/config" ) func main() { fmt.Printf("Running pvmigrate build:\n") version.Print() - migrate.Cli() + // Cli uses CLI options to run Migrate + var options migrate.Options + var skipPreflightValidation bool + var preflightValidationOnly bool + flag.StringVar(&options.SourceSCName, "source-sc", "", "storage provider name to migrate from") + flag.StringVar(&options.DestSCName, "dest-sc", "", "storage provider name to migrate to") + flag.StringVar(&options.RsyncImage, "rsync-image", "eeacms/rsync:2.3", "the image to use to copy PVCs - must have 'rsync' on the path") + flag.StringVar(&options.Namespace, "namespace", "", "only migrate PVCs within this namespace") + flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") + flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") + flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") + flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") + flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") + flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") + flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip 
the migrate and run preflight validation") + + flag.Parse() + + // setup k8s + cfg, err := config.GetConfig() + if err != nil { + fmt.Printf("failed to get config: %s\n", err.Error()) + os.Exit(1) + } + + clientset, err := k8sclient.NewForConfig(cfg) + if err != nil { + fmt.Printf("failed to create kubernetes clientset: %s\n", err.Error()) + os.Exit(1) + } + + output := log.New(os.Stdout, "", 0) // this has no time prefix etc + + if !skipPreflightValidation { + failures, err := preflight.Validate(context.TODO(), output, clientset, options) + if err != nil { + output.Printf("failed to run preflight validation checks") + os.Exit(1) + } + + if len(failures) != 0 { + preflight.PrintValidationFailures(output.Writer(), failures) + os.Exit(2) + } + } + + // start the migration + if !preflightValidationOnly { + err = migrate.Migrate(context.TODO(), output, clientset, options) + if err != nil { + output.Printf("migration failed: %s", err) + os.Exit(3) + } + } } diff --git a/pkg/k8sutil/pod.go b/pkg/k8sutil/pod.go deleted file mode 100644 index 359da25..0000000 --- a/pkg/k8sutil/pod.go +++ /dev/null @@ -1,24 +0,0 @@ -package k8sutil - -import ( - corev1 "k8s.io/api/core/v1" -) - -// HasPVC returs true if provided pod has provided pvc among its volumes. 
-func HasPVC(pod corev1.Pod, pvc corev1.PersistentVolumeClaim) bool { - if pod.Namespace != pvc.Namespace { - return false - } - - for _, vol := range pod.Spec.Volumes { - if vol.PersistentVolumeClaim == nil { - continue - } - if vol.PersistentVolumeClaim.ClaimName != pvc.Name { - continue - } - return true - } - - return false -} diff --git a/pkg/k8sutil/pod_test.go b/pkg/k8sutil/pod_test.go deleted file mode 100644 index ca7e861..0000000 --- a/pkg/k8sutil/pod_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package k8sutil - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestPodUsesPVC(t *testing.T) { - for _, tt := range []struct { - name string - expected bool - pod corev1.Pod - pvc corev1.PersistentVolumeClaim - }{ - { - name: "should find pvc in pod", - expected: true, - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "teste", - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "vol", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc", - }, - }, - }, - }, - }, - }, - pvc: corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "teste", - }, - }, - }, - { - name: "should return false if no claim ref is present in the pv", - expected: false, - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "teste", - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "vol", - }, - }, - }, - }, - pvc: corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "teste", - }, - }, - }, - { - name: "should return false if pvc and pod namespaces are different", - expected: false, - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "teste", - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "vol", - VolumeSource: corev1.VolumeSource{ - 
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc", - }, - }, - }, - }, - }, - }, - pvc: corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "another-namespace", - }, - }, - }, - { - name: "should return false if pod does not use pvc", - expected: false, - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: "teste", - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "vol", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "another-pvc", - }, - }, - }, - }, - }, - }, - pvc: corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "teste", - }, - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - result := HasPVC(tt.pod, tt.pvc) - if result != tt.expected { - t.Errorf("expected %v, received %v", tt.expected, result) - } - }) - } -} diff --git a/pkg/k8sutil/storage.go b/pkg/k8sutil/storage.go index 6ae71ab..0a5a356 100644 --- a/pkg/k8sutil/storage.go +++ b/pkg/k8sutil/storage.go @@ -30,3 +30,22 @@ func PVsByStorageClass(ctx context.Context, cli kubernetes.Interface, scname str } return pvs, nil } + +// PVCSForPVs returns a pv to pvc mapping. the returned map is indexed by the pv name. 
+func PVCSForPVs(ctx context.Context, cli kubernetes.Interface, pvs map[string]corev1.PersistentVolume) (map[string]corev1.PersistentVolumeClaim, error) { + pvcs := map[string]corev1.PersistentVolumeClaim{} + for pvidx, pv := range pvs { + cref := pv.Spec.ClaimRef + if cref == nil { + return nil, fmt.Errorf("pv %s without associated PVC", pvidx) + } + + pvc, err := cli.CoreV1().PersistentVolumeClaims(cref.Namespace).Get(ctx, cref.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get pvc %s for pv %s: %w", cref.Name, pvidx, err) + } + + pvcs[pvidx] = *pvc.DeepCopy() + } + return pvcs, nil +} diff --git a/pkg/k8sutil/storage_test.go b/pkg/k8sutil/storage_test.go index 3866634..a744836 100644 --- a/pkg/k8sutil/storage_test.go +++ b/pkg/k8sutil/storage_test.go @@ -8,7 +8,6 @@ import ( "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" @@ -107,7 +106,7 @@ func TestPVCSForPVs(t *testing.T) { } } -func TestPVSByStorageClass(t *testing.T) { +func TestPVsByStorageClass(t *testing.T) { for _, tt := range []struct { name string err string @@ -229,7 +228,7 @@ func TestPVSByStorageClass(t *testing.T) { } { t.Run(tt.name, func(t *testing.T) { kcli := fake.NewSimpleClientset(tt.objs...) 
- result, err := PVSByStorageClass(context.Background(), kcli, tt.scname) + result, err := PVsByStorageClass(context.Background(), kcli, tt.scname) if err != nil { if len(tt.err) == 0 { t.Errorf("unexpected error: %s", err) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 481b2f3..c36628b 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -4,11 +4,9 @@ import ( "bufio" "context" "errors" - "flag" "fmt" "io" "log" - "os" "strconv" "text/tabwriter" "time" @@ -19,7 +17,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client/config" ) const ( @@ -43,90 +40,15 @@ var isDestScLocalVolumeProvisioner bool // Options is the set of options that should be provided to Migrate type Options struct { - SourceSCName string - DestSCName string - RsyncImage string - Namespace string - SetDefaults bool - VerboseCopy bool - SkipSourceValidation bool - SkipPVAccessModeValidation bool - PvcCopyTimeout int - PodReadyTimeout int -} - -// Cli uses CLI options to run Migrate -func Cli() { - var options Options - var dryRun bool - - flag.StringVar(&options.SourceSCName, "source-sc", "", "storage provider name to migrate from") - flag.StringVar(&options.DestSCName, "dest-sc", "", "storage provider name to migrate to") - flag.StringVar(&options.RsyncImage, "rsync-image", "eeacms/rsync:2.3", "the image to use to copy PVCs - must have 'rsync' on the path") - flag.StringVar(&options.Namespace, "namespace", "", "only migrate PVCs within this namespace") - flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") - flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") - flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that 
StorageClass does not exist") - flag.BoolVar(&options.SkipPVAccessModeValidation, "skip-pv-access-mode-validation", false, "skip the volume access modes validation on the destination storage provider") - flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") - flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") - flag.BoolVar(&dryRun, "dry-run", false, "run validation checks without running the migrations") - - flag.Parse() - - // setup k8s - cfg, err := config.GetConfig() - if err != nil { - fmt.Printf("failed to get config: %s\n", err.Error()) - os.Exit(1) - } - - clientset, err := k8sclient.NewForConfig(cfg) - if err != nil { - fmt.Printf("failed to create kubernetes clientset: %s\n", err.Error()) - os.Exit(1) - } - - output := log.New(os.Stdout, "", 0) // this has no time prefix etc - - // escape hatch - for DEV/TEST ONLY - if !options.SkipPVAccessModeValidation { - srcPVs, err := kurlutils.PVSByStorageClass(context.TODO(), clientset, options.SourceSCName) - if err != nil { - fmt.Printf("failed to get volumes using storage class %s: %s", options.SourceSCName, err) - os.Exit(1) - } - - pvMigrator := PVMigrator{ - ctx: context.TODO(), - log: output, - k8scli: clientset, - srcSc: options.SourceSCName, - dstSc: options.DestSCName, - deletePVTimeout: 5 * time.Minute, - podReadyTimeout: time.Duration(options.PodReadyTimeout) * time.Second, - } - unsupportedPVCs, err := pvMigrator.ValidateVolumeAccessModes(srcPVs) - if err != nil { - fmt.Printf("failed to validate volume access modes for destination storage class %s", options.DestSCName) - os.Exit(1) - } - - if len(unsupportedPVCs) != 0 { - PrintPVAccessModeErrors(unsupportedPVCs) - fmt.Printf("existing volumes have access modes not supported by the destination storage class %s", options.DestSCName) - 
os.Exit(2) - } - } - - // start the migration - if !dryRun { - err = Migrate(context.TODO(), output, clientset, options) - if err != nil { - fmt.Printf("%s\n", err.Error()) - os.Exit(1) - } - } + SourceSCName string + DestSCName string + RsyncImage string + Namespace string + SetDefaults bool + VerboseCopy bool + SkipSourceValidation bool + PvcCopyTimeout int + PodReadyTimeout int } // Migrate moves data and PVCs from one StorageClass to another @@ -177,7 +99,6 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return nil } - type pvcCtx struct { claim *corev1.PersistentVolumeClaim usedByPod *corev1.Pod @@ -197,7 +118,6 @@ func (pvc pvcCtx) getNodeNameRef() string { return pvc.usedByPod.Spec.NodeName } - // swapDefaultStorageClasses attempts to set newDefaultSC as the default StorageClass // if oldDefaultSC was set as the default, then it will be unset first // if another StorageClass besides these two is currently the default, it will return an error diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index a7bd9e9..0b744e2 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3,7 +3,6 @@ package migrate import ( "context" "fmt" - "io" "log" "strings" "testing" @@ -18,7 +17,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" k8sclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/rest" "k8s.io/utils/pointer" ) @@ -925,34 +923,6 @@ func TestGetPVCs(t *testing.T) { } } -func TestNewPVMigrator(t *testing.T) { - // test empty logger - _, err := NewPVMigrator(&rest.Config{}, nil, "src", "dst", 0) - if err == nil || err.Error() != "no logger provided" { - t.Errorf("expected failure creating object: %v", err) - } - - logger := log.New(io.Discard, "", 0) - - // test src storage class - _, err = NewPVMigrator(&rest.Config{}, logger, "", "dst", 0) - if err == nil || err.Error() != "empty source storage class" { - t.Errorf("expected failure creating object: %v", 
err) - } - - // test empty dst sc - _, err = NewPVMigrator(&rest.Config{}, logger, "src", "", 0) - if err == nil || err.Error() != "empty destination storage class" { - t.Errorf("expected failure creating object: %v", err) - } - - // happy path - _, err = NewPVMigrator(&rest.Config{}, logger, "src", "dst", 0) - if err != nil { - t.Errorf("unexpected failure creating object: %v", err) - } -} - func Test_createMigrationPod(t *testing.T) { type args struct { ns string @@ -3095,766 +3065,3 @@ func Test_copyAllPVCs(t *testing.T) { }) } } - -func Test_validateVolumeAccessModes(t *testing.T) { - for _, tt := range []struct { - name string - srcStorageClass string - dstStorageClass string - deletePVTimeout time.Duration - podReadyTimeout time.Duration - wantErr bool - resources []runtime.Object - input map[string]corev1.PersistentVolume - expected map[string]map[string]PVCError - }{ - { - name: "With compatible access modes, expect no errors", - input: map[string]corev1.PersistentVolume{ - "pv0": { - ObjectMeta: metav1.ObjectMeta{ - Name: "pv0", - }, - Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ - Name: "pvc", - }, - }, - }, - }, - expected: make(map[string]map[string]PVCError), - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dstSc", - }, - }, - &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - }, - }, - &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pv0", - }, - Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ - Name: "pvc", - }, - }, - }, - }, - }, - { - name: "When there is no matching pvc for a pv, expect error", - input: map[string]corev1.PersistentVolume{ - "pv0": { - ObjectMeta: metav1.ObjectMeta{ - Name: "pv0", - }, 
- Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ // this pvc will not be found - Name: "pvc", - Namespace: "default", - }, - }, - }, - }, - expected: nil, - wantErr: true, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dstSc", - }, - Provisioner: "kubernetes.io/no-provisioner", - VolumeBindingMode: (*storagev1.VolumeBindingMode)(pointer.String("WaitForFirstConsumer")), - }, - &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "ns1", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("srcSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteAll"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pv0", - }, - Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ - Name: "pvc", - Namespace: "ns1", - }, - }, - }, - }, - }, - { - name: "When destination storage class is not found, expect error", - input: map[string]corev1.PersistentVolume{ - "pv0": { - ObjectMeta: metav1.ObjectMeta{ - Name: "pv0", - }, - Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "srcSc", - ClaimRef: &corev1.ObjectReference{ - Name: "pvc", - Namespace: "default", - }, - }, - }, - }, - expected: nil, - wantErr: true, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - req := require.New(t) - kcli := fake.NewSimpleClientset(tt.resources...) 
- testlog := log.New(testWriter{t: t}, "", 0) - pvm := PVMigrator{ - ctx: context.Background(), - log: testlog, - k8scli: kcli, - srcSc: tt.srcStorageClass, - dstSc: tt.dstStorageClass, - deletePVTimeout: 1 * time.Millisecond, - podReadyTimeout: 1 * time.Millisecond, - } - result, err := pvm.ValidateVolumeAccessModes(tt.input) - if err != nil { - if tt.wantErr { - req.Error(err) - } else { - req.NoError(err) - } - } - req.Equal(result, tt.expected) - }) - } -} - -func Test_getPvcError(t *testing.T) { - for _, tt := range []struct { - name string - srcStorageClass string - dstStorageClass string - deletePVTimeout time.Duration - podTimeout time.Duration - wantErr bool - resources []runtime.Object - input *corev1.PersistentVolumeClaim - expected PVCError - }{ - { - name: "When there is a PVC error, expect ProvisioningFailed event", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - expected: PVCError{ - reason: "ProvisioningFailed", - from: "kubernetes.io/no-provisioner", - message: "Only support ReadWriteOnce access mode", - }, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dstSc", - }, - }, - &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - &corev1.EventList{ - Items: []corev1.Event{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-error-event", - Namespace: "default", - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "PersistentVolumeClaim", - Namespace: "default", - Name: "pvc", - UID: "12345", - }, - Source: corev1.EventSource{Component: 
"kubernetes.io/no-provisioner"}, - Reason: "ProvisioningFailed", - Message: "Only support ReadWriteOnce access mode", - }, - }, - }, - }, - }, - { - name: "When PVC event reason is not ProvisioningFailed, expect error", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - expected: PVCError{}, - wantErr: true, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dstSc", - }, - }, - &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - &corev1.EventList{ - Items: []corev1.Event{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-error-event", - Namespace: "default", - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "PersistentVolumeClaim", - Namespace: "default", - Name: "pvc", - UID: "12345", - }, - Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, - Reason: "Provisioning", - Message: "External provisioner is provisiong volume for claim pvc", - }, - }, - }, - }, - }, - { - name: "When PVC is pending due to an error but there are no events for it, expect error", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - expected: PVCError{}, - wantErr: true, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"dstSc", - }, - }, - &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, - }, - }, - }, - { - name: "When PVC is not in Pending status, expect error", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound}, - }, - expected: PVCError{}, - wantErr: true, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{}, - }, - } { - t.Run(tt.name, func(t *testing.T) { - req := require.New(t) - kcli := fake.NewSimpleClientset(tt.resources...) - testlog := log.New(testWriter{t: t}, "", 0) - pvm := PVMigrator{ - ctx: context.Background(), - log: testlog, - k8scli: kcli, - srcSc: tt.srcStorageClass, - dstSc: tt.dstStorageClass, - deletePVTimeout: 1 * time.Millisecond, - podReadyTimeout: 1 * time.Millisecond, - } - result, err := pvm.getPvcError(tt.input) - if err != nil { - if tt.wantErr { - req.Error(err) - } else { - req.NoError(err) - } - } - req.Equal(tt.expected, result) - }) - } -} - -func Test_checkVolumeAccessModes(t *testing.T) { - for _, tt := range []struct { - name string - srcStorageClass string - dstStorageClass string - deletePVTimeout time.Duration - podTimeout time.Duration - wantErr bool - resources []runtime.Object - input *corev1.PersistentVolumeClaim - expected PVCError - }{ - { - name: "When the PVC access mode is not supported by destination storage provider, expect PVCError", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc", - Namespace: "default", - UID: "12345", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("srcSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, - }, - Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, 
- }, - expected: PVCError{ - reason: "ProvisioningFailed", - from: "kubernetes.io/no-provisioner", - message: "Only support ReadWriteOnce access mode", - }, - srcStorageClass: "srcSc", - dstStorageClass: "dstSc", - resources: []runtime.Object{ - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "srcSc", - }, - }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dstSc", - }, - }, - &corev1.EventList{ - Items: []corev1.Event{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pvc-error-event", - Namespace: "default", - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "PersistentVolumeClaim", - Namespace: "default", - UID: "12345", - }, - Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, - Reason: "ProvisioningFailed", - Message: "Only support ReadWriteOnce access mode", - }, - }, - }, - &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "tmpPod", - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - }, - }, - }, - Status: corev1.PodStatus{Phase: corev1.PodPending}, - }, - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - req := require.New(t) - kcli := fake.NewSimpleClientset(tt.resources...) 
- testlog := log.New(testWriter{t: t}, "", 0) - pvm := PVMigrator{ - ctx: context.Background(), - log: testlog, - k8scli: kcli, - srcSc: tt.srcStorageClass, - dstSc: tt.dstStorageClass, - deletePVTimeout: 1 * time.Millisecond, - podReadyTimeout: 1 * time.Millisecond, - podNameOverride: "tmpPod", - } - result, err := pvm.checkVolumeAccessModes(*tt.input) - if err != nil { - if tt.wantErr { - req.Error(err) - } else { - req.NoError(err) - } - } - req.Equal(tt.expected, result) - }) - } -} - -func Test_buildTmpPVC(t *testing.T) { - for _, tt := range []struct { - name string - pvcNameOverride string - dstStorageClass string - input *corev1.PersistentVolumeClaim - expectedPVC *corev1.PersistentVolumeClaim - expectedName string - }{ - { - name: "When PVC name is not overridden, expect unique temp pvc name", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pvc", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - }, - }, - expectedPVC: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - expectedName: "pvmigrate-claim-test-pvc-", - dstStorageClass: "dstSc", - }, - { - name: "When PVC name is longer than 63 chars, expect name to be trimmed", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "really-long-pvc-name-that-should-be-trimmed-to-avoid-an-error", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: 
[]corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - }, - }, - expectedPVC: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - expectedName: "pvmigrate-claim-really-long-pvc-trimmed-to-avoid-an-error-", - dstStorageClass: "dstSc", - }, - { - name: "When PVC name is overriden, expect non-UID generated name", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-test-pvc", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - }, - }, - expectedPVC: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - pvcNameOverride: "pvc-name-override", - expectedName: "pvc-name-override", - dstStorageClass: "dstSc", - }, - } { - t.Run(tt.name, func(t *testing.T) { - req := require.New(t) - var pvcNameOverride string - if tt.pvcNameOverride != "" { - pvcNameOverride = tt.pvcNameOverride - } - pvm := PVMigrator{ - dstSc: tt.dstStorageClass, - pvcNameOverride: pvcNameOverride, - } - pvc := pvm.buildTmpPVC(*tt.input, tt.dstStorageClass) - req.True(strings.HasPrefix(pvc.Name, tt.expectedName)) - req.Equal(tt.expectedPVC.Spec, pvc.Spec) - }) - } -} - -func Test_buildPVConsumerPod(t *testing.T) { - for _, tt := range []struct { - name string - 
podNameOverride string - pvcName string - expectedPod *corev1.Pod - expectedName string - }{ - { - name: "When pod name not overriden, expect unique pod name", - pvcName: "test-pvc", - expectedPod: &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - }, - expectedName: "pvmigrate-vol-consumer-test-pvc-", - }, - { - name: "When pod name is longer than 63 chars, expect pod name to be trimmed", - pvcName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", - expectedPod: &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - }, - expectedName: "pvmigrate-vol-consumer-pvc-namecause-it-will-cause-an-err-", - }, - { - name: "When pod name is overriden, expect non-UID name", - pvcName: "test-pvc", - expectedPod: &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ 
- Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - }, - podNameOverride: "my pod name override", - expectedName: "my pod name override", - }, - } { - t.Run(tt.name, func(t *testing.T) { - req := require.New(t) - var podNameOverride string - if tt.podNameOverride != "" { - podNameOverride = tt.podNameOverride - } - pvm := PVMigrator{ - podNameOverride: podNameOverride, - } - pod := pvm.buildPVConsumerPod(tt.pvcName) - req.True(strings.HasPrefix(pod.Name, tt.expectedName)) - req.Equal(tt.expectedPod.Spec, pod.Spec) - }) - } -} diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 260a7ff..634b13d 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -153,7 +153,7 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent } } -// checkVolumeAccessModeValid checks if the access modes of a pv are supported by the +// checkVolumeAccessModes checks if the access modes of a pv are supported by the // destination storage class. 
func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout time.Duration) (*pvcFailure, error) { var err error @@ -294,7 +294,7 @@ func deletePVConsumerPod(client k8sclient.Interface, pod *corev1.Pod) error { return nil } -// getPvcError returns the error event for why a PVC is in Pending status +// getPvcError returns the failure event for why a PVC is in Pending status func getPvcError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) (*pvcFailure, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go new file mode 100644 index 0000000..c7d4a65 --- /dev/null +++ b/pkg/preflight/validate_test.go @@ -0,0 +1,971 @@ +package preflight + +import ( + "context" + "io" + "log" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" +) + +func Test_validateVolumeAccessModes(t *testing.T) { + for _, tt := range []struct { + name string + dstSC string + podReadyTimeout time.Duration + wantErr bool + resources []runtime.Object + input map[string]corev1.PersistentVolumeClaim + expected map[string]map[string]pvcFailure + }{ + { + name: "With compatible access modes, expect no validation failures", + input: map[string]corev1.PersistentVolumeClaim{ + "pvc0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: 
resource.MustParse("1Mi"), + }, + }, + }, + }, + "pvc1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + expected: make(map[string]map[string]pvcFailure), + dstSC: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "srcSc", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + }, + }, + }, + }, + }, + { + name: "When destination storage class is not found, expect error", + wantErr: true, + dstSC: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) 
+ logger := log.New(io.Discard, "", 0) + result, err := validateVolumeAccessModes(context.Background(), logger, kcli, tt.dstSC, tt.podReadyTimeout, tt.input) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(result, tt.expected) + }) + } +} + +func Test_getPvcError(t *testing.T) { + for _, tt := range []struct { + name string + wantErr bool + resources []runtime.Object + input *corev1.PersistentVolumeClaim + expected *pvcFailure + }{ + { + name: "When there is a PVC failure, expect ProvisioningFailed event", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + }, + expected: &pvcFailure{ + reason: "ProvisioningFailed", + from: "kubernetes.io/no-provisioner", + message: "Only support ReadWriteOnce access mode", + }, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + Name: "pvc", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "ProvisioningFailed", + Message: "Only support ReadWriteOnce access mode", + }, + }, + }, + }, + }, + { + name: "When PVC event failure reason is not ProvisioningFailed, expect error", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + }, + expected: &pvcFailure{}, + wantErr: true, + 
resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + Name: "pvc", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "Provisioning", + Message: "External provisioner is provisiong volume for claim pvc", + }, + }, + }, + }, + }, + { + name: "When PVC is pending due to a failure but there are no events for it, expect error", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: &pvcFailure{}, + wantErr: true, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + }, + }, + { + name: "When PVC is not in Pending status, expect error", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound}, + }, + expected: &pvcFailure{}, + wantErr: true, + resources: []runtime.Object{}, 
+ }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) + result, err := getPvcError(kcli, tt.input) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(tt.expected, result) + }) + } +} + +func Test_checkVolumeAccessModes(t *testing.T) { + for _, tt := range []struct { + name string + srcStorageClass string + dstStorageClass string + deletePVTimeout time.Duration + podTimeout time.Duration + wantErr bool + resources []runtime.Object + input *corev1.PersistentVolumeClaim + expected pvcFailure + }{ + { + name: "When the PVC access mode is not supported by destination storage provider, expect PVCError", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + UID: "12345", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("srcSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, + expected: pvcFailure{ + reason: "ProvisioningFailed", + from: "kubernetes.io/no-provisioner", + message: "Only support ReadWriteOnce access mode", + }, + srcStorageClass: "srcSc", + dstStorageClass: "dstSc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "srcSc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dstSc", + }, + }, + &corev1.EventList{ + Items: []corev1.Event{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc-error-event", + Namespace: "default", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "PersistentVolumeClaim", + Namespace: "default", + UID: "12345", + }, + Source: corev1.EventSource{Component: "kubernetes.io/no-provisioner"}, + Reason: "ProvisioningFailed", + Message: "Only support ReadWriteOnce access mode", + }, + }, + }, + &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: 
"Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "tmpPod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + }, + }, + }, + Status: corev1.PodStatus{Phase: corev1.PodPending}, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) + logger := log.New(io.Discard, "", 0) + result, err := checkVolumeAccessModes(context.Background(), logger, kcli, tt.dstStorageClass, *tt.input, tt.podTimeout) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(tt.expected, result) + }) + } +} + +func Test_buildTmpPVC(t *testing.T) { + for _, tt := range []struct { + name string + pvcNameOverride string + dstStorageClass string + input *corev1.PersistentVolumeClaim + expectedPVC *corev1.PersistentVolumeClaim + expectedName string + }{ + { + name: "When PVC name is not overridden, expect unique temp pvc name", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + expectedName: "pvmigrate-preflight-test-pvc", + dstStorageClass: "dstSc", + }, + { + name: "When PVC name is longer than 63 chars, expect name to be trimmed", + input: 
&corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "really-long-pvc-name-that-should-be-trimmed-to-avoid-an-error", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + expectedName: "pvmigrate-claim-really-long-pvc-trimmed-to-avoid-an-error-", + dstStorageClass: "dstSc", + }, + { + name: "When PVC name is overriden, expect non-UID generated name", + input: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-test-pvc", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + }, + }, + expectedPVC: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + pvcNameOverride: "pvc-name-override", + expectedName: "pvc-name-override", + dstStorageClass: "dstSc", + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + pvc := buildTmpPVC(*tt.input, tt.dstStorageClass) + req.True(strings.HasPrefix(pvc.Name, tt.expectedName)) + req.Equal(tt.expectedPVC.Spec, pvc.Spec) + }) + } +} + +func 
Test_buildPVCConsumerPod(t *testing.T) { + for _, tt := range []struct { + name string + namespace string + podNameOverride string + pvcName string + expectedPod *corev1.Pod + expectedName string + }{ + { + name: "When pod name not overriden, expect unique pod name", + pvcName: "test-pvc", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + expectedName: "pvmigrate-vol-consumer-test-pvc-", + }, + { + name: "When pod name is longer than 63 chars, expect pod name to be trimmed", + pvcName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + expectedName: "pvmigrate-vol-consumer-pvc-namecause-it-will-cause-an-err-", + }, + { + name: "When pod name is 
overriden, expect non-UID name", + pvcName: "test-pvc", + expectedPod: &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Volumes: []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + Command: []string{ + "sleep", + "3600", + }, + VolumeMounts: []corev1.VolumeMount{ + { + MountPath: "/tmpmount", + Name: "tmp", + }, + }, + }, + }, + }, + }, + podNameOverride: "my pod name override", + expectedName: "my pod name override", + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + pod := buildPVCConsumerPod(tt.pvcName, tt.namespace) + req.True(strings.HasPrefix(pod.Name, tt.expectedName)) + req.Equal(tt.expectedPod.Spec, pod.Spec) + }) + } +} + +func Test_pvcsForStorageClass(t *testing.T) { + for _, tt := range []struct { + name string + scname string + namespace string + wantErr bool + resources []runtime.Object + expected map[string]corev1.PersistentVolumeClaim + }{ + { + name: "When storage class is not found, expect error", + scname: "i-dont-exit", + namespace: "default", + wantErr: true, + }, + { + name: "When volumes and storage classes exist and namespace is set, expect pvcs for that particular namespace only", + scname: "default", + namespace: "default", + expected: map[string]corev1.PersistentVolumeClaim{ + "pvc0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), 
+ }, + }, + }, + }, + }, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rook", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc0", + Namespace: "default", + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "rook", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc1", + Namespace: "test", + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + Namespace: "test", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("rook"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + }, + { + name: "When volumes and storage classes exist and namespace is NOT set, expect pvcs for all namespaces", + scname: "default", + namespace: "", + expected: map[string]corev1.PersistentVolumeClaim{ + "pvc0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + 
Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + "pvc1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + Namespace: "test", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("rook"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rook", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc0", + Namespace: "default", + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "rook", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc1", + Namespace: "test", + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + Namespace: "test", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("rook"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + 
corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + }, + { + name: "When PV does not have an associated PVC, expect error", + scname: "default", + namespace: "default", + expected: map[string]corev1.PersistentVolumeClaim{ + "pvc0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "rook", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv0", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "rook", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc1", + Namespace: "test", + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc0", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc1", + Namespace: "test", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("rook"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + Resources: corev1.ResourceRequirements{ + Requests: 
corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + kcli := fake.NewSimpleClientset(tt.resources...) + logger := log.New(io.Discard, "", 0) + result, err := pvcsForStorageClass(context.Background(), logger, kcli, tt.scname, tt.namespace) + if err != nil { + if tt.wantErr { + req.Error(err) + } else { + req.NoError(err) + } + } + req.Equal(tt.expected, result) + }) + } +} From 83334387cfadd3106aa9d344f3743260a576b134 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 14:17:08 -0600 Subject: [PATCH 22/41] WIP: More PR review refactor --- cmd/main.go | 2 +- pkg/k8sutil/label.go | 13 ++ pkg/k8sutil/label_test.go | 36 ++++ pkg/migrate/migrate.go | 73 +++---- pkg/migrate/migrate_test.go | 2 +- pkg/preflight/validate.go | 65 +++--- pkg/preflight/validate_test.go | 348 +++++++++++---------------------- 7 files changed, 228 insertions(+), 311 deletions(-) create mode 100644 pkg/k8sutil/label.go create mode 100644 pkg/k8sutil/label_test.go diff --git a/cmd/main.go b/cmd/main.go index 284f421..cf29aa8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -33,7 +33,7 @@ func main() { flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") - flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migrate and run preflight validation") + flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation 
only") flag.Parse() diff --git a/pkg/k8sutil/label.go b/pkg/k8sutil/label.go new file mode 100644 index 0000000..00ea693 --- /dev/null +++ b/pkg/k8sutil/label.go @@ -0,0 +1,13 @@ +package k8sutil + +import "fmt" + +// NewPrefixedName returns a name prefixed by prefix and with length that is no longer than 63 +// chars +func NewPrefixedName(prefix, original string) string { + newName := fmt.Sprintf("%s-%s", prefix, original) + if len(newName) > 63 { + newName = newName[0:31] + newName[len(newName)-32:] + } + return newName +} diff --git a/pkg/k8sutil/label_test.go b/pkg/k8sutil/label_test.go new file mode 100644 index 0000000..a00bb94 --- /dev/null +++ b/pkg/k8sutil/label_test.go @@ -0,0 +1,36 @@ +package k8sutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewPrefixedName(t *testing.T) { + tests := []struct { + name string + originalName string + prefix string + want string + }{ + { + name: "when name is < 63 chars expect new name to be prefixed", + originalName: "abc", + prefix: "pvcmigrate", + want: "pvcmigrate-abc", + }, + { + name: "when name is > 63 chars expect new name to be prefixed and 63 chars long", + originalName: "this label will exceed its allowed length and than be truncated", + prefix: "pvcmigrate", + want: "pvcmigrate-this label will excewed length and than be truncated", + }, + } + for _, tt := range tests { + t.Run(tt.originalName, func(t *testing.T) { + req := require.New(t) + got := NewPrefixedName(tt.prefix, tt.originalName) + req.Equal(tt.want, got) + }) + } +} diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index c36628b..344e6e7 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -40,15 +40,15 @@ var isDestScLocalVolumeProvisioner bool // Options is the set of options that should be provided to Migrate type Options struct { - SourceSCName string - DestSCName string - RsyncImage string - Namespace string - SetDefaults bool - VerboseCopy bool - SkipSourceValidation bool - 
PvcCopyTimeout int - PodReadyTimeout int + SourceSCName string + DestSCName string + RsyncImage string + Namespace string + SetDefaults bool + VerboseCopy bool + SkipSourceValidation bool + PvcCopyTimeout int + PodReadyTimeout int } // Migrate moves data and PVCs from one StorageClass to another @@ -68,7 +68,7 @@ func Migrate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return fmt.Errorf("failed to scale down pods: %w", err) } - err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Duration(options.PvcCopyTimeout)*time.Second) + err = copyAllPVCs(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.RsyncImage, updatedMatchingPVCs, options.VerboseCopy, time.Second) if err != nil { return err } @@ -184,7 +184,7 @@ func swapDefaultStorageClasses(ctx context.Context, w *log.Logger, clientset k8s return nil } -func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, timeout time.Duration) error { +func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, rsyncImage string, matchingPVCs map[string][]pvcCtx, verboseCopy bool, waitTime time.Duration) error { // create a pod for each PVC migration, and wait for it to finish w.Printf("\nCopying data from %s PVCs to %s PVCs\n", sourceSCName, destSCName) for ns, nsPvcs := range matchingPVCs { @@ -192,10 +192,7 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa sourcePvcName, destPvcName := nsPvc.claim.Name, newPvcName(nsPvc.claim.Name) w.Printf("Copying data from %s (%s) to %s in %s\n", sourcePvcName, nsPvc.claim.Spec.VolumeName, destPvcName, ns) - // setup timeout - timeoutCtx, cancelCtx := context.WithTimeout(ctx, timeout) - defer cancelCtx() - err := 
copyOnePVC(timeoutCtx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, nsPvc.getNodeNameRef(), timeout) + err := copyOnePVC(ctx, w, clientset, ns, sourcePvcName, destPvcName, rsyncImage, verboseCopy, waitTime, nsPvc.getNodeNameRef()) if err != nil { return fmt.Errorf("failed to copy PVC %s in %s: %w", nsPvc.claim.Name, ns, err) } @@ -204,7 +201,7 @@ func copyAllPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interfa return nil } -func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, nodeName string, timeout time.Duration) error { +func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, ns string, sourcePvcName string, destPvcName string, rsyncImage string, verboseCopy bool, waitTime time.Duration, nodeName string) error { w.Printf("Creating pvc migrator pod on node %s\n", nodeName) createdPod, err := createMigrationPod(ctx, clientset, ns, sourcePvcName, destPvcName, rsyncImage, nodeName) if err != nil { @@ -220,27 +217,26 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac } }() - migrationTimeout := time.NewTicker(timeout) - waitInterval := time.NewTicker(1 * time.Second) - defer migrationTimeout.Stop() - defer waitInterval.Stop() + // wait for the pod to be created + time.Sleep(waitTime) for { gotPod, err := clientset.CoreV1().Pods(ns).Get(ctx, createdPod.Name, metav1.GetOptions{}) if err != nil { w.Printf("failed to get newly created migration pod %s: %v\n", createdPod.Name, err) - } else if gotPod.Status.Phase == corev1.PodRunning || gotPod.Status.Phase == corev1.PodSucceeded { + continue + } + + if gotPod.Status.Phase == corev1.PodPending { + time.Sleep(waitTime) + continue + } + + if gotPod.Status.Phase == corev1.PodRunning || gotPod.Status.Phase == corev1.PodSucceeded { // time to get logs break } w.Printf("got status %s for pod %s, this is likely 
an error\n", gotPod.Status.Phase, gotPod.Name) - - select { - case <-waitInterval.C: - continue - case <-migrationTimeout.C: - return fmt.Errorf("migration pod %s failed to go into Running phase: timedout", createdPod.Name) - } } w.Printf("migrating PVC %s:\n", sourcePvcName) @@ -259,16 +255,15 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac gotPod, err := clientset.CoreV1().Pods(ns).Get(ctx, createdPod.Name, metav1.GetOptions{}) if err != nil { w.Printf("failed to check status of newly created migration pod %s: %v\n", createdPod.Name, err) - } else if gotPod.Status.Phase != corev1.PodRunning { + continue + } + if gotPod.Status.Phase != corev1.PodRunning { // if the pod is not running, go to the "validate success" section break } - } - select { - case <-waitInterval.C: - continue - case <-migrationTimeout.C: - return fmt.Errorf("failed to get logs for migration container %s: timedout", pvMigrateContainerName) + + // if the pod is running, wait to see if getting logs works in a few seconds + time.Sleep(waitTime) } } @@ -316,12 +311,7 @@ func copyOnePVC(ctx context.Context, w *log.Logger, clientset k8sclient.Interfac return fmt.Errorf("logs for the migration pod %s in %s ended, but the status was %s and not succeeded", createdPod.Name, ns, gotPod.Status.Phase) } - select { - case <-waitInterval.C: - continue - case <-migrationTimeout.C: - return fmt.Errorf("could not determine if migration pod %s succeeded: timedout", createdPod.Name) - } + time.Sleep(waitTime) } w.Printf("finished migrating PVC %s\n", sourcePvcName) @@ -548,6 +538,7 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return matchingPVCs, pvcNamespaces, nil } +// TODO: move this to the validation package func validateStorageClasses(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, skipSourceValidation bool) error { // get storage providers storageClasses, err := 
clientset.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 0b744e2..591711b 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -3055,7 +3055,7 @@ func Test_copyAllPVCs(t *testing.T) { } }(testCtx, testlog, clientset, tt.events) - err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Second*4) + err := copyAllPVCs(testCtx, testlog, clientset, "sourcesc", "destsc", "testrsyncimage", tt.matchingPVCs, false, time.Millisecond*10) if tt.wantErr { req.Error(err) testlog.Printf("got expected error %q", err.Error()) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 634b13d..4d482b5 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -19,6 +19,11 @@ import ( k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" ) +const ( + pvcNamePrefix = "pf-pvc" + podNamePrefix = "pvmigrate-pf-pod" +) + type pvcFailure struct { reason string from string @@ -34,12 +39,12 @@ type ValidationFailure struct { // Validate runs preflight check on storage volumes returning a list of failures func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options migrate.Options) ([]ValidationFailure, error) { - // validate access modes for all PVCs using the d source storage class + // validate access modes for all PVCs using the source storage class pvcs, err := pvcsForStorageClass(ctx, w, clientset, options.SourceSCName, options.Namespace) if err != nil { return nil, fmt.Errorf("failed to get PVCs for storage %s: %s", options.SourceSCName, err) } - pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, time.Duration(options.PodReadyTimeout), pvcs) + pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, time.Duration(options.PodReadyTimeout), pvcs) if err != nil { 
return nil, fmt.Errorf("failed validate PVC access modes: %s", err) } @@ -50,8 +55,8 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, func PrintValidationFailures(stream io.Writer, failures []ValidationFailure) { tw := tabwriter.NewWriter(stream, 0, 8, 8, '\t', 0) fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") - fmt.Fprintln(tw, "NAMESPACE\tPVC\tSOURCE\tMESSAGE") - fmt.Fprintf(tw, "---------\t---\t------\t-------\n") + fmt.Fprintln(tw, "NAMESPACE\tRESOURCE\tSOURCE\tMESSAGE") + fmt.Fprintf(tw, "---------\t--------\t------\t-------\n") for _, failure := range failures { fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", failure.Namespace, failure.Resource, failure.Source, failure.Message) } @@ -71,7 +76,7 @@ func toValidationFailures(pvcFailures map[string]map[string]pvcFailure) []Valida // validateVolumeAccessModes checks whether the provided persistent volumes support the access modes // of the destination storage class. // returns a map of pvc validation failures indexed by namespace -func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, podReadyTimeout time.Duration, pvcs map[string]corev1.PersistentVolumeClaim) (map[string]map[string]pvcFailure, error) { +func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, tmpPodImage string, podReadyTimeout time.Duration, pvcs map[string]corev1.PersistentVolumeClaim) (map[string]map[string]pvcFailure, error) { volAccessModeFailures := make(map[string]map[string]pvcFailure) if _, err := client.StorageV1().StorageClasses().Get(ctx, dstSC, metav1.GetOptions{}); err != nil { @@ -79,7 +84,7 @@ func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8scli } for _, pvc := range pvcs { - v, err := checkVolumeAccessModes(ctx, l, client, dstSC, pvc, podReadyTimeout) + v, err := checkVolumeAccessModes(ctx, l, client, dstSC, pvc, podReadyTimeout, 
tmpPodImage) if err != nil { l.Printf("failed to check volume access mode for PVC %s: %s", pvc.Name, err) continue @@ -91,15 +96,15 @@ func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8scli return volAccessModeFailures, nil } -// buildPVCConsumerPod creates a pod spec for consuming a pvc -func buildPVCConsumerPod(pvcName, namespace string) *corev1.Pod { +// buildTmpPVCConsumerPod creates a pod spec for consuming a pvc +func buildTmpPVCConsumerPod(pvcName, namespace, image string) *corev1.Pod { return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: newK8sName("pvmigrate-preflight-pvc-consumer", pvcName), + Name: k8sutil.NewPrefixedName(podNamePrefix, pvcName), Namespace: namespace, }, Spec: corev1.PodSpec{ @@ -116,8 +121,8 @@ func buildPVCConsumerPod(pvcName, namespace string) *corev1.Pod { }, Containers: []corev1.Container{ { - Name: "busybox", - Image: "busybox", + Name: "sleep", + Image: image, Command: []string{ "sleep", "3600", @@ -138,7 +143,7 @@ func buildPVCConsumerPod(pvcName, namespace string) *corev1.Pod { func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: newK8sName("pvmigrate-preflight", pvc.Name), + Name: k8sutil.NewPrefixedName(pvcNamePrefix, pvc.Name), Namespace: pvc.Namespace, }, Spec: corev1.PersistentVolumeClaimSpec{ @@ -155,22 +160,30 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent // checkVolumeAccessModes checks if the access modes of a pv are supported by the // destination storage class. 
-func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout time.Duration) (*pvcFailure, error) { +func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout time.Duration, tmpPodImage string) (*pvcFailure, error) { var err error // create temp pvc for storage class - tmpPVC := buildTmpPVC(pvc, dstSC) - if tmpPVC, err = client.CoreV1().PersistentVolumeClaims("default").Create( - ctx, tmpPVC, metav1.CreateOptions{}, - ); err != nil { - return nil, fmt.Errorf("failed to create temporary pvc: %w", err) + tmpPVCSpec := buildTmpPVC(pvc, dstSC) + tmpPVC, err := client.CoreV1().PersistentVolumeClaims(tmpPVCSpec.Namespace).Create( + ctx, tmpPVCSpec, metav1.CreateOptions{}) + if err != nil { + if !k8serrors.IsAlreadyExists(err) { + return nil, fmt.Errorf("failed to create temporary pvc %s: %w", tmpPVCSpec.Name, err) + } + + // PVC exist, get it + tmpPVC, err = client.CoreV1().PersistentVolumeClaims(tmpPVCSpec.Namespace).Get(ctx, tmpPVCSpec.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get existing temp PVC %s: %w", tmpPVCSpec.Name, err) + } } // consume pvc to determine any access mode errors - pvcConsumerPodSpec := buildPVCConsumerPod(tmpPVC.Name, tmpPVC.Namespace) - pvcConsumerPod, err := client.CoreV1().Pods(pvcConsumerPodSpec.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return nil, err + pvcConsumerPodSpec := buildTmpPVCConsumerPod(tmpPVC.Name, tmpPVC.Namespace, tmpPodImage) + pvcConsumerPod, err := client.CoreV1().Pods(tmpPVC.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create validation pod %s: %s", pvcConsumerPodSpec.Name, err) } // cleanup pvc and pod at the end @@ -340,11 +353,3 @@ func 
pvcsForStorageClass(ctx context.Context, l *log.Logger, client k8sclient.In } return srcPVCs, nil } - -func newK8sName(prefix, original string) string { - newName := fmt.Sprintf("%s-%s", prefix, original) - if len(newName) > 63 { - newName = newName[0:31] + newName[len(newName)-32:] - } - return newName -} diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index c7d4a65..97a1485 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -4,7 +4,7 @@ import ( "context" "io" "log" - "strings" + "os" "testing" "time" @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + k8sclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "k8s.io/utils/pointer" ) @@ -110,7 +111,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { req := require.New(t) kcli := fake.NewSimpleClientset(tt.resources...) logger := log.New(io.Discard, "", 0) - result, err := validateVolumeAccessModes(context.Background(), logger, kcli, tt.dstSC, tt.podReadyTimeout, tt.input) + result, err := validateVolumeAccessModes(context.Background(), logger, kcli, tt.dstSC, "eeacms/rsync:2.3", tt.podReadyTimeout, tt.input) if err != nil { if tt.wantErr { req.Error(err) @@ -132,13 +133,14 @@ func Test_getPvcError(t *testing.T) { expected *pvcFailure }{ { - name: "When there is a PVC failure, expect ProvisioningFailed event", + name: "When there is a PVC failure expect ProvisioningFailed event", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "default", UID: "12345", }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, expected: &pvcFailure{ reason: "ProvisioningFailed", @@ -186,15 +188,16 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "When PVC event failure reason is not ProvisioningFailed, expect error", + name: "When PVC event failure reason is not ProvisioningFailed 
expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "default", UID: "12345", }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: &pvcFailure{}, + expected: nil, wantErr: true, resources: []runtime.Object{ &storagev1.StorageClass{ @@ -237,7 +240,7 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "When PVC is pending due to a failure but there are no events for it, expect error", + name: "When PVC is pending due to a failure but there are no events for it expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -246,7 +249,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: &pvcFailure{}, + expected: nil, wantErr: true, resources: []runtime.Object{ &storagev1.StorageClass{ @@ -270,7 +273,7 @@ func Test_getPvcError(t *testing.T) { }, }, { - name: "When PVC is not in Pending status, expect error", + name: "When PVC is not in Pending status expect error", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", @@ -279,7 +282,7 @@ func Test_getPvcError(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimBound}, }, - expected: &pvcFailure{}, + expected: nil, wantErr: true, resources: []runtime.Object{}, }, @@ -310,13 +313,15 @@ func Test_checkVolumeAccessModes(t *testing.T) { wantErr bool resources []runtime.Object input *corev1.PersistentVolumeClaim - expected pvcFailure + expected *pvcFailure + tmpPodName string + backgroundFunc func(context.Context, *log.Logger, k8sclient.Interface, string, string, string) }{ { - name: "When the PVC access mode is not supported by destination storage provider, expect PVCError", + name: "When the PVC access mode is not supported by destination storage provider expect PVC failure", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: 
"pvc", + Name: "testpvc", Namespace: "default", UID: "12345", }, @@ -326,13 +331,17 @@ func Test_checkVolumeAccessModes(t *testing.T) { }, Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, }, - expected: pvcFailure{ + expected: &pvcFailure{ reason: "ProvisioningFailed", from: "kubernetes.io/no-provisioner", message: "Only support ReadWriteOnce access mode", }, srcStorageClass: "srcSc", dstStorageClass: "dstSc", + tmpPodName: podNamePrefix + "-pf-pvc-testpvc", + // make the timeout for the function under test take a little longer so that that + // backgroundFunc can update the pod phase to Pending + podTimeout: 2 * time.Second, resources: []runtime.Object{ &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -344,6 +353,30 @@ func Test_checkVolumeAccessModes(t *testing.T) { Name: "dstSc", }, }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-for-pf-pvc-testpvc", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "dstSc", + ClaimRef: &corev1.ObjectReference{ + Name: pvcNamePrefix + "-testpvc", + Namespace: "default", + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcNamePrefix + "-testpvc", + Namespace: "default", + UID: "12345", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("dstSc"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + }, + Status: corev1.PersistentVolumeClaimStatus{Phase: corev1.ClaimPending}, + }, &corev1.EventList{ Items: []corev1.Event{ { @@ -362,38 +395,46 @@ func Test_checkVolumeAccessModes(t *testing.T) { }, }, }, - &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "tmpPod", - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - }, - }, - }, - 
Status: corev1.PodStatus{Phase: corev1.PodPending}, - }, + }, + backgroundFunc: func(ctx context.Context, logger *log.Logger, k k8sclient.Interface, tmpPod, ns, pv string) { + for { + pod, err := k.CoreV1().Pods(ns).Get(ctx, tmpPod, metav1.GetOptions{}) + if err != nil { + continue + } + + // update status of the pod to Pending + pendingPod := pod.DeepCopy() + pendingPod.Status = corev1.PodStatus{Phase: corev1.PodPending} + if _, err = k.CoreV1().Pods(pendingPod.Namespace).Update(ctx, pendingPod, metav1.UpdateOptions{}); err != nil { + logger.Printf("backgroundFunc: failed to update pod %s with status Pending", pendingPod.Name) + return + } + + // now wait for a bit until defer functions run + // this needs to > tt.podTimeout + time.Sleep(3 * time.Second) + + // delete PV in resources + if err = k.CoreV1().PersistentVolumes().Delete(ctx, pv, metav1.DeleteOptions{}); err != nil { + logger.Print("backgroundFunc: could not delete PV: ", pv) + } + break + } }, }, } { t.Run(tt.name, func(t *testing.T) { req := require.New(t) + testCtx, cancelfunc := context.WithTimeout(context.Background(), time.Minute) // if your test takes more than 1m, there are issues + defer cancelfunc() kcli := fake.NewSimpleClientset(tt.resources...) 
- logger := log.New(io.Discard, "", 0) - result, err := checkVolumeAccessModes(context.Background(), logger, kcli, tt.dstStorageClass, *tt.input, tt.podTimeout) + // logger := log.New(io.Discard, "", 0) + logger := log.New(os.Stdout, "", 0) + if tt.backgroundFunc != nil { + go tt.backgroundFunc(testCtx, logger, kcli, tt.tmpPodName, "default", "pv-for-pf-pvc-testpvc") + } + result, err := checkVolumeAccessModes(context.Background(), logger, kcli, tt.dstStorageClass, *tt.input, tt.podTimeout, "eeacms/rsync:2.3") if err != nil { if tt.wantErr { req.Error(err) @@ -409,43 +450,12 @@ func Test_checkVolumeAccessModes(t *testing.T) { func Test_buildTmpPVC(t *testing.T) { for _, tt := range []struct { name string - pvcNameOverride string dstStorageClass string input *corev1.PersistentVolumeClaim expectedPVC *corev1.PersistentVolumeClaim - expectedName string }{ { - name: "When PVC name is not overridden, expect unique temp pvc name", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pvc", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - }, - }, - expectedPVC: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - expectedName: "pvmigrate-preflight-test-pvc", - dstStorageClass: "dstSc", - }, - { - name: "When PVC name is longer than 63 chars, expect name to be trimmed", + name: "When PVC name is longer than 63 chars expect name to be trimmed", input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: 
"really-long-pvc-name-that-should-be-trimmed-to-avoid-an-error", @@ -458,6 +468,7 @@ func Test_buildTmpPVC(t *testing.T) { }, expectedPVC: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ + Name: "pf-pvc-really-long-pvc-name-thauld-be-trimmed-to-avoid-an-error", Namespace: "default", }, Spec: corev1.PersistentVolumeClaimSpec{ @@ -470,153 +481,37 @@ func Test_buildTmpPVC(t *testing.T) { }, }, }, - expectedName: "pvmigrate-claim-really-long-pvc-trimmed-to-avoid-an-error-", - dstStorageClass: "dstSc", - }, - { - name: "When PVC name is overriden, expect non-UID generated name", - input: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-test-pvc", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - }, - }, - expectedPVC: &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("dstSc"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - pvcNameOverride: "pvc-name-override", - expectedName: "pvc-name-override", dstStorageClass: "dstSc", }, } { t.Run(tt.name, func(t *testing.T) { req := require.New(t) pvc := buildTmpPVC(*tt.input, tt.dstStorageClass) - req.True(strings.HasPrefix(pvc.Name, tt.expectedName)) - req.Equal(tt.expectedPVC.Spec, pvc.Spec) + req.Equal(tt.expectedPVC, pvc) }) } } func Test_buildPVCConsumerPod(t *testing.T) { for _, tt := range []struct { - name string - namespace string - podNameOverride string - pvcName string - expectedPod *corev1.Pod - expectedName string + name string + namespace string + pvcName string + podImage string + expectedPod *corev1.Pod }{ { - name: "When pod name not overriden, expect 
unique pod name", - pvcName: "test-pvc", - expectedPod: &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - }, - expectedName: "pvmigrate-vol-consumer-test-pvc-", - }, - { - name: "When pod name is longer than 63 chars, expect pod name to be trimmed", - pvcName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", - expectedPod: &corev1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - Volumes: []corev1.Volume{ - { - Name: "tmp", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "pvc-name-that-should-be-trimmed-because-it-will-cause-an-err", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{ - "sleep", - "3600", - }, - VolumeMounts: []corev1.VolumeMount{ - { - MountPath: "/tmpmount", - Name: "tmp", - }, - }, - }, - }, - }, - }, - expectedName: "pvmigrate-vol-consumer-pvc-namecause-it-will-cause-an-err-", - }, - { - name: "When pod name is overriden, expect non-UID name", - pvcName: "test-pvc", + name: "When pod name is longer than 63 chars expect pod name to be trimmed", + pvcName: "pf-pvc-this-pvc-name-will-cause-the-temp-pod-name-to-be-trimmed", + namespace: "default", + podImage: 
"eeacms/rsync:2.3", expectedPod: &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ + Name: "pvmigrate-pf-pod-pf-pvc-this-pv-the-temp-pod-name-to-be-trimmed", Namespace: "default", }, Spec: corev1.PodSpec{ @@ -626,15 +521,15 @@ func Test_buildPVCConsumerPod(t *testing.T) { Name: "tmp", VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", + ClaimName: "pf-pvc-this-pvc-name-will-cause-the-temp-pod-name-to-be-trimmed", }, }, }, }, Containers: []corev1.Container{ { - Name: "busybox", - Image: "busybox", + Name: "sleep", + Image: "eeacms/rsync:2.3", Command: []string{ "sleep", "3600", @@ -649,15 +544,12 @@ func Test_buildPVCConsumerPod(t *testing.T) { }, }, }, - podNameOverride: "my pod name override", - expectedName: "my pod name override", }, } { t.Run(tt.name, func(t *testing.T) { req := require.New(t) - pod := buildPVCConsumerPod(tt.pvcName, tt.namespace) - req.True(strings.HasPrefix(pod.Name, tt.expectedName)) - req.Equal(tt.expectedPod.Spec, pod.Spec) + pod := buildTmpPVCConsumerPod(tt.pvcName, tt.namespace, tt.podImage) + req.Equal(tt.expectedPod, pod) }) } } @@ -672,13 +564,13 @@ func Test_pvcsForStorageClass(t *testing.T) { expected map[string]corev1.PersistentVolumeClaim }{ { - name: "When storage class is not found, expect error", - scname: "i-dont-exit", + name: "When storage class is not found expect error", + scname: "i-dont-exist", namespace: "default", wantErr: true, }, { - name: "When volumes and storage classes exist and namespace is set, expect pvcs for that particular namespace only", + name: "When volumes and storage classes exist and namespace is set expect pvcs for that particular namespace only", scname: "default", namespace: "default", expected: map[string]corev1.PersistentVolumeClaim{ @@ -766,7 +658,7 @@ func Test_pvcsForStorageClass(t *testing.T) { }, }, { - name: "When volumes and storage classes exist and namespace 
is NOT set, expect pvcs for all namespaces", + name: "When volumes and storage classes exist and namespace is NOT set expect pvcs for all namespaces", scname: "default", namespace: "", expected: map[string]corev1.PersistentVolumeClaim{ @@ -791,8 +683,8 @@ func Test_pvcsForStorageClass(t *testing.T) { Namespace: "test", }, Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("rook"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Mi"), @@ -807,11 +699,6 @@ func Test_pvcsForStorageClass(t *testing.T) { Name: "default", }, }, - &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rook", - }, - }, &corev1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "pv0", @@ -829,7 +716,7 @@ func Test_pvcsForStorageClass(t *testing.T) { Name: "pv1", }, Spec: corev1.PersistentVolumeSpec{ - StorageClassName: "rook", + StorageClassName: "default", ClaimRef: &corev1.ObjectReference{ Name: "pvc1", Namespace: "test", @@ -857,8 +744,8 @@ func Test_pvcsForStorageClass(t *testing.T) { Namespace: "test", }, Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("rook"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Mi"), @@ -869,26 +756,11 @@ func Test_pvcsForStorageClass(t *testing.T) { }, }, { - name: "When PV does not have an associated PVC, expect error", + name: "When PV does not have an associated PVC expect error", scname: "default", namespace: "default", - expected: map[string]corev1.PersistentVolumeClaim{ - "pvc0": { - 
ObjectMeta: metav1.ObjectMeta{ - Name: "pvc0", - Namespace: "default", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: pointer.String("default"), - AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Mi"), - }, - }, - }, - }, - }, + wantErr: true, + expected: nil, resources: []runtime.Object{ &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ From 87666f56f6eb04387e305818c546dc15d915d02f Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 14:47:21 -0600 Subject: [PATCH 23/41] Map all failures to exit code 1 --- cmd/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index cf29aa8..5fb8b4f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -61,7 +61,7 @@ func main() { if len(failures) != 0 { preflight.PrintValidationFailures(output.Writer(), failures) - os.Exit(2) + os.Exit(1) } } @@ -70,7 +70,7 @@ func main() { err = migrate.Migrate(context.TODO(), output, clientset, options) if err != nil { output.Printf("migration failed: %s", err) - os.Exit(3) + os.Exit(1) } } } From a5566503f19a8798ce49f6659a86e5742bae91fd Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 15:17:00 -0600 Subject: [PATCH 24/41] Address Ethan's PR review comments and suggestions --- cmd/main.go | 21 +++++++++++---------- pkg/k8sutil/storage.go | 2 +- pkg/migrate/migrate.go | 2 +- pkg/preflight/validate.go | 27 +++++++++++++-------------- pkg/preflight/validate_test.go | 2 +- 5 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 5fb8b4f..ad4b704 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -30,46 +30,47 @@ func main() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") 
flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") - flag.IntVar(&options.PvcCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") + flag.IntVar(&options.PVCCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") flag.Parse() + // setup logger + logger := log.New(os.Stderr, "", 0) // this has no time prefix etc + // setup k8s cfg, err := config.GetConfig() if err != nil { - fmt.Printf("failed to get config: %s\n", err.Error()) + logger.Printf("failed to get config: %s", err) os.Exit(1) } clientset, err := k8sclient.NewForConfig(cfg) if err != nil { - fmt.Printf("failed to create kubernetes clientset: %s\n", err.Error()) + logger.Printf("failed to create kubernetes clientset: %s", err) os.Exit(1) } - output := log.New(os.Stdout, "", 0) // this has no time prefix etc - if !skipPreflightValidation { - failures, err := preflight.Validate(context.TODO(), output, clientset, options) + failures, err := preflight.Validate(context.TODO(), logger, clientset, options) if err != nil { - output.Printf("failed to run preflight validation checks") + logger.Printf("failed to run preflight validation checks") 
os.Exit(1) } if len(failures) != 0 { - preflight.PrintValidationFailures(output.Writer(), failures) + preflight.PrintValidationFailures(os.Stdout, failures) os.Exit(1) } } // start the migration if !preflightValidationOnly { - err = migrate.Migrate(context.TODO(), output, clientset, options) + err = migrate.Migrate(context.TODO(), logger, clientset, options) if err != nil { - output.Printf("migration failed: %s", err) + logger.Printf("migration failed: %s", err) os.Exit(1) } } diff --git a/pkg/k8sutil/storage.go b/pkg/k8sutil/storage.go index 0a5a356..e451d2b 100644 --- a/pkg/k8sutil/storage.go +++ b/pkg/k8sutil/storage.go @@ -18,7 +18,7 @@ func PVsByStorageClass(ctx context.Context, cli kubernetes.Interface, scname str allpvs, err := cli.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) if err != nil { - return nil, fmt.Errorf("failed to get persistent volumes: %w", err) + return nil, fmt.Errorf("failed to list persistent volumes: %w", err) } pvs := map[string]corev1.PersistentVolume{} diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 344e6e7..61cc6cb 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -47,7 +47,7 @@ type Options struct { SetDefaults bool VerboseCopy bool SkipSourceValidation bool - PvcCopyTimeout int + PVCCopyTimeout int PodReadyTimeout int } diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 4d482b5..9547ab8 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -42,11 +42,11 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, // validate access modes for all PVCs using the source storage class pvcs, err := pvcsForStorageClass(ctx, w, clientset, options.SourceSCName, options.Namespace) if err != nil { - return nil, fmt.Errorf("failed to get PVCs for storage %s: %s", options.SourceSCName, err) + return nil, fmt.Errorf("failed to get PVCs for storage %s: %w", options.SourceSCName, err) } pvcAccesModeFailures, err := 
validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, time.Duration(options.PodReadyTimeout), pvcs) if err != nil { - return nil, fmt.Errorf("failed validate PVC access modes: %s", err) + return nil, fmt.Errorf("failed to validate PVC access modes: %w", err) } return toValidationFailures(pvcAccesModeFailures), nil } @@ -183,7 +183,7 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient pvcConsumerPodSpec := buildTmpPVCConsumerPod(tmpPVC.Name, tmpPVC.Namespace, tmpPodImage) pvcConsumerPod, err := client.CoreV1().Pods(tmpPVC.Namespace).Create(ctx, pvcConsumerPodSpec, metav1.CreateOptions{}) if err != nil { - return nil, fmt.Errorf("failed to create validation pod %s: %s", pvcConsumerPodSpec.Name, err) + return nil, fmt.Errorf("failed to create validation pod %s: %w", pvcConsumerPodSpec.Name, err) } // cleanup pvc and pod at the end @@ -200,7 +200,7 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient for { gotPod, err := client.CoreV1().Pods(pvcConsumerPodSpec.Namespace).Get(ctx, pvcConsumerPodSpec.Name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("failed getting pv consumer pod %s: %w", gotPod.Name, err) + return nil, fmt.Errorf("failed to get pv consumer pod %s: %w", gotPod.Name, err) } switch { @@ -217,9 +217,9 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient // but the pod has not been scheduled yet, nor have container images been pulled from the registry. 
if gotPod.Status.Phase == corev1.PodPending { // check pvc status and get error - pvcPendingError, err := getPvcError(client, tmpPVC) + pvcPendingError, err := getPVCError(client, tmpPVC) if err != nil { - return nil, fmt.Errorf("failed to get PVC error: %s", err) + return nil, fmt.Errorf("failed to get pvc failure: %w", err) } return pvcPendingError, nil } @@ -263,9 +263,8 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist } waitFor = append(waitFor, pvc.Name) - timeout := time.NewTicker(5 * time.Minute) + timeout := time.After(5 * time.Minute) interval := time.NewTicker(5 * time.Second) - defer timeout.Stop() defer interval.Stop() for _, pvc := range waitFor { pv, ok := pvsByPVCName[pvc] @@ -287,7 +286,7 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist select { case <-interval.C: continue - case <-timeout.C: + case <-timeout: return fmt.Errorf("failed to delete pvs: timeout") } } @@ -307,8 +306,8 @@ func deletePVConsumerPod(client k8sclient.Interface, pod *corev1.Pod) error { return nil } -// getPvcError returns the failure event for why a PVC is in Pending status -func getPvcError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) (*pvcFailure, error) { +// getPVCError returns the failure event for why a PVC is in Pending status +func getPVCError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) (*pvcFailure, error) { // no need to inspect pvc if pvc.Status.Phase != corev1.ClaimPending { return nil, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) @@ -316,7 +315,7 @@ func getPvcError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) pvcEvents, err := client.CoreV1().Events(pvc.Namespace).Search(scheme.Scheme, pvc) if err != nil { - return nil, fmt.Errorf("failed to list events for PVC %s", pvc.Name) + return nil, fmt.Errorf("failed to list events for PVC %s: %w", pvc.Name, err) } // get pending reason @@ -331,9 +330,9 @@ func getPvcError(client 
k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) // pvcsForStorageClass returns all PersistentVolumeClaims, filtered by namespace, for a given // storage class func pvcsForStorageClass(ctx context.Context, l *log.Logger, client k8sclient.Interface, srcSC, namespace string) (map[string]corev1.PersistentVolumeClaim, error) { - srcPVs, err := k8sutil.PVsByStorageClass(context.TODO(), client, srcSC) + srcPVs, err := k8sutil.PVsByStorageClass(ctx, client, srcSC) if err != nil { - return nil, fmt.Errorf("failed to get PVs for storage class %s: %s", srcSC, err) + return nil, fmt.Errorf("failed to get PVs for storage class %s: %w", srcSC, err) } // get PVCs using specified PVs diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index 97a1485..a6c0de5 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -290,7 +290,7 @@ func Test_getPvcError(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req := require.New(t) kcli := fake.NewSimpleClientset(tt.resources...) 
- result, err := getPvcError(kcli, tt.input) + result, err := getPVCError(kcli, tt.input) if err != nil { if tt.wantErr { req.Error(err) From ff3ea813a856293cd7d4f691e987bbee965b546c Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 16:31:11 -0600 Subject: [PATCH 25/41] tweak Test_pvcForStorageClass test --- pkg/preflight/validate_test.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index a6c0de5..d713ed9 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -619,6 +619,18 @@ func Test_pvcsForStorageClass(t *testing.T) { }, Spec: corev1.PersistentVolumeSpec{ StorageClassName: "rook", + ClaimRef: &corev1.ObjectReference{ + Name: "pvc2", + Namespace: "test", + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv2", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "default", ClaimRef: &corev1.ObjectReference{ Name: "pvc1", Namespace: "test", @@ -645,6 +657,21 @@ func Test_pvcsForStorageClass(t *testing.T) { Name: "pvc1", Namespace: "test", }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: pointer.String("default"), + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Mi"), + }, + }, + }, + }, + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc2", + Namespace: "test", + }, Spec: corev1.PersistentVolumeClaimSpec{ StorageClassName: pointer.String("rook"), AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteMany"}, From f2b558b5bfe0f752464769687bda30854064d184 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 22:57:59 -0600 Subject: [PATCH 26/41] convert podready timeout to correct unit --- 
cmd/main.go | 1 - pkg/migrate/migrate.go | 1 - pkg/preflight/validate.go | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index ad4b704..97364ae 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -30,7 +30,6 @@ func main() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") - flag.IntVar(&options.PVCCopyTimeout, "pvc-copy-timeout", 300, "length of time to wait (in seconds) when transferring data from the source to the destination storage volume") flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 61cc6cb..e60caa0 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -47,7 +47,6 @@ type Options struct { SetDefaults bool VerboseCopy bool SkipSourceValidation bool - PVCCopyTimeout int PodReadyTimeout int } diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 9547ab8..5985406 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -44,7 +44,7 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, if err != nil { return nil, fmt.Errorf("failed to get PVCs for storage %s: %w", options.SourceSCName, err) } - pvcAccesModeFailures, err := 
validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, time.Duration(options.PodReadyTimeout), pvcs) + pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, time.Duration(options.PodReadyTimeout)*time.Second, pvcs) if err != nil { return nil, fmt.Errorf("failed to validate PVC access modes: %w", err) } @@ -308,7 +308,7 @@ func deletePVConsumerPod(client k8sclient.Interface, pod *corev1.Pod) error { // getPVCError returns the failure event for why a PVC is in Pending status func getPVCError(client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) (*pvcFailure, error) { - // no need to inspect pvc + // no need to inspect pvc if it's NOT in Pending phase if pvc.Status.Phase != corev1.ClaimPending { return nil, fmt.Errorf("PVC %s is not in Pending status", pvc.Name) } From de8c668839c13c7d214ef0e1c7ae4128f202ecc7 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 2 Dec 2022 23:27:57 -0600 Subject: [PATCH 27/41] allow os signals to context --- cmd/main.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 97364ae..1ae8b3e 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "os" + "os/signal" "github.com/replicatedhq/pvmigrate/pkg/migrate" "github.com/replicatedhq/pvmigrate/pkg/preflight" @@ -16,6 +17,9 @@ import ( ) func main() { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) + defer stop() + fmt.Printf("Running pvmigrate build:\n") version.Print() @@ -53,7 +57,7 @@ func main() { } if !skipPreflightValidation { - failures, err := preflight.Validate(context.TODO(), logger, clientset, options) + failures, err := preflight.Validate(ctx, logger, clientset, options) if err != nil { logger.Printf("failed to run preflight validation checks") os.Exit(1) @@ -67,7 +71,7 @@ func main() { // start the migration if 
!preflightValidationOnly { - err = migrate.Migrate(context.TODO(), logger, clientset, options) + err = migrate.Migrate(ctx, logger, clientset, options) if err != nil { logger.Printf("migration failed: %s", err) os.Exit(1) From 0313d4a815f57d86b8e3f54be0a9b8d7b2290095 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Sat, 3 Dec 2022 00:02:19 -0600 Subject: [PATCH 28/41] fix pod and pvc cleanup --- pkg/preflight/validate.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 5985406..9828fc4 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -188,12 +188,14 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient // cleanup pvc and pod at the end defer func() { - if err = deleteTmpPVC(l, client, tmpPVC); err != nil { - l.Printf("failed to cleanup tmp claim: %s", err) - } + // pod must be deleted first then the pvc if err = deletePVConsumerPod(client, pvcConsumerPod); err != nil { l.Printf("failed to cleanup pv consumer pod %s: %s", pvcConsumerPod.Name, err) } + + if err = deleteTmpPVC(l, client, tmpPVC); err != nil { + l.Printf("failed to cleanup tmp claim: %s", err) + } }() podReadyTimeoutEnd := time.Now().Add(timeout) From cf84ac3375a7bb7526ef2c4045a8e68bcc958df5 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Sat, 3 Dec 2022 00:23:41 -0600 Subject: [PATCH 29/41] simplify table output --- pkg/preflight/validate.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 9828fc4..f9c1c0d 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -54,11 +54,11 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, // PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors func 
PrintValidationFailures(stream io.Writer, failures []ValidationFailure) { tw := tabwriter.NewWriter(stream, 0, 8, 8, '\t', 0) - fmt.Fprintf(tw, "The following persistent volume claims cannot be migrated:\n\n") - fmt.Fprintln(tw, "NAMESPACE\tRESOURCE\tSOURCE\tMESSAGE") - fmt.Fprintf(tw, "---------\t--------\t------\t-------\n") + fmt.Fprintf(tw, "The following resources cannot be migrated:\n\n") + fmt.Fprintln(tw, "NAMESPACE\tRESOURCE\tMESSAGE") + fmt.Fprintf(tw, "---------\t--------\t-------\n") for _, failure := range failures { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", failure.Namespace, failure.Resource, failure.Source, failure.Message) + fmt.Fprintf(tw, "%s\t%s\t%s\n", failure.Namespace, failure.Resource, failure.Message) } tw.Flush() } @@ -67,7 +67,7 @@ func toValidationFailures(pvcFailures map[string]map[string]pvcFailure) []Valida var vFailures []ValidationFailure for ns, failures := range pvcFailures { for name, pvcFailure := range failures { - vFailures = append(vFailures, ValidationFailure{ns, name, pvcFailure.from, pvcFailure.message}) + vFailures = append(vFailures, ValidationFailure{ns, "pvc/" + name, pvcFailure.from, pvcFailure.message}) } } return vFailures From 7c8d9e547340a821869159e1975b92ee2bbe60b6 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Sat, 3 Dec 2022 00:33:37 -0600 Subject: [PATCH 30/41] speed up pod deletion --- pkg/preflight/validate.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index f9c1c0d..9618c5f 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -17,6 +17,8 @@ import ( k8sclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" k8spodutils "k8s.io/kubernetes/pkg/api/v1/pod" + + "k8s.io/utils/pointer" ) const ( @@ -299,7 +301,7 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist // deletePVConsumerPod removes the pod resource from the 
api server func deletePVConsumerPod(client k8sclient.Interface, pod *corev1.Pod) error { propagation := metav1.DeletePropagationForeground - delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} + delopts := metav1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0), PropagationPolicy: &propagation} if pod != nil { if err := client.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, delopts); err != nil { return err From 9e2f86c30f42337bda61025d5346b829d074847c Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 13:00:23 -0600 Subject: [PATCH 31/41] validate storage class resource is present in the cluster --- cmd/main.go | 3 +- pkg/migrate/migrate.go | 3 +- pkg/preflight/validate.go | 41 +++++++++++++++++- pkg/preflight/validate_test.go | 78 ++++++++++++++++++++++++++++++++++ 4 files changed, 121 insertions(+), 4 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 1ae8b3e..39bb9a7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -7,6 +7,7 @@ import ( "log" "os" "os/signal" + "time" "github.com/replicatedhq/pvmigrate/pkg/migrate" "github.com/replicatedhq/pvmigrate/pkg/preflight" @@ -34,7 +35,7 @@ func main() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") - flag.IntVar(&options.PodReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") + flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") 
flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index e60caa0..e97286b 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -47,7 +47,7 @@ type Options struct { SetDefaults bool VerboseCopy bool SkipSourceValidation bool - PodReadyTimeout int + PodReadyTimeout time.Duration } // Migrate moves data and PVCs from one StorageClass to another @@ -537,7 +537,6 @@ func getPVCs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, return matchingPVCs, pvcNamespaces, nil } -// TODO: move this to the validation package func validateStorageClasses(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, sourceSCName string, destSCName string, skipSourceValidation bool) error { // get storage providers storageClasses, err := clientset.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 9618c5f..dfb4fae 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -41,12 +41,23 @@ type ValidationFailure struct { // Validate runs preflight check on storage volumes returning a list of failures func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, options migrate.Options) ([]ValidationFailure, error) { + // validate storage classes + scFailures, err := validateStorageClasses(ctx, w, clientset, options.SourceSCName, options.DestSCName) + if err != nil { + return nil, fmt.Errorf("failed to validate storage classes: %w", err) + } + + // if there are storage class validation failures it doesn't make sense to proceed + if scFailures != nil { + return scFailures, nil + } + // validate access modes for all PVCs using the source storage 
class pvcs, err := pvcsForStorageClass(ctx, w, clientset, options.SourceSCName, options.Namespace) if err != nil { return nil, fmt.Errorf("failed to get PVCs for storage %s: %w", options.SourceSCName, err) } - pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, time.Duration(options.PodReadyTimeout)*time.Second, pvcs) + pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, options.PodReadyTimeout, pvcs) if err != nil { return nil, fmt.Errorf("failed to validate PVC access modes: %w", err) } @@ -98,6 +109,34 @@ func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8scli return volAccessModeFailures, nil } +// validateStorageClasses returns any failures encountered when discovering the source and destination +// storage classes +func validateStorageClasses(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, sourceSCName, destSCName string) ([]ValidationFailure, error) { + // get storage providers + storageClasses, err := clientset.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list storage classes: %w", err) + } + + var sourceScFound, destScFound bool + var scFailures []ValidationFailure + for _, sc := range storageClasses.Items { + if sc.Name == sourceSCName { + sourceScFound = true + } + if sc.Name == destSCName { + destScFound = true + } + } + if !sourceScFound { + scFailures = append(scFailures, ValidationFailure{Resource: "sc/" + sourceSCName, Message: "Resource not found"}) + } + if !destScFound { + scFailures = append(scFailures, ValidationFailure{Resource: "sc/" + destSCName, Message: "Resource not found"}) + } + return scFailures, nil +} + // buildTmpPVCConsumerPod creates a pod spec for consuming a pvc func buildTmpPVCConsumerPod(pvcName, namespace, image string) *corev1.Pod { return &corev1.Pod{ diff --git 
a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index d713ed9..4ed07da 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -868,3 +868,81 @@ func Test_pvcsForStorageClass(t *testing.T) { }) } } + +func Test_validateStorageClasses(t *testing.T) { + for _, tt := range []struct { + name string + resources []runtime.Object + sourceSC string + destSC string + wantErr bool + expected []ValidationFailure + }{ + { + name: "When both StorageClasses exist and are distinct expect no failures", + sourceSC: "sourcesc", + destSC: "destsc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sourcesc", + }, + }, + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "destsc", + }, + }, + }, + }, + { + name: "When source storage class does not exist expect validation failure", + sourceSC: "sourcesc", + destSC: "destsc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "destsc", + }, + }, + }, + expected: []ValidationFailure{ + { + Resource: "sc/sourcesc", + Message: "Resource not found", + }, + }, + }, + { + name: "When destination storage class does not exist expect validation failure", + sourceSC: "sourcesc", + destSC: "destsc", + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sourcesc", + }, + }, + }, + expected: []ValidationFailure{ + { + Resource: "sc/destsc", + Message: "Resource not found", + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + clientset := fake.NewSimpleClientset(tt.resources...) 
+ logger := log.New(io.Discard, "", 0) + result, err := validateStorageClasses(context.Background(), logger, clientset, tt.sourceSC, tt.destSC) + if !tt.wantErr { + req.NoError(err) + } else { + req.Error(err) + } + req.Equal(result, tt.expected) + }) + } +} From 263427fa2ef4bcfb65408b67291245e88c268180 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 13:30:23 -0600 Subject: [PATCH 32/41] Change output message --- pkg/preflight/validate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index dfb4fae..c6c12f6 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -67,7 +67,7 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, // PrintPVAccessModeErrors prints and formats the volume access mode errors in pvcErrors func PrintValidationFailures(stream io.Writer, failures []ValidationFailure) { tw := tabwriter.NewWriter(stream, 0, 8, 8, '\t', 0) - fmt.Fprintf(tw, "The following resources cannot be migrated:\n\n") + fmt.Fprintf(tw, "The following resources failed validation:\n") fmt.Fprintln(tw, "NAMESPACE\tRESOURCE\tMESSAGE") fmt.Fprintf(tw, "---------\t--------\t-------\n") for _, failure := range failures { From 0659ab5b27516c073d701995e2eef7278599ead8 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 13:34:41 -0600 Subject: [PATCH 33/41] Add todo --- pkg/migrate/migrate.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index e97286b..28104d4 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -578,6 +578,7 @@ const nameSuffix = "-pvcmigrate" // pruning from the end runs the risk of dropping the '0'/'1'/etc of a statefulset's PVC name // pruning from the front runs the risk of making a-replica-... and b-replica-... 
collide // so this removes characters from the middle of the string +// TODO: refactor to k8sutil package func newPvcName(originalName string) string { candidate := originalName + nameSuffix if len(candidate) <= 63 { From 573e99692f9cc74a2f78065ac25651419b24965e Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 14:08:16 -0600 Subject: [PATCH 34/41] Update flags and README --- README.md | 19 ++++++++++++++++++- cmd/main.go | 2 +- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d86e41d..db797a2 100644 --- a/README.md +++ b/README.md @@ -3,12 +3,26 @@ pvmigrate allows migrating PVCs between two StorageClasses by creating new PVs, copying over the data, and then changing PVCs to refer to the new PVs. +## Preflight Validation + +`pvmigrate` can perform preflight migration validation to catch any potential failures prior to the migration. + +Currently supported validations are: +- Checking for existence of storage classes +- Checking existing PVC access modes are supported on the destination storage provider + ## Examples To migrate PVs from the 'default' StorageClass to mynewsc: ```bash -pvmigrate --source-sc default --dest-sc mynewsc +pvmigrate --source-sc "default" --dest-sc "mynewsc" +``` + +To run preflight migration validation without actually running the migration operation: + +```bash +pvmigrate --source-sc "source" --dest-sc "destination" --preflight-validation-only ``` ## Flags @@ -22,6 +36,9 @@ pvmigrate --source-sc default --dest-sc mynewsc | --set-defaults | Bool | | false | change default storage class from source to dest | | --verbose-copy | Bool | | false | show output from the rsync command used to copy data between PVCs | | --skip-source-validation | Bool | | false | migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist | +| --preflight-validation-only | Bool | | false | skip the migration and run preflight 
validation only | +| --skip-preflight-validation | Bool | | false | skip preflight migration validation on the destination storage provider | +| --pod-ready-timeout | time.Duration | | 60 seconds | length of time to wait (in seconds) for validation pod(s) to go into Ready phase | ## Process diff --git a/cmd/main.go b/cmd/main.go index 39bb9a7..d60a1ef 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -36,7 +36,7 @@ func main() { flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") - flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip the volume access modes validation on the destination storage provider") + flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") flag.Parse() From 9d3323db9a1cfd8393b3cdad68a0d6bc36eae48c Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 14:08:51 -0600 Subject: [PATCH 35/41] Update text for pod-ready-timeout flag --- README.md | 2 +- cmd/main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index db797a2..ab9d0fb 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ PVCs to refer to the new PVs. ## Preflight Validation -`pvmigrate` can perform preflight migration validation to catch any potential failures prior to the migration. 
+`pvmigrate` will run preflight migration validation to catch any potential failures prior to the migration. Currently supported validations are: - Checking for existence of storage classes diff --git a/cmd/main.go b/cmd/main.go index d60a1ef..5e32064 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -35,7 +35,7 @@ func main() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") - flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for volume validation pod(s) to go into Ready phase") + flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") From 25f7358cd3f09536a64ee3d9a925196376e4a06f Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 7 Dec 2022 14:14:23 -0600 Subject: [PATCH 36/41] Add Ethan's suggestion --- pkg/migrate/migrate.go | 4 ++-- pkg/migrate/migrate_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 28104d4..662f0e6 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -24,7 +24,7 @@ const ( scaleAnnotation = baseAnnotation + "-scale" kindAnnotation = baseAnnotation + "-kind" sourceNsAnnotation = baseAnnotation + 
"-sourcens" - sourcePvcAnnotation = baseAnnotation + "-sourcepvc" + sourcePVCAnnotation = baseAnnotation + "-sourcepvc" desiredReclaimAnnotation = baseAnnotation + "-reclaim" ) @@ -957,7 +957,7 @@ func swapPVs(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, err = mutatePV(ctx, w, clientset, migratedPVC.Spec.VolumeName, func(volume *corev1.PersistentVolume) *corev1.PersistentVolume { // add annotations describing what PVC this data came from in case of a failure later volume.Annotations[sourceNsAnnotation] = ns - volume.Annotations[sourcePvcAnnotation] = pvcName + volume.Annotations[sourcePVCAnnotation] = pvcName // this will be used to set the reclaim policy after attaching a new PVC volume.Annotations[desiredReclaimAnnotation] = string(originalReclaim) diff --git a/pkg/migrate/migrate_test.go b/pkg/migrate/migrate_test.go index 591711b..5c0d8f1 100644 --- a/pkg/migrate/migrate_test.go +++ b/pkg/migrate/migrate_test.go @@ -1389,7 +1389,7 @@ func Test_swapPVs(t *testing.T) { Annotations: map[string]string{ desiredReclaimAnnotation: "Delete", sourceNsAnnotation: "testns", - sourcePvcAnnotation: "sourcepvc", + sourcePVCAnnotation: "sourcepvc", "testannotation": "dest-pv", }, }, From b8382caf0c0b5894dede8b16591f7a3e999ab1e5 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Fri, 9 Dec 2022 00:15:21 -0600 Subject: [PATCH 37/41] --skip-source-validation works with preflight validation --- pkg/preflight/validate.go | 10 +++++++--- pkg/preflight/validate_test.go | 30 ++++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index c6c12f6..015d3af 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -42,7 +42,7 @@ type ValidationFailure struct { // Validate runs preflight check on storage volumes returning a list of failures func Validate(ctx context.Context, w *log.Logger, clientset 
k8sclient.Interface, options migrate.Options) ([]ValidationFailure, error) { // validate storage classes - scFailures, err := validateStorageClasses(ctx, w, clientset, options.SourceSCName, options.DestSCName) + scFailures, err := validateStorageClasses(ctx, w, clientset, options.SourceSCName, options.DestSCName, options.SkipSourceValidation) if err != nil { return nil, fmt.Errorf("failed to validate storage classes: %w", err) } @@ -111,7 +111,7 @@ func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8scli // validateStorageClasses returns any failures encountered when discovering the source and destination // storage classes -func validateStorageClasses(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, sourceSCName, destSCName string) ([]ValidationFailure, error) { +func validateStorageClasses(ctx context.Context, l *log.Logger, clientset k8sclient.Interface, sourceSCName, destSCName string, skipSourceValidation bool) ([]ValidationFailure, error) { // get storage providers storageClasses, err := clientset.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { @@ -129,7 +129,11 @@ func validateStorageClasses(ctx context.Context, l *log.Logger, clientset k8scli } } if !sourceScFound { - scFailures = append(scFailures, ValidationFailure{Resource: "sc/" + sourceSCName, Message: "Resource not found"}) + if skipSourceValidation { + l.Printf("Warning: unable to find source Storage Class %s, but continuing anyways", sourceSCName) + } else { + scFailures = append(scFailures, ValidationFailure{Resource: "sc/" + sourceSCName, Message: "Resource not found"}) + } } if !destScFound { scFailures = append(scFailures, ValidationFailure{Resource: "sc/" + destSCName, Message: "Resource not found"}) diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index 4ed07da..179768a 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -871,12 +871,13 @@ func 
Test_pvcsForStorageClass(t *testing.T) { func Test_validateStorageClasses(t *testing.T) { for _, tt := range []struct { - name string - resources []runtime.Object - sourceSC string - destSC string - wantErr bool - expected []ValidationFailure + name string + resources []runtime.Object + sourceSC string + destSC string + wantErr bool + expected []ValidationFailure + skipSourceSCValidation bool }{ { name: "When both StorageClasses exist and are distinct expect no failures", @@ -931,18 +932,31 @@ func Test_validateStorageClasses(t *testing.T) { }, }, }, + { + name: "When source storage class does not exist and skip storage class validation is enabled expect no failure", + sourceSC: "sourcesc", + destSC: "destsc", + skipSourceSCValidation: true, + resources: []runtime.Object{ + &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "destsc", + }, + }, + }, + }, } { t.Run(tt.name, func(t *testing.T) { req := require.New(t) clientset := fake.NewSimpleClientset(tt.resources...) logger := log.New(io.Discard, "", 0) - result, err := validateStorageClasses(context.Background(), logger, clientset, tt.sourceSC, tt.destSC) + result, err := validateStorageClasses(context.Background(), logger, clientset, tt.sourceSC, tt.destSC, tt.skipSourceSCValidation) if !tt.wantErr { req.NoError(err) } else { req.Error(err) } - req.Equal(result, tt.expected) + req.Equal(tt.expected, result) }) } } From 92cb2b7d238d13721118f4c42b7423ee5205b7db Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Tue, 13 Dec 2022 22:20:57 -0600 Subject: [PATCH 38/41] Simplify deleteTmpPVC() --- cmd/main.go | 1 + pkg/migrate/migrate.go | 1 + pkg/preflight/validate.go | 58 ++++---- pkg/preflight/validate_test.go | 244 +++++++++++++++++++++++++++++++-- 4 files changed, 263 insertions(+), 41 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 5e32064..3a096c6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -36,6 +36,7 @@ func main() { 
flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase") + flag.DurationVar(&options.DeletePVTimeout, "delete-pv-timeout", 300*time.Second, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is purged") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go index 662f0e6..9cdf662 100644 --- a/pkg/migrate/migrate.go +++ b/pkg/migrate/migrate.go @@ -48,6 +48,7 @@ type Options struct { VerboseCopy bool SkipSourceValidation bool PodReadyTimeout time.Duration + DeletePVTimeout time.Duration } // Migrate moves data and PVCs from one StorageClass to another diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index 015d3af..c5ca5f7 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -57,7 +57,7 @@ func Validate(ctx context.Context, w *log.Logger, clientset k8sclient.Interface, if err != nil { return nil, fmt.Errorf("failed to get PVCs for storage %s: %w", options.SourceSCName, err) } - pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, options.PodReadyTimeout, pvcs) + pvcAccesModeFailures, err := validateVolumeAccessModes(ctx, w, clientset, options.DestSCName, options.RsyncImage, options.PodReadyTimeout, options.DeletePVTimeout, pvcs) if err != nil { return 
nil, fmt.Errorf("failed to validate PVC access modes: %w", err) } @@ -89,7 +89,7 @@ func toValidationFailures(pvcFailures map[string]map[string]pvcFailure) []Valida // validateVolumeAccessModes checks whether the provided persistent volumes support the access modes // of the destination storage class. // returns a map of pvc validation failures indexed by namespace -func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, tmpPodImage string, podReadyTimeout time.Duration, pvcs map[string]corev1.PersistentVolumeClaim) (map[string]map[string]pvcFailure, error) { +func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, tmpPodImage string, podReadyTimeout, deletePVTimeout time.Duration, pvcs map[string]corev1.PersistentVolumeClaim) (map[string]map[string]pvcFailure, error) { volAccessModeFailures := make(map[string]map[string]pvcFailure) if _, err := client.StorageV1().StorageClasses().Get(ctx, dstSC, metav1.GetOptions{}); err != nil { @@ -97,7 +97,7 @@ func validateVolumeAccessModes(ctx context.Context, l *log.Logger, client k8scli } for _, pvc := range pvcs { - v, err := checkVolumeAccessModes(ctx, l, client, dstSC, pvc, podReadyTimeout, tmpPodImage) + v, err := checkVolumeAccessModes(ctx, l, client, dstSC, pvc, podReadyTimeout, deletePVTimeout, tmpPodImage) if err != nil { l.Printf("failed to check volume access mode for PVC %s: %s", pvc.Name, err) continue @@ -205,7 +205,7 @@ func buildTmpPVC(pvc corev1.PersistentVolumeClaim, sc string) *corev1.Persistent // checkVolumeAccessModes checks if the access modes of a pv are supported by the // destination storage class. 
-func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout time.Duration, tmpPodImage string) (*pvcFailure, error) { +func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient.Interface, dstSC string, pvc corev1.PersistentVolumeClaim, timeout, deletePVTimeout time.Duration, tmpPodImage string) (*pvcFailure, error) { var err error // create temp pvc for storage class @@ -238,7 +238,7 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient l.Printf("failed to cleanup pv consumer pod %s: %s", pvcConsumerPod.Name, err) } - if err = deleteTmpPVC(l, client, tmpPVC); err != nil { + if err = deleteTmpPVC(l, client, tmpPVC, deletePVTimeout); err != nil { l.Printf("failed to cleanup tmp claim: %s", err) } }() @@ -280,7 +280,7 @@ func checkVolumeAccessModes(ctx context.Context, l *log.Logger, client k8sclient // backing pv disappear as well (this is mandatory so we don't leave any orphan pv as this would // cause pvmigrate to fail). this function has a timeout of 5 minutes, after that an error is // returned. 
-func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim) error { +func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.PersistentVolumeClaim, deleteTimeout time.Duration) error { // Cleanup should use background context so as not to fail if context has already been canceled ctx := context.Background() @@ -291,13 +291,12 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist pvsByPVCName := map[string]corev1.PersistentVolume{} for _, pv := range pvs.Items { - if pv.Spec.ClaimRef == nil { + if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.Namespace != pvc.Namespace { continue } pvsByPVCName[pv.Spec.ClaimRef.Name] = pv } - var waitFor []string propagation := metav1.DeletePropagationForeground delopts := metav1.DeleteOptions{PropagationPolicy: &propagation} if err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete( @@ -308,34 +307,31 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist return err } } - waitFor = append(waitFor, pvc.Name) - timeout := time.After(5 * time.Minute) + pv, ok := pvsByPVCName[pvc.Name] + if !ok { + l.Printf("failed to find pv for temp pvc %s", pvc) + return nil + } + + timeout := time.After(deleteTimeout) interval := time.NewTicker(5 * time.Second) defer interval.Stop() - for _, pvc := range waitFor { - pv, ok := pvsByPVCName[pvc] - if !ok { - l.Printf("failed to find pv for temp pvc %s", pvc) - continue + for { + // break the loop as soon as we can't find the pv anymore. + if _, err := client.CoreV1().PersistentVolumes().Get( + ctx, pv.Name, metav1.GetOptions{}, + ); err != nil && !k8serrors.IsNotFound(err) { + l.Printf("failed to get pv for temp pvc %s: %s", pvc, err) + } else if err != nil && k8serrors.IsNotFound(err) { + break } - for { - // break the loop as soon as we can't find the pv anymore. 
- if _, err := client.CoreV1().PersistentVolumes().Get( - ctx, pv.Name, metav1.GetOptions{}, - ); err != nil && !k8serrors.IsNotFound(err) { - l.Printf("failed to get pv for temp pvc %s: %s", pvc, err) - } else if err != nil && k8serrors.IsNotFound(err) { - break - } - - select { - case <-interval.C: - continue - case <-timeout: - return fmt.Errorf("failed to delete pvs: timeout") - } + select { + case <-interval.C: + continue + case <-timeout: + return fmt.Errorf("failed to delete pvs: timeout") } } return nil diff --git a/pkg/preflight/validate_test.go b/pkg/preflight/validate_test.go index 179768a..4fc2be2 100644 --- a/pkg/preflight/validate_test.go +++ b/pkg/preflight/validate_test.go @@ -4,7 +4,6 @@ import ( "context" "io" "log" - "os" "testing" "time" @@ -24,13 +23,15 @@ func Test_validateVolumeAccessModes(t *testing.T) { name string dstSC string podReadyTimeout time.Duration + deletePVTimeout time.Duration wantErr bool resources []runtime.Object input map[string]corev1.PersistentVolumeClaim expected map[string]map[string]pvcFailure }{ { - name: "With compatible access modes, expect no validation failures", + name: "With compatible access modes, expect no validation failures", + deletePVTimeout: time.Second, input: map[string]corev1.PersistentVolumeClaim{ "pvc0": { ObjectMeta: metav1.ObjectMeta{ @@ -95,9 +96,10 @@ func Test_validateVolumeAccessModes(t *testing.T) { }, }, { - name: "When destination storage class is not found, expect error", - wantErr: true, - dstSC: "dstSc", + name: "When destination storage class is not found, expect error", + deletePVTimeout: time.Second, + wantErr: true, + dstSC: "dstSc", resources: []runtime.Object{ &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -111,7 +113,7 @@ func Test_validateVolumeAccessModes(t *testing.T) { req := require.New(t) kcli := fake.NewSimpleClientset(tt.resources...) 
logger := log.New(io.Discard, "", 0) - result, err := validateVolumeAccessModes(context.Background(), logger, kcli, tt.dstSC, "eeacms/rsync:2.3", tt.podReadyTimeout, tt.input) + result, err := validateVolumeAccessModes(context.Background(), logger, kcli, tt.dstSC, "eeacms/rsync:2.3", tt.podReadyTimeout, tt.deletePVTimeout, tt.input) if err != nil { if tt.wantErr { req.Error(err) @@ -318,7 +320,8 @@ func Test_checkVolumeAccessModes(t *testing.T) { backgroundFunc func(context.Context, *log.Logger, k8sclient.Interface, string, string, string) }{ { - name: "When the PVC access mode is not supported by destination storage provider expect PVC failure", + name: "When the PVC access mode is not supported by destination storage provider expect PVC failure", + deletePVTimeout: time.Second, input: &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "testpvc", @@ -429,12 +432,11 @@ func Test_checkVolumeAccessModes(t *testing.T) { testCtx, cancelfunc := context.WithTimeout(context.Background(), time.Minute) // if your test takes more than 1m, there are issues defer cancelfunc() kcli := fake.NewSimpleClientset(tt.resources...) 
- // logger := log.New(io.Discard, "", 0) - logger := log.New(os.Stdout, "", 0) + logger := log.New(io.Discard, "", 0) if tt.backgroundFunc != nil { go tt.backgroundFunc(testCtx, logger, kcli, tt.tmpPodName, "default", "pv-for-pf-pvc-testpvc") } - result, err := checkVolumeAccessModes(context.Background(), logger, kcli, tt.dstStorageClass, *tt.input, tt.podTimeout, "eeacms/rsync:2.3") + result, err := checkVolumeAccessModes(context.Background(), logger, kcli, tt.dstStorageClass, *tt.input, tt.podTimeout, tt.deletePVTimeout, "eeacms/rsync:2.3") if err != nil { if tt.wantErr { req.Error(err) @@ -960,3 +962,225 @@ func Test_validateStorageClasses(t *testing.T) { }) } } + +func Test_deleteTmpPVCs(t *testing.T) { + for _, tt := range []struct { + name string + resources []runtime.Object + timeout time.Duration + pvc *corev1.PersistentVolumeClaim + wantErr bool + backgroundFunc func(*testing.T, k8sclient.Interface) + }{ + { + name: "When deleting non existing pvc expect success", + timeout: time.Second, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "i-do-not-exist", + Namespace: "default", + }, + }, + }, + { + name: "When a pv has nil claim ref expect success", + timeout: time.Second, + resources: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + }, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + { + name: "When a pv has a claim ref to a different pvc expect success", + timeout: time.Second, + resources: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: 
&corev1.ObjectReference{ + Name: "abc", + Namespace: "default", + }, + }, + }, + }, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pvc", + Namespace: "default", + }, + }, + }, + { + name: "When a pv takes while to be purged expect suceess", + timeout: 20 * time.Second, + resources: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + }, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + backgroundFunc: func(t *testing.T, kcli k8sclient.Interface) { + time.Sleep(6 * time.Second) + if err := kcli.CoreV1().PersistentVolumes().Delete( + context.Background(), "pv", metav1.DeleteOptions{}, + ); err != nil { + t.Errorf("failed to delete test pv: %s", err) + } + }, + }, + { + name: "When a pv is not purged expect error (timeout)", + timeout: 10 * time.Second, + wantErr: true, + resources: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + }, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + { + name: "When pv has a claim ref to a pvc from a different namespaces expect success", + timeout: 20 * time.Second, + resources: []runtime.Object{ + &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "default", + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "another-pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "different-namespace", + }, + }, + }, + &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "yet-another-pv", + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Name: "pvc", + Namespace: "yet-another-different-namespace", + }, + }, + }, + }, + pvc: &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + Namespace: "default", + }, + }, + backgroundFunc: func(t *testing.T, kcli k8sclient.Interface) { + time.Sleep(6 * time.Second) + if err := kcli.CoreV1().PersistentVolumes().Delete( + context.Background(), "pv", metav1.DeleteOptions{}, + ); err != nil { + t.Errorf("failed to delete test pv: %s", err) + } + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + req := require.New(t) + clientset := fake.NewSimpleClientset(tt.resources...) 
+ logger := log.New(io.Discard, "", 0) + + if tt.backgroundFunc != nil { + go tt.backgroundFunc(t, clientset) + } + + err := deleteTmpPVC(logger, clientset, tt.pvc, tt.timeout) + if err != nil { + if tt.wantErr { + req.Error(err) + return + } + } + + if tt.wantErr { + req.Fail("Expecting error but received nil instead") + } + }) + } +} From 726a16026c1ecff7c3c4c20258df129afadbec13 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 14 Dec 2022 08:45:40 -0600 Subject: [PATCH 39/41] Update README with --delete-pv-timeout flag --- README.md | 1 + cmd/main.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ab9d0fb..5963608 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ pvmigrate --source-sc "source" --dest-sc "destination" --preflight-validation-on | --preflight-validation-only | Bool | | false | skip the migration and run preflight validation only | | --skip-preflight-validation | Bool | | false | skip preflight migration validation on the destination storage provider | | --pod-ready-timeout | time.Duration | | 60 seconds | length of time to wait (in seconds) for validation pod(s) to go into Ready phase | +| --delete-pv-timeout | time.Duration | | 5 minutes | length of time to wait (in seconds) for backing PV to be removed when the temporary PVC is deleted | ## Process diff --git a/cmd/main.go b/cmd/main.go index 3a096c6..45fb9a3 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -36,7 +36,7 @@ func main() { flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase") 
- flag.DurationVar(&options.DeletePVTimeout, "delete-pv-timeout", 300*time.Second, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is purged") + flag.DurationVar(&options.DeletePVTimeout, "delete-pv-timeout", 300*time.Second, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is deleted") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") From 54ba092c511881b4caf7010c63e64baded22f670 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 14 Dec 2022 14:57:34 -0600 Subject: [PATCH 40/41] fix timeout flags --- cmd/main.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 45fb9a3..6e6cfc1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -28,6 +28,8 @@ func main() { var options migrate.Options var skipPreflightValidation bool var preflightValidationOnly bool + var podReadyTimeout int + var deletePVTimeout int flag.StringVar(&options.SourceSCName, "source-sc", "", "storage provider name to migrate from") flag.StringVar(&options.DestSCName, "dest-sc", "", "storage provider name to migrate to") flag.StringVar(&options.RsyncImage, "rsync-image", "eeacms/rsync:2.3", "the image to use to copy PVCs - must have 'rsync' on the path") @@ -35,13 +37,17 @@ func main() { flag.BoolVar(&options.SetDefaults, "set-defaults", false, "change default storage class from source to dest") flag.BoolVar(&options.VerboseCopy, "verbose-copy", false, "show output from the rsync command used to copy data between PVCs") flag.BoolVar(&options.SkipSourceValidation, "skip-source-validation", false, "migrate from PVCs using a particular StorageClass name, even if that StorageClass does not exist") - 
flag.DurationVar(&options.PodReadyTimeout, "pod-ready-timeout", 60*time.Second, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase") - flag.DurationVar(&options.DeletePVTimeout, "delete-pv-timeout", 300*time.Second, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is deleted") + flag.IntVar(&podReadyTimeout, "pod-ready-timeout", 60, "length of time to wait (in seconds) for validation pod(s) to go into Ready phase") + flag.IntVar(&deletePVTimeout, "delete-pv-timeout", 300, "length of time to wait (in seconds) for backing PV to be removed when temporary PVC is deleted") flag.BoolVar(&skipPreflightValidation, "skip-preflight-validation", false, "skip preflight migration validation on the destination storage provider") flag.BoolVar(&preflightValidationOnly, "preflight-validation-only", false, "skip the migration and run preflight validation only") flag.Parse() + // update options with flag values + options.PodReadyTimeout = time.Duration(podReadyTimeout) * time.Second + options.DeletePVTimeout = time.Duration(deletePVTimeout) * time.Second + // setup logger logger := log.New(os.Stderr, "", 0) // this has no time prefix etc From df71c226fd663caf56024a05170ad5c06a63f105 Mon Sep 17 00:00:00 2001 From: Rafael Polanco <6497491+rrpolanco@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:04:12 -0600 Subject: [PATCH 41/41] log name of the tempt pvc and not the struct --- pkg/preflight/validate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/preflight/validate.go b/pkg/preflight/validate.go index c5ca5f7..cacd12b 100644 --- a/pkg/preflight/validate.go +++ b/pkg/preflight/validate.go @@ -310,7 +310,7 @@ func deleteTmpPVC(l *log.Logger, client k8sclient.Interface, pvc *corev1.Persist pv, ok := pvsByPVCName[pvc.Name] if !ok { - l.Printf("failed to find pv for temp pvc %s", pvc) + l.Printf("failed to find pv for temp pvc %s", pvc.Name) return nil }