Skip to content

Commit cc13a7d

Browse files
authored
chore: enable gocritic (argoproj#680)
Signed-off-by: Matthieu MOREL <[email protected]>
1 parent ad846ac commit cc13a7d

File tree

9 files changed

+52
-58
lines changed

9 files changed

+52
-58
lines changed

.golangci.yaml

-7
Original file line numberDiff line numberDiff line change
@@ -34,15 +34,8 @@ linters-settings:
3434
disabled-checks:
3535
- appendAssign
3636
- assignOp # Keep it disabled for readability
37-
- commentFormatting #FIXME
3837
- exitAfterDefer
39-
- elseif #FIXME
40-
- ifElseChain #FIXME
41-
- mapKey
42-
- singleCaseSwitch #FIXME
4338
- typeSwitchVar
44-
- unlambda #FIXME
45-
- wrapperFunc #FIXME
4639
goimports:
4740
local-prefixes: github.com/argoproj/gitops-engine
4841
importas:

pkg/cache/cluster_test.go

+5-4
Original file line numberDiff line numberDiff line change
@@ -847,11 +847,12 @@ func ExampleNewClusterCache_resourceUpdatedEvents() {
847847
panic(err)
848848
}
849849
unsubscribe := clusterCache.OnResourceUpdated(func(newRes *Resource, oldRes *Resource, _ map[kube.ResourceKey]*Resource) {
850-
if newRes == nil {
850+
switch {
851+
case newRes == nil:
851852
fmt.Printf("%s deleted\n", oldRes.Ref.String())
852-
} else if oldRes == nil {
853+
case oldRes == nil:
853854
fmt.Printf("%s created\n", newRes.Ref.String())
854-
} else {
855+
default:
855856
fmt.Printf("%s updated\n", newRes.Ref.String())
856857
}
857858
})
@@ -1378,7 +1379,7 @@ func BenchmarkIterateHierarchyV2(b *testing.B) {
13781379
}
13791380
}
13801381

1381-
//func BenchmarkIterateHierarchy(b *testing.B) {
1382+
// func BenchmarkIterateHierarchy(b *testing.B) {
13821383
// cluster := newCluster(b)
13831384
// for _, resource := range testResources {
13841385
// cluster.setNode(resource)

pkg/cache/resource.go

+2-4
Original file line numberDiff line numberDiff line change
@@ -92,10 +92,8 @@ func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents ma
9292
if parents[childKey] {
9393
key := r.ResourceKey()
9494
_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
95-
} else {
96-
if action(nil, child, ns) {
97-
child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action)
98-
}
95+
} else if action(nil, child, ns) {
96+
child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action)
9997
}
10098
}
10199
}

pkg/diff/diff.go

+10-11
Original file line numberDiff line numberDiff line change
@@ -116,15 +116,13 @@ func Diff(config, live *unstructured.Unstructured, opts ...Option) (*DiffResult,
116116
orig, err := GetLastAppliedConfigAnnotation(live)
117117
if err != nil {
118118
o.log.V(1).Info(fmt.Sprintf("Failed to get last applied configuration: %v", err))
119-
} else {
120-
if orig != nil && config != nil {
121-
Normalize(orig, opts...)
122-
dr, err := ThreeWayDiff(orig, config, live)
123-
if err == nil {
124-
return dr, nil
125-
}
126-
o.log.V(1).Info(fmt.Sprintf("three-way diff calculation failed: %v. Falling back to two-way diff", err))
119+
} else if orig != nil && config != nil {
120+
Normalize(orig, opts...)
121+
dr, err := ThreeWayDiff(orig, config, live)
122+
if err == nil {
123+
return dr, nil
127124
}
125+
o.log.V(1).Info(fmt.Sprintf("three-way diff calculation failed: %v. Falling back to two-way diff", err))
128126
}
129127
return TwoWayDiff(config, live)
130128
}
@@ -810,11 +808,12 @@ func Normalize(un *unstructured.Unstructured, opts ...Option) {
810808
unstructured.RemoveNestedField(un.Object, "metadata", "creationTimestamp")
811809

812810
gvk := un.GroupVersionKind()
813-
if gvk.Group == "" && gvk.Kind == "Secret" {
811+
switch {
812+
case gvk.Group == "" && gvk.Kind == "Secret":
814813
NormalizeSecret(un, opts...)
815-
} else if gvk.Group == "rbac.authorization.k8s.io" && (gvk.Kind == "ClusterRole" || gvk.Kind == "Role") {
814+
case gvk.Group == "rbac.authorization.k8s.io" && (gvk.Kind == "ClusterRole" || gvk.Kind == "Role"):
816815
normalizeRole(un, o)
817-
} else if gvk.Group == "" && gvk.Kind == "Endpoints" {
816+
case gvk.Group == "" && gvk.Kind == "Endpoints":
818817
normalizeEndpoint(un, o)
819818
}
820819

pkg/health/health_deployment.go

+5-4
Original file line numberDiff line numberDiff line change
@@ -35,22 +35,23 @@ func getAppsv1DeploymentHealth(deployment *appsv1.Deployment) (*HealthStatus, er
3535
// Borrowed at kubernetes/kubectl/rollout_status.go https://github.com/kubernetes/kubernetes/blob/5232ad4a00ec93942d0b2c6359ee6cd1201b46bc/pkg/kubectl/rollout_status.go#L80
3636
if deployment.Generation <= deployment.Status.ObservedGeneration {
3737
cond := getAppsv1DeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
38-
if cond != nil && cond.Reason == "ProgressDeadlineExceeded" {
38+
switch {
39+
case cond != nil && cond.Reason == "ProgressDeadlineExceeded":
3940
return &HealthStatus{
4041
Status: HealthStatusDegraded,
4142
Message: fmt.Sprintf("Deployment %q exceeded its progress deadline", deployment.Name),
4243
}, nil
43-
} else if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
44+
case deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas:
4445
return &HealthStatus{
4546
Status: HealthStatusProgressing,
4647
Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas),
4748
}, nil
48-
} else if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
49+
case deployment.Status.Replicas > deployment.Status.UpdatedReplicas:
4950
return &HealthStatus{
5051
Status: HealthStatusProgressing,
5152
Message: fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...", deployment.Status.Replicas-deployment.Status.UpdatedReplicas),
5253
}, nil
53-
} else if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
54+
case deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas:
5455
return &HealthStatus{
5556
Status: HealthStatusProgressing,
5657
Message: fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas),

pkg/health/health_job.go

+9-7
Original file line numberDiff line numberDiff line change
@@ -50,24 +50,26 @@ func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) {
5050
}
5151
}
5252
}
53-
if !complete {
53+
switch {
54+
case !complete:
5455
return &HealthStatus{
5556
Status: HealthStatusProgressing,
5657
Message: message,
5758
}, nil
58-
} else if failed {
59+
case failed:
5960
return &HealthStatus{
6061
Status: HealthStatusDegraded,
6162
Message: failMsg,
6263
}, nil
63-
} else if isSuspended {
64+
case isSuspended:
6465
return &HealthStatus{
6566
Status: HealthStatusSuspended,
6667
Message: failMsg,
6768
}, nil
69+
default:
70+
return &HealthStatus{
71+
Status: HealthStatusHealthy,
72+
Message: message,
73+
}, nil
6874
}
69-
return &HealthStatus{
70-
Status: HealthStatusHealthy,
71-
Message: message,
72-
}, nil
7375
}

pkg/sync/sync_context.go

+8-11
Original file line numberDiff line numberDiff line change
@@ -718,9 +718,7 @@ func (sc *syncContext) getSyncTasks() (_ syncTasks, successful bool) {
718718
task.liveObj = sc.liveObj(task.targetObj)
719719
}
720720

721-
isRetryable := func(err error) bool {
722-
return apierrors.IsUnauthorized(err)
723-
}
721+
isRetryable := apierrors.IsUnauthorized
724722

725723
serverResCache := make(map[schema.GroupVersionKind]*metav1.APIResource)
726724

@@ -849,20 +847,19 @@ func (sc *syncContext) autoCreateNamespace(tasks syncTasks) syncTasks {
849847
managedNs, err := kubeutil.ToUnstructured(nsSpec)
850848
if err == nil {
851849
liveObj, err := sc.kubectl.GetResource(context.TODO(), sc.config, managedNs.GroupVersionKind(), managedNs.GetName(), metav1.NamespaceNone)
852-
if err == nil {
850+
switch {
851+
case err == nil:
853852
nsTask := &syncTask{phase: common.SyncPhasePreSync, targetObj: managedNs, liveObj: liveObj}
854853
_, ok := sc.syncRes[nsTask.resultKey()]
855854
if ok {
856855
tasks = sc.appendNsTask(tasks, nsTask, managedNs, liveObj)
857-
} else {
858-
if liveObj != nil {
859-
sc.log.WithValues("namespace", sc.namespace).Info("Namespace already exists")
860-
tasks = sc.appendNsTask(tasks, &syncTask{phase: common.SyncPhasePreSync, targetObj: managedNs, liveObj: liveObj}, managedNs, liveObj)
861-
}
856+
} else if liveObj != nil {
857+
sc.log.WithValues("namespace", sc.namespace).Info("Namespace already exists")
858+
tasks = sc.appendNsTask(tasks, &syncTask{phase: common.SyncPhasePreSync, targetObj: managedNs, liveObj: liveObj}, managedNs, liveObj)
862859
}
863-
} else if apierrors.IsNotFound(err) {
860+
case apierrors.IsNotFound(err):
864861
tasks = sc.appendNsTask(tasks, &syncTask{phase: common.SyncPhasePreSync, targetObj: managedNs, liveObj: nil}, managedNs, nil)
865-
} else {
862+
default:
866863
tasks = sc.appendFailedNsTask(tasks, managedNs, fmt.Errorf("Namespace auto creation failed: %w", err))
867864
}
868865
} else {

pkg/sync/sync_context_test.go

+12-9
Original file line numberDiff line numberDiff line change
@@ -131,12 +131,13 @@ func TestSyncCreateInSortedOrder(t *testing.T) {
131131
assert.Len(t, resources, 2)
132132
for i := range resources {
133133
result := resources[i]
134-
if result.ResourceKey.Kind == "Pod" {
134+
switch result.ResourceKey.Kind {
135+
case "Pod":
135136
assert.Equal(t, synccommon.ResultCodeSynced, result.Status)
136137
assert.Equal(t, "", result.Message)
137-
} else if result.ResourceKey.Kind == "Service" {
138+
case "Service":
138139
assert.Equal(t, "", result.Message)
139-
} else {
140+
default:
140141
t.Error("Resource isn't a pod or a service")
141142
}
142143
}
@@ -255,13 +256,14 @@ func TestSyncSuccessfully(t *testing.T) {
255256
assert.Len(t, resources, 2)
256257
for i := range resources {
257258
result := resources[i]
258-
if result.ResourceKey.Kind == "Pod" {
259+
switch result.ResourceKey.Kind {
260+
case "Pod":
259261
assert.Equal(t, synccommon.ResultCodePruned, result.Status)
260262
assert.Equal(t, "pruned", result.Message)
261-
} else if result.ResourceKey.Kind == "Service" {
263+
case "Service":
262264
assert.Equal(t, synccommon.ResultCodeSynced, result.Status)
263265
assert.Equal(t, "", result.Message)
264-
} else {
266+
default:
265267
t.Error("Resource isn't a pod or a service")
266268
}
267269
}
@@ -284,13 +286,14 @@ func TestSyncDeleteSuccessfully(t *testing.T) {
284286
assert.Equal(t, synccommon.OperationSucceeded, phase)
285287
for i := range resources {
286288
result := resources[i]
287-
if result.ResourceKey.Kind == "Pod" {
289+
switch result.ResourceKey.Kind {
290+
case "Pod":
288291
assert.Equal(t, synccommon.ResultCodePruned, result.Status)
289292
assert.Equal(t, "pruned", result.Message)
290-
} else if result.ResourceKey.Kind == "Service" {
293+
case "Service":
291294
assert.Equal(t, synccommon.ResultCodePruned, result.Status)
292295
assert.Equal(t, "pruned", result.Message)
293-
} else {
296+
default:
294297
t.Error("Resource isn't a pod or a service")
295298
}
296299
}

pkg/utils/kube/kube.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -215,7 +215,7 @@ func cleanKubectlOutput(s string) string {
215215
s = kubectlErrOutRegexp.ReplaceAllString(s, "")
216216
s = kubectlErrOutMapRegexp.ReplaceAllString(s, "")
217217
s = kubectlApplyPatchErrOutRegexp.ReplaceAllString(s, "")
218-
s = strings.Replace(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "", -1)
218+
s = strings.ReplaceAll(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "")
219219
return s
220220
}
221221

0 commit comments

Comments (0)