From cadc46d8b57117da5309330e19d2e12840adfcc2 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Wed, 5 Feb 2025 14:56:13 +0100 Subject: [PATCH 1/6] consolidate runner.run --- internal/backend/local/test.go | 121 +++++++++++----------- internal/moduletest/graph/eval_context.go | 5 - 2 files changed, 58 insertions(+), 68 deletions(-) diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go index ade753af50d6..da8ebdfeda58 100644 --- a/internal/backend/local/test.go +++ b/internal/backend/local/test.go @@ -346,7 +346,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics defer sem.Release() switch v := v.(type) { - case *graph.NodeTestRun: + case *graph.NodeTestRun: // NodeTestRun is also executable, so it has to be first. file := v.File() run := v.Run() if file.GetStatus() == moduletest.Error { @@ -374,7 +374,11 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics if diags.HasErrors() { return diags } - // continue the execution of the test run. + + startTime := time.Now().UTC() + runner.run(run, file, startTime) + runner.Suite.View.Run(run, file, moduletest.Complete, 0) + file.UpdateStatus(run.Status) case graph.GraphNodeExecutable: diags = v.Execute(runner.EvalContext) return diags @@ -382,75 +386,55 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics // If the vertex isn't a test run or executable, we'll just skip it. return } + return + } - // We already know that the vertex is a test run - runNode := v.(*graph.NodeTestRun) - - file := runNode.File() - run := runNode.Run() - - key := run.GetStateKey() - if run.Config.ConfigUnderTest != nil { - if key == moduletest.MainStateIdentifier { - // This is bad. It means somehow the module we're loading has - // the same key as main state and we're about to corrupt things. - - run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid module source", - Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key), - Subject: run.Config.Module.DeclRange.Ptr(), - }) - - run.Status = moduletest.Error - file.UpdateStatus(moduletest.Error) - return - } - } - - startTime := time.Now().UTC() - state, updatedState := runner.run(run, file, runner.EvalContext.GetFileState(key).State) - runDuration := time.Since(startTime) - if updatedState { - // Only update the most recent run and state if the state was - // actually updated by this change. We want to use the run that - // most recently updated the tracked state as the cleanup - // configuration. - runner.EvalContext.SetFileState(key, &graph.TestFileState{ - Run: run, - State: state, - }) - } + return g.AcyclicGraph.Walk(walkFn) +} +func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) { + log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name) + defer func() { // If we got far enough to actually execute the run then we'll give // the view some additional metadata about the execution. 
run.ExecutionMeta = &moduletest.RunExecutionMeta{ Start: startTime, - Duration: runDuration, + Duration: time.Since(startTime), } - runner.Suite.View.Run(run, file, moduletest.Complete, 0) - file.UpdateStatus(run.Status) - return - } + }() - return g.AcyclicGraph.Walk(walkFn) -} + key := run.GetStateKey() + if run.Config.ConfigUnderTest != nil { + if key == moduletest.MainStateIdentifier { + // This is bad. It means somehow the module we're loading has + // the same key as main state and we're about to corrupt things. -func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, state *states.State) (*states.State, bool) { - log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name) + run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module source", + Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key), + Subject: run.Config.Module.DeclRange.Ptr(), + }) + + run.Status = moduletest.Error + file.UpdateStatus(moduletest.Error) + return + } + } + state := runner.EvalContext.GetFileState(key).State config := run.ModuleConfig if runner.Suite.Cancelled { // Don't do anything, just give up and return immediately. // The surrounding functions should stop this even being called, but in // case of race conditions or something we can still verify this. - return state, false + return } if runner.Suite.Stopped { // Basically the same as above, except we'll be a bit nicer. run.Status = moduletest.Skip - return state, false + return } start := time.Now().UTC().UnixMilli() @@ -459,35 +443,35 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st run.Diagnostics = run.Diagnostics.Append(run.Config.Validate(config)) if run.Diagnostics.HasErrors() { run.Status = moduletest.Error - return state, false + return } configDiags := graph.TransformConfigForTest(runner.EvalContext, run, file) run.Diagnostics = run.Diagnostics.Append(configDiags) if configDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } validateDiags := runner.validate(run, file, start) run.Diagnostics = run.Diagnostics.Append(validateDiags) if validateDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } references, referenceDiags := run.GetReferences() run.Diagnostics = run.Diagnostics.Append(referenceDiags) if referenceDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } variables, variableDiags := runner.GetVariables(run, references, true) run.Diagnostics = run.Diagnostics.Append(variableDiags) if variableDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } // FilterVariablesToModule only returns warnings, so we don't check the @@ -498,7 +482,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st tfCtx, ctxDiags := terraform.NewContext(runner.Suite.Opts) run.Diagnostics = run.Diagnostics.Append(ctxDiags) if ctxDiags.HasErrors() { - return state, false + return } planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start) @@ -508,7 +492,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st run.Diagnostics = run.Diagnostics.Append(planDiags) if planDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } runner.AddVariablesToConfig(run, variables) @@ -549,8 +533,7 @@ 
func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st // Now we've successfully validated this run block, lets add it into // our prior run outputs so future run blocks can access it. runner.EvalContext.SetOutput(run, outputVals) - - return state, false + return } // Otherwise any error during the planning prevents our apply from @@ -559,7 +542,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st run.Diagnostics = run.Diagnostics.Append(planDiags) if planDiags.HasErrors() { run.Status = moduletest.Error - return state, false + return } // Since we're carrying on an executing the apply operation as well, we're @@ -586,7 +569,11 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st run.Status = moduletest.Error // Even though the apply operation failed, the graph may have done // partial updates and the returned state should reflect this. - return updated, true + runner.EvalContext.SetFileState(key, &graph.TestFileState{ + Run: run, + State: updated, + }) + return } runner.AddVariablesToConfig(run, variables) @@ -628,7 +615,15 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st // our prior run outputs so future run blocks can access it. runner.EvalContext.SetOutput(run, outputVals) - return updated, true + // Only update the most recent run and state if the state was + // actually updated by this change. We want to use the run that + // most recently updated the tracked state as the cleanup + // configuration. + runner.EvalContext.SetFileState(key, &graph.TestFileState{ + Run: run, + State: updated, + }) + return } func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics { diff --git a/internal/moduletest/graph/eval_context.go b/internal/moduletest/graph/eval_context.go index 377f63c65ecd..26dd8cf999af 100644 --- a/internal/moduletest/graph/eval_context.go +++ b/internal/moduletest/graph/eval_context.go @@ -320,11 +320,6 @@ func diagsForEphemeralResources(refs []*addrs.Reference) (diags tfdiags.Diagnost func (ec *EvalContext) SetFileState(key string, state *TestFileState) { ec.stateLock.Lock() defer ec.stateLock.Unlock() - fileState := ec.FileStates[key] - if fileState != nil { - ec.FileStates[key] = state - return - } ec.FileStates[key] = &TestFileState{ Run: state.Run, State: state.State, From 3aa740ae967de80e59a110b9616d6ebb779fdfd6 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Thu, 6 Feb 2025 09:39:48 +0100 Subject: [PATCH 2/6] Continue test execution after an expected failure --- internal/backend/local/test.go | 57 ++++++++++++++++--- internal/command/test_test.go | 12 ++-- .../test/expect_failures_continue/main.tf | 13 +++++ .../expect_failures_continue/main.tftest.hcl | 20 +++++++ 4 files changed, 89 insertions(+), 13 deletions(-) create mode 100644 internal/command/testdata/test/expect_failures_continue/main.tf create mode 100644 internal/command/testdata/test/expect_failures_continue/main.tftest.hcl diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go index da8ebdfeda58..9d25fdd10608 100644 --- a/internal/backend/local/test.go +++ b/internal/backend/local/test.go @@ -282,7 +282,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) { } // walk and execute the graph - diags = runner.walkGraph(graph) + diags = runner.walkGraph(graph, file) // If the graph walk was terminated, we don't want to add the diagnostics. 
// The error the user receives will just be: @@ -298,8 +298,9 @@ func (runner *TestFileRunner) Test(file *moduletest.File) { } // walkGraph goes through the graph and execute each run it finds. -func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics { +func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics { sem := runner.Suite.semaphore + collectRunStatus, updateFileStatus := runner.trackRunStatuses(file) // Walk the graph. walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) { @@ -376,9 +377,13 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics } startTime := time.Now().UTC() - runner.run(run, file, startTime) + deferFileStatus := runner.run(run, file, startTime) runner.Suite.View.Run(run, file, moduletest.Complete, 0) - file.UpdateStatus(run.Status) + // If the run block is done, but it was due to an expected failure, we + // don't want to update the file status immediately. We'll collect the + // status of this run block and update the file status at the end of the + // file execution. + collectRunStatus(run, deferFileStatus) case graph.GraphNodeExecutable: diags = v.Execute(runner.EvalContext) return diags @@ -389,10 +394,12 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics return } - return g.AcyclicGraph.Walk(walkFn) + diags := g.AcyclicGraph.Walk(walkFn) + updateFileStatus() + return diags } -func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) { +func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) (deferFileStatus bool) { log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name) defer func() { // If we got far enough to actually execute the run then we'll give @@ -401,6 +408,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st Start: startTime, Duration: time.Since(startTime), } + }() key := run.GetStateKey() @@ -487,9 +495,9 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start) if run.Config.Command == configs.PlanTestCommand { - // Then we want to assess our conditions and diagnostics differently. planDiags = run.ValidateExpectedFailures(planDiags) run.Diagnostics = run.Diagnostics.Append(planDiags) + // Then we want to assess our conditions and diagnostics differently. if planDiags.HasErrors() { run.Status = moduletest.Error return @@ -536,12 +544,21 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st return } - // Otherwise any error during the planning prevents our apply from + // Otherwise any error (expected or unexpected) during the planning prevents our apply from // continuing which is an error. planDiags = run.ExplainExpectedFailures(planDiags) run.Diagnostics = run.Diagnostics.Append(planDiags) if planDiags.HasErrors() { run.Status = moduletest.Error + // If the plan failed, but all the failures were expected, then we don't + // want to mark the overall file as a failure, so that subsequent runs can + // still be executed. + // We will collect the status of this run instead of updating the file status. + // At the end of the file execution, we will update the file status based on the + // statuses of all the runs. 
+ if !run.ValidateExpectedFailures(planDiags).HasErrors() { + deferFileStatus = true + } return } @@ -1255,3 +1272,27 @@ func (runner *TestFileRunner) AddVariablesToConfig(run *moduletest.Run, variable } } + +// trackRunStatuses is a helper function that returns two functions. The first +// function is used to collect the statuses of the runs, and the second function +// is used to update the overall file status based on the statuses of the runs. +func (runner *TestFileRunner) trackRunStatuses(file *moduletest.File) (func(*moduletest.Run, bool), func()) { + statuses := make([]moduletest.Status, len(file.Runs)) + collector := func(run *moduletest.Run, deferred bool) { + if deferred { + statuses[run.Index] = run.Status + } else { + file.UpdateStatus(run.Status) + } + } + + updater := func() { + for _, status := range statuses { + file.UpdateStatus(status) + } + } + + // We'll return two functions, one to collect the statuses of the runs, and + // one to update the overall file status. + return collector, updater +} diff --git a/internal/command/test_test.go b/internal/command/test_test.go index 8b7aee1b48a5..d6633c294519 100644 --- a/internal/command/test_test.go +++ b/internal/command/test_test.go @@ -35,7 +35,7 @@ func TestTest_Runs(t *testing.T) { code int initCode int skip bool - desc string + description string }{ "simple_pass": { expectedOut: []string{"1 passed, 0 failed."}, @@ -60,7 +60,7 @@ func TestTest_Runs(t *testing.T) { args: []string{"-parallelism", "1"}, expectedOut: []string{"1 passed, 0 failed."}, code: 0, - desc: "simple_pass with parallelism set to 1", + description: "simple_pass with parallelism set to 1", }, "simple_pass_very_nested_alternate": { override: "simple_pass_very_nested", @@ -104,9 +104,11 @@ func TestTest_Runs(t *testing.T) { expectedOut: []string{"1 passed, 0 failed."}, code: 0, }, - "expect_failures_outputs": { - expectedOut: []string{"1 passed, 0 failed."}, - code: 0, + "expect_failures_continue": { + expectedOut: []string{"1 passed, 1 failed.", "Expected failure while planning"}, + code: 1, + expectedErr: []string{"Module output value precondition failed"}, + description: "continue test execution after an expected failure", }, "expect_failures_resources": { expectedOut: []string{"1 passed, 0 failed."}, diff --git a/internal/command/testdata/test/expect_failures_continue/main.tf b/internal/command/testdata/test/expect_failures_continue/main.tf new file mode 100644 index 000000000000..af05c486a045 --- /dev/null +++ b/internal/command/testdata/test/expect_failures_continue/main.tf @@ -0,0 +1,13 @@ + +variable "input" { + type = string +} + +output "output" { + value = var.input + + precondition { + condition = var.input == "something incredibly specific" + error_message = "this should fail" + } +} diff --git a/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl b/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl new file mode 100644 index 000000000000..73d0f196b544 --- /dev/null +++ b/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl @@ -0,0 +1,20 @@ +variables { + input = "some value" +} + +run "test" { + + command = apply + + expect_failures = [ + output.output + ] +} + +run "follow-up" { + command = apply + + variables { + input = "something incredibly specific" + } +} \ No newline at end of file From f6c72a9def4c6b2bc3959d1d896ba15a6f44524d Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Thu, 6 Feb 2025 10:13:12 +0100 Subject: [PATCH 3/6] return diags as err --- 
internal/backend/local/test.go | 8 ++------ internal/moduletest/graph/transform_test_run.go | 2 +- internal/tfdiags/diagnostics.go | 13 +++++++------ 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go index 9d25fdd10608..ff4e1ab95595 100644 --- a/internal/backend/local/test.go +++ b/internal/backend/local/test.go @@ -468,12 +468,8 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st return } - references, referenceDiags := run.GetReferences() - run.Diagnostics = run.Diagnostics.Append(referenceDiags) - if referenceDiags.HasErrors() { - run.Status = moduletest.Error - return - } + // already validated during static analysis + references, _ := run.GetReferences() variables, variableDiags := runner.GetVariables(run, references, true) run.Diagnostics = run.Diagnostics.Append(variableDiags) diff --git a/internal/moduletest/graph/transform_test_run.go b/internal/moduletest/graph/transform_test_run.go index 08b07c865473..3dfb904e6483 100644 --- a/internal/moduletest/graph/transform_test_run.go +++ b/internal/moduletest/graph/transform_test_run.go @@ -33,7 +33,7 @@ func (t *TestRunTransformer) Transform(g *terraform.Graph) error { // Connect nodes based on dependencies if diags := t.connectDependencies(g, nodes); diags.HasErrors() { - return tfdiags.NonFatalError{Diagnostics: diags} + return tfdiags.DiagnosticsAsError{Diagnostics: diags} } // Runs with the same state key inherently depend on each other, so we diff --git a/internal/tfdiags/diagnostics.go b/internal/tfdiags/diagnostics.go index bcd6649664d3..1fd33904a2b8 100644 --- a/internal/tfdiags/diagnostics.go +++ b/internal/tfdiags/diagnostics.go @@ -57,7 +57,7 @@ func (diags Diagnostics) Append(new ...interface{}) Diagnostics { diags = append(diags, ti) case Diagnostics: diags = append(diags, ti...) // flatten - case diagnosticsAsError: + case DiagnosticsAsError: diags = diags.Append(ti.Diagnostics) // unwrap case NonFatalError: diags = diags.Append(ti.Diagnostics) // unwrap @@ -111,7 +111,7 @@ func diagnosticsForError(err error) []Diagnostic { // If we've wrapped a Diagnostics in an error then we'll unwrap // it and add it directly. - var asErr diagnosticsAsError + var asErr DiagnosticsAsError if errors.As(err, &asErr) { return asErr.Diagnostics } @@ -206,7 +206,7 @@ func (diags Diagnostics) Err() error { if !diags.HasErrors() { return nil } - return diagnosticsAsError{diags} + return DiagnosticsAsError{diags} } // ErrWithWarnings is similar to Err except that it will also return a non-nil @@ -257,11 +257,12 @@ func (diags Diagnostics) Sort() { sort.Stable(sortDiagnostics(diags)) } -type diagnosticsAsError struct { +// DiagnosticsAsError embeds diagnostics, and satisfies the error interface. +type DiagnosticsAsError struct { Diagnostics } -func (dae diagnosticsAsError) Error() string { +func (dae DiagnosticsAsError) Error() string { diags := dae.Diagnostics switch { case len(diags) == 0: @@ -291,7 +292,7 @@ func (dae diagnosticsAsError) Error() string { // WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped // diagnostics object can be picked apart by errwrap-aware code. 
-func (dae diagnosticsAsError) WrappedErrors() []error { +func (dae DiagnosticsAsError) WrappedErrors() []error { var errs []error for _, diag := range dae.Diagnostics { if wrapper, isErr := diag.(nativeError); isErr { From ece61bc8779fdf30cbe84cee3fc9cae768e0fc58 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Thu, 6 Feb 2025 16:27:44 +0100 Subject: [PATCH 4/6] add changelog --- .changes/unreleased/ENHANCEMENTS-20250206-162053.yaml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changes/unreleased/ENHANCEMENTS-20250206-162053.yaml diff --git a/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml b/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml new file mode 100644 index 000000000000..f83db8acc194 --- /dev/null +++ b/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml @@ -0,0 +1,5 @@ +kind: ENHANCEMENTS +body: Continue Test execution when a run fails due to an expected failure. +time: 2025-02-06T16:20:53.83763+01:00 +custom: + Issue: "34969" From 881657e7d402573bd775c118c93ae762049dae71 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Sat, 8 Feb 2025 09:22:27 +0100 Subject: [PATCH 5/6] continue to execute runs when exp failures did not fail during apply --- .../ENHANCEMENTS-20250206-162053.yaml | 2 +- internal/backend/local/test.go | 93 ++++++++----------- internal/command/test_test.go | 76 +++++++++++++-- .../test/expect_failures_continue/main.tf | 13 --- .../expect_failures_continue/main.tftest.hcl | 20 ---- .../test/expect_failures_during_apply/main.tf | 13 +++ .../main.tftest.hcl | 18 ++++ .../input.tftest.hcl | 14 +++ internal/moduletest/run.go | 5 +- 9 files changed, 158 insertions(+), 96 deletions(-) delete mode 100644 internal/command/testdata/test/expect_failures_continue/main.tf delete mode 100644 internal/command/testdata/test/expect_failures_continue/main.tftest.hcl create mode 100644 internal/command/testdata/test/expect_failures_during_apply/main.tf create mode 100644 internal/command/testdata/test/expect_failures_during_apply/main.tftest.hcl diff --git a/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml b/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml index f83db8acc194..6d8fa4ec9668 100644 --- a/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml +++ b/.changes/unreleased/ENHANCEMENTS-20250206-162053.yaml @@ -1,5 +1,5 @@ kind: ENHANCEMENTS -body: Continue Test execution when a run fails due to an expected failure. +body: Terraform Test: Continue subsequent test execution when an expected failure is not encountered. time: 2025-02-06T16:20:53.83763+01:00 custom: Issue: "34969" diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go index ff4e1ab95595..40398ee8298d 100644 --- a/internal/backend/local/test.go +++ b/internal/backend/local/test.go @@ -300,7 +300,6 @@ func (runner *TestFileRunner) Test(file *moduletest.File) { // walkGraph goes through the graph and execute each run it finds. func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics { sem := runner.Suite.semaphore - collectRunStatus, updateFileStatus := runner.trackRunStatuses(file) // Walk the graph. 
walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) { @@ -377,13 +376,9 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.Fil } startTime := time.Now().UTC() - deferFileStatus := runner.run(run, file, startTime) + runner.run(run, file, startTime) runner.Suite.View.Run(run, file, moduletest.Complete, 0) - // If the run block is done, but it was due to an expected failure, we - // don't want to update the file status immediately. We'll collect the - // status of this run block and update the file status at the end of the - // file execution. - collectRunStatus(run, deferFileStatus) + file.UpdateStatus(run.Status) case graph.GraphNodeExecutable: diags = v.Execute(runner.EvalContext) return diags @@ -394,12 +389,10 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.Fil return } - diags := g.AcyclicGraph.Walk(walkFn) - updateFileStatus() - return diags + return g.AcyclicGraph.Walk(walkFn) } -func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) (deferFileStatus bool) { +func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) { log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name) defer func() { // If we got far enough to actually execute the run then we'll give @@ -491,9 +484,9 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start) if run.Config.Command == configs.PlanTestCommand { + // Then we want to assess our conditions and diagnostics differently. planDiags = run.ValidateExpectedFailures(planDiags) run.Diagnostics = run.Diagnostics.Append(planDiags) - // Then we want to assess our conditions and diagnostics differently. if planDiags.HasErrors() { run.Status = moduletest.Error return @@ -540,21 +533,12 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st return } - // Otherwise any error (expected or unexpected) during the planning prevents our apply from + // Otherwise any error during the planning prevents our apply from // continuing which is an error. planDiags = run.ExplainExpectedFailures(planDiags) run.Diagnostics = run.Diagnostics.Append(planDiags) if planDiags.HasErrors() { run.Status = moduletest.Error - // If the plan failed, but all the failures were expected, then we don't - // want to mark the overall file as a failure, so that subsequent runs can - // still be executed. - // We will collect the status of this run instead of updating the file status. - // At the end of the file execution, we will update the file status based on the - // statuses of all the runs. - if !run.ValidateExpectedFailures(planDiags).HasErrors() { - deferFileStatus = true - } return } @@ -575,11 +559,9 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st applyScope, updated, applyDiags := runner.apply(tfCtx, plan, state, run, file, moduletest.Running, start, variables) // Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't. - applyDiags = run.ValidateExpectedFailures(applyDiags) - - run.Diagnostics = run.Diagnostics.Append(applyDiags) - if applyDiags.HasErrors() { - run.Status = moduletest.Error + // We'll also update the run status based on the presence of errors or missing expected failures. 
+ failOrErr := runner.checkForMissingExpectedFailures(run, applyDiags) + if failOrErr { // Even though the apply operation failed, the graph may have done // partial updates and the returned state should reflect this. runner.EvalContext.SetFileState(key, &graph.TestFileState{ @@ -636,7 +618,38 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st Run: run, State: updated, }) - return +} + +// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics. +// It updates the run status based on the presence of errors or missing expected failures. +func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) { + // If there are errors in the diagnostics, update the run status to error. + if diags.HasErrors() { + run.Status = moduletest.Error + } + + // Retrieve diagnostics that are either unrelated to expected failures or report missing expected failures. + unexpectedDiags := run.ValidateExpectedFailures(diags) + var nonFailureDiags tfdiags.Diagnostics + for _, diag := range unexpectedDiags { + switch { + // If any diagnostic indicates a missing expected failure, update the run status to fail. + case diag.Description().Summary == moduletest.MissingFailureSummary: + diag.ExtraInfo() + run.Status = moduletest.Fail + default: + // Append other diagnostics. + nonFailureDiags = nonFailureDiags.Append(diag) + } + } + // If there are other errors, update the run status to error. + if nonFailureDiags.HasErrors() { + run.Status = moduletest.Error + } + + // Append all diagnostics that are not expected failures to the run diagnostics. + run.Diagnostics = run.Diagnostics.Append(unexpectedDiags) + return run.Status > moduletest.Pass } func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics { @@ -1268,27 +1281,3 @@ func (runner *TestFileRunner) AddVariablesToConfig(run *moduletest.Run, variable } } - -// trackRunStatuses is a helper function that returns two functions. The first -// function is used to collect the statuses of the runs, and the second function -// is used to update the overall file status based on the statuses of the runs. -func (runner *TestFileRunner) trackRunStatuses(file *moduletest.File) (func(*moduletest.Run, bool), func()) { - statuses := make([]moduletest.Status, len(file.Runs)) - collector := func(run *moduletest.Run, deferred bool) { - if deferred { - statuses[run.Index] = run.Status - } else { - file.UpdateStatus(run.Status) - } - } - - updater := func() { - for _, status := range statuses { - file.UpdateStatus(status) - } - } - - // We'll return two functions, one to collect the statuses of the runs, and - // one to update the overall file status. 
- return collector, updater -} diff --git a/internal/command/test_test.go b/internal/command/test_test.go index d6633c294519..73a5ad199cc4 100644 --- a/internal/command/test_test.go +++ b/internal/command/test_test.go @@ -104,12 +104,6 @@ func TestTest_Runs(t *testing.T) { expectedOut: []string{"1 passed, 0 failed."}, code: 0, }, - "expect_failures_continue": { - expectedOut: []string{"1 passed, 1 failed.", "Expected failure while planning"}, - code: 1, - expectedErr: []string{"Module output value precondition failed"}, - description: "continue test execution after an expected failure", - }, "expect_failures_resources": { expectedOut: []string{"1 passed, 0 failed."}, code: 0, @@ -399,7 +393,7 @@ func TestTest_Runs(t *testing.T) { Meta: meta, } - code := c.Run(tc.args) + code := c.Run(append(tc.args, "-no-color")) output := done(t) if code != tc.code { @@ -1837,6 +1831,7 @@ the apply operation could not be executed and so the overall test case will be marked as a failure and the original diagnostic included in the test report. + run "no_run"... skip input.tftest.hcl... tearing down input.tftest.hcl... fail output.tftest.hcl... in progress @@ -1869,7 +1864,7 @@ test report. resource.tftest.hcl... tearing down resource.tftest.hcl... fail -Failure! 1 passed, 3 failed. +Failure! 1 passed, 3 failed, 1 skipped. ` actualOut := output.Stdout() if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 { @@ -1916,6 +1911,71 @@ input must contain the character 'b' } } +func TestTest_MissingExpectedFailuresDuringApply(t *testing.T) { + // Test asserting that the test run fails, but not errors out, when expected failures are not present during apply. + // This lets subsequent runs continue to execute and the file to be marked as failed. + td := t.TempDir() + testCopyDir(t, testFixturePath(path.Join("test", "expect_failures_during_apply")), td) + defer testChdir(t, td)() + + provider := testing_command.NewProvider(nil) + view, done := testView(t) + + c := &TestCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(provider.Provider), + View: view, + }, + } + + code := c.Run([]string{"-no-color"}) + output := done(t) + + if code == 0 { + t.Errorf("expected status code 0 but got %d", code) + } + + expectedOut := `main.tftest.hcl... in progress + run "test"... fail + run "follow-up"... pass + +Warning: Value for undeclared variable + + on main.tftest.hcl line 16, in run "follow-up": + 16: input = "does not matter" + +The module under test does not declare a variable named "input", but it is +declared in run block "follow-up". + +main.tftest.hcl... tearing down +main.tftest.hcl... fail + +Failure! 1 passed, 1 failed. +` + actualOut := output.Stdout() + if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff) + } + + expectedErr := ` +Error: Missing expected failure + + on main.tftest.hcl line 7, in run "test": + 7: output.output + +The checkable object, output.output, was expected to report an error but did +not. 
+` + actualErr := output.Stderr() + if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 { + t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff) + } + + if provider.ResourceCount() > 0 { + t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString()) + } +} + func TestTest_UnknownAndNulls(t *testing.T) { tcs := map[string]struct { diff --git a/internal/command/testdata/test/expect_failures_continue/main.tf b/internal/command/testdata/test/expect_failures_continue/main.tf deleted file mode 100644 index af05c486a045..000000000000 --- a/internal/command/testdata/test/expect_failures_continue/main.tf +++ /dev/null @@ -1,13 +0,0 @@ - -variable "input" { - type = string -} - -output "output" { - value = var.input - - precondition { - condition = var.input == "something incredibly specific" - error_message = "this should fail" - } -} diff --git a/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl b/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl deleted file mode 100644 index 73d0f196b544..000000000000 --- a/internal/command/testdata/test/expect_failures_continue/main.tftest.hcl +++ /dev/null @@ -1,20 +0,0 @@ -variables { - input = "some value" -} - -run "test" { - - command = apply - - expect_failures = [ - output.output - ] -} - -run "follow-up" { - command = apply - - variables { - input = "something incredibly specific" - } -} \ No newline at end of file diff --git a/internal/command/testdata/test/expect_failures_during_apply/main.tf b/internal/command/testdata/test/expect_failures_during_apply/main.tf new file mode 100644 index 000000000000..acc1ca24d495 --- /dev/null +++ b/internal/command/testdata/test/expect_failures_during_apply/main.tf @@ -0,0 +1,13 @@ + +locals { + input = uuid() # using UUID to ensure that plan phase will return an unknown value +} + +output "output" { + value = local.input + + precondition { + condition = local.input != "" + error_message = "this should not fail during the apply phase" + } +} diff --git a/internal/command/testdata/test/expect_failures_during_apply/main.tftest.hcl b/internal/command/testdata/test/expect_failures_during_apply/main.tftest.hcl new file mode 100644 index 000000000000..a8039de9b438 --- /dev/null +++ b/internal/command/testdata/test/expect_failures_during_apply/main.tftest.hcl @@ -0,0 +1,18 @@ +run "test" { + + command = apply + +// We are expecting the output to fail during apply, but it will not, so the test will fail. + expect_failures = [ + output.output + ] +} + +// this should still run +run "follow-up" { + command = apply + + variables { + input = "does not matter" + } +} \ No newline at end of file diff --git a/internal/command/testdata/test/expected_failures_during_planning/input.tftest.hcl b/internal/command/testdata/test/expected_failures_during_planning/input.tftest.hcl index d7f8f46b47a0..7d92070f8873 100644 --- a/internal/command/testdata/test/expected_failures_during_planning/input.tftest.hcl +++ b/internal/command/testdata/test/expected_failures_during_planning/input.tftest.hcl @@ -14,3 +14,17 @@ run "input_failure" { ] } + + +// This should not run because the previous run block is expected to error, thus +// terminating the test file. 
+run "no_run" { + + variables { + input = "abc" + } + assert { + condition = var.input == "abc" + error_message = "should not run" + } +} \ No newline at end of file diff --git a/internal/moduletest/run.go b/internal/moduletest/run.go index 959697b1ad35..7b2bb5327053 100644 --- a/internal/moduletest/run.go +++ b/internal/moduletest/run.go @@ -20,7 +20,8 @@ import ( ) const ( - MainStateIdentifier = "" + MainStateIdentifier = "" + MissingFailureSummary = "Missing expected failure" ) type Run struct { @@ -545,7 +546,7 @@ func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags. // diagnostics. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: "Missing expected failure", + Summary: MissingFailureSummary, Detail: fmt.Sprintf("The checkable object, %s, was expected to report an error but did not.", addr.String()), Subject: sourceRanges.Get(addr).ToHCL().Ptr(), }) From d0108eda1d7de1c6f8b6f22a92733f94755e1013 Mon Sep 17 00:00:00 2001 From: Samsondeen Dare Date: Mon, 10 Feb 2025 12:04:21 +0100 Subject: [PATCH 6/6] incorporate code reviews suggestions --- internal/backend/local/test.go | 35 +++++++++++++--------------------- internal/moduletest/run.go | 28 ++++++++++++++++++++++++--- 2 files changed, 38 insertions(+), 25 deletions(-) diff --git a/internal/backend/local/test.go b/internal/backend/local/test.go index 40398ee8298d..72541d426010 100644 --- a/internal/backend/local/test.go +++ b/internal/backend/local/test.go @@ -623,32 +623,23 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st // checkForMissingExpectedFailures checks for missing expected failures in the diagnostics. // It updates the run status based on the presence of errors or missing expected failures. func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) { - // If there are errors in the diagnostics, update the run status to error. - if diags.HasErrors() { - run.Status = moduletest.Error - } - - // Retrieve diagnostics that are either unrelated to expected failures or report missing expected failures. + // Retrieve and append diagnostics that are either unrelated to expected failures + // or report missing expected failures. unexpectedDiags := run.ValidateExpectedFailures(diags) - var nonFailureDiags tfdiags.Diagnostics + run.Diagnostics = run.Diagnostics.Append(unexpectedDiags) for _, diag := range unexpectedDiags { - switch { - // If any diagnostic indicates a missing expected failure, update the run status to fail. - case diag.Description().Summary == moduletest.MissingFailureSummary: - diag.ExtraInfo() - run.Status = moduletest.Fail - default: - // Append other diagnostics. - nonFailureDiags = nonFailureDiags.Append(diag) + // // If any diagnostic indicates a missing expected failure, set the run status to fail. + if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok { + run.Status = run.Status.Merge(moduletest.Fail) + continue } - } - // If there are other errors, update the run status to error. - if nonFailureDiags.HasErrors() { - run.Status = moduletest.Error - } - // Append all diagnostics that are not expected failures to the run diagnostics. 
- run.Diagnostics = run.Diagnostics.Append(unexpectedDiags) + // upgrade the run status to error if there still are other errors in the diagnostics + if diag.Severity() == tfdiags.Error { + run.Status = run.Status.Merge(moduletest.Error) + break + } + } return run.Status > moduletest.Pass } diff --git a/internal/moduletest/run.go b/internal/moduletest/run.go index 7b2bb5327053..8c841ea9f6d6 100644 --- a/internal/moduletest/run.go +++ b/internal/moduletest/run.go @@ -20,8 +20,7 @@ import ( ) const ( - MainStateIdentifier = "" - MissingFailureSummary = "Missing expected failure" + MainStateIdentifier = "" ) type Run struct { @@ -546,12 +545,35 @@ func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags. // diagnostics. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, - Summary: MissingFailureSummary, + Summary: "Missing expected failure", Detail: fmt.Sprintf("The checkable object, %s, was expected to report an error but did not.", addr.String()), Subject: sourceRanges.Get(addr).ToHCL().Ptr(), + Extra: missingExpectedFailure(true), }) } } return diags } + +// DiagnosticExtraFromMissingExpectedFailure provides an interface for diagnostic ExtraInfo to +// denote that a diagnostic was generated as a result of a missing expected failure. +type DiagnosticExtraFromMissingExpectedFailure interface { + DiagnosticFromMissingExpectedFailure() bool +} + +// DiagnosticFromMissingExpectedFailure checks if the provided diagnostic +// is a result of a missing expected failure. +func DiagnosticFromMissingExpectedFailure(diag tfdiags.Diagnostic) bool { + maybe := tfdiags.ExtraInfo[DiagnosticExtraFromMissingExpectedFailure](diag) + if maybe == nil { + return false + } + return maybe.DiagnosticFromMissingExpectedFailure() +} + +type missingExpectedFailure bool + +func (missingExpectedFailure) DiagnosticFromMissingExpectedFailure() bool { + return true +}
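
Note on the diagnostic-classification change in PATCH 6/6: the review round replaces the summary-string comparison (`diag.Description().Summary == moduletest.MissingFailureSummary`) with a marker attached through the diagnostic's Extra field, so callers classify a "missing expected failure" by type assertion rather than by matching human-readable text that could change. The following is a minimal, self-contained Go sketch of that marker-interface pattern; the `diagnostic` struct and helper names are illustrative stand-ins, not the real tfdiags/hcl types.

package main

import "fmt"

// diagnostic is a stand-in for a tfdiags/hcl diagnostic: a human-readable
// summary plus an optional "extra info" payload, mirroring the Extra field
// the patch populates with missingExpectedFailure(true).
type diagnostic struct {
	summary string
	extra   interface{}
}

// missingExpectedFailureMarker mirrors the DiagnosticExtraFromMissingExpectedFailure
// interface added in internal/moduletest/run.go: any extra payload implementing
// it marks the diagnostic as a missing expected failure.
type missingExpectedFailureMarker interface {
	DiagnosticFromMissingExpectedFailure() bool
}

type missingExpectedFailure bool

func (missingExpectedFailure) DiagnosticFromMissingExpectedFailure() bool { return true }

// isMissingExpectedFailure plays the role of moduletest.DiagnosticFromMissingExpectedFailure:
// it type-asserts the extra payload instead of comparing summary strings.
func isMissingExpectedFailure(d diagnostic) bool {
	marker, ok := d.extra.(missingExpectedFailureMarker)
	return ok && marker.DiagnosticFromMissingExpectedFailure()
}

func main() {
	diags := []diagnostic{
		{summary: "Missing expected failure", extra: missingExpectedFailure(true)},
		{summary: "Invalid module source"}, // an unrelated error keeps its normal handling
	}
	for _, d := range diags {
		fmt.Printf("%q -> missing expected failure: %v\n", d.summary, isMissingExpectedFailure(d))
	}
}

A typed marker also survives diagnostic wrapping (which is what tfdiags.ExtraInfo is for in the real code), whereas a summary-string match breaks as soon as the wording is edited.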
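
The same patch switches direct status assignments to run.Status.Merge(...) and gates the caller on run.Status > moduletest.Pass. The sketch below shows the severity-ordering idea behind that; the constant names follow moduletest, but their concrete values and ordering here are an assumption for illustration, not the actual moduletest constants.

package main

import "fmt"

// Status models a severity-ordered test status where a larger value is worse.
type Status int

const (
	Pending Status = iota
	Skip
	Pass
	Fail
	Error
)

// Merge keeps the more severe of the two statuses, so repeated calls can only
// escalate a run's status, never downgrade it.
func (s Status) Merge(other Status) Status {
	if other > s {
		return other
	}
	return s
}

func main() {
	run := Pass
	run = run.Merge(Fail)  // a missing expected failure marks the run as failed
	run = run.Merge(Error) // any remaining real error escalates it further
	// "> Pass" is the "fail or error" check used by checkForMissingExpectedFailures.
	fmt.Println(int(run), run > Pass)
}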