Continue test execution after an expected failure #36447

Merged · 6 commits · Feb 10, 2025

Changes from all commits

5 changes: 5 additions & 0 deletions .changes/unreleased/ENHANCEMENTS-20250206-162053.yaml
@@ -0,0 +1,5 @@
kind: ENHANCEMENTS
body: Terraform Test: Continue subsequent test execution when an expected failure is not encountered.
time: 2025-02-06T16:20:53.83763+01:00
custom:
Issue: "34969"
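
The changelog entry above is the user-facing summary of this change: when a run block declares expect_failures but the expected failure is never encountered, that run is now reported as failed while the remaining run blocks in the file still execute, instead of the whole file stopping there. A minimal sketch of a test file that exercises this behavior follows; the file name, variable, and run labels are hypothetical and not taken from this PR.

# tests/expected_failure.tftest.hcl (hypothetical)

run "expects_validation_failure" {
  command = plan

  variables {
    # A valid value, so the validation on var.instance_count never fails
    # and the expected failure below is not encountered.
    instance_count = 1
  }

  # With this change, the missing expected failure marks this run as failed
  # rather than aborting the rest of the file.
  expect_failures = [
    var.instance_count,
  ]
}

run "still_executes_after_missing_failure" {
  # Previously this run would not execute once the run above missed its
  # expected failure; now it still runs.
  command = plan
}

The sketch assumes var.instance_count carries a validation block in the configuration under test; the corresponding status handling lives in checkForMissingExpectedFailures in the Go diff below.
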
162 changes: 87 additions & 75 deletions internal/backend/local/test.go
@@ -282,7 +282,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}

// walk and execute the graph
diags = runner.walkGraph(graph)
diags = runner.walkGraph(graph, file)

// If the graph walk was terminated, we don't want to add the diagnostics.
// The error the user receives will just be:
@@ -298,7 +298,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}

// walkGraph goes through the graph and executes each run it finds.
func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics {
func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics {
sem := runner.Suite.semaphore

// Walk the graph.
@@ -346,7 +346,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
defer sem.Release()

switch v := v.(type) {
case *graph.NodeTestRun:
case *graph.NodeTestRun: // NodeTestRun is also executable, so it has to be first.
file := v.File()
run := v.Run()
if file.GetStatus() == moduletest.Error {
@@ -374,83 +374,68 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
if diags.HasErrors() {
return diags
}
// continue the execution of the test run.

startTime := time.Now().UTC()
runner.run(run, file, startTime)
runner.Suite.View.Run(run, file, moduletest.Complete, 0)
file.UpdateStatus(run.Status)
case graph.GraphNodeExecutable:
diags = v.Execute(runner.EvalContext)
return diags
default:
// If the vertex isn't a test run or executable, we'll just skip it.
return
}
return
}

// We already know that the vertex is a test run
runNode := v.(*graph.NodeTestRun)

file := runNode.File()
run := runNode.Run()

key := run.GetStateKey()
if run.Config.ConfigUnderTest != nil {
if key == moduletest.MainStateIdentifier {
// This is bad. It means somehow the module we're loading has
// the same key as main state and we're about to corrupt things.

run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module source",
Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
Subject: run.Config.Module.DeclRange.Ptr(),
})

run.Status = moduletest.Error
file.UpdateStatus(moduletest.Error)
return
}
}

startTime := time.Now().UTC()
state, updatedState := runner.run(run, file, runner.EvalContext.GetFileState(key).State)
runDuration := time.Since(startTime)
if updatedState {
// Only update the most recent run and state if the state was
// actually updated by this change. We want to use the run that
// most recently updated the tracked state as the cleanup
// configuration.
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: state,
})
}
return g.AcyclicGraph.Walk(walkFn)
}

func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) {
log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
defer func() {
// If we got far enough to actually execute the run then we'll give
// the view some additional metadata about the execution.
run.ExecutionMeta = &moduletest.RunExecutionMeta{
Start: startTime,
Duration: runDuration,
Duration: time.Since(startTime),
}
runner.Suite.View.Run(run, file, moduletest.Complete, 0)
file.UpdateStatus(run.Status)
return
}

return g.AcyclicGraph.Walk(walkFn)
}
}()

func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, state *states.State) (*states.State, bool) {
log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
key := run.GetStateKey()
if run.Config.ConfigUnderTest != nil {
if key == moduletest.MainStateIdentifier {
// This is bad. It means somehow the module we're loading has
// the same key as main state and we're about to corrupt things.

run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module source",
Detail: fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
Subject: run.Config.Module.DeclRange.Ptr(),
})

run.Status = moduletest.Error
file.UpdateStatus(moduletest.Error)
return
}
}
state := runner.EvalContext.GetFileState(key).State

config := run.ModuleConfig
if runner.Suite.Cancelled {
// Don't do anything, just give up and return immediately.
// The surrounding functions should stop this even being called, but in
// case of race conditions or something we can still verify this.
return state, false
return
}

if runner.Suite.Stopped {
// Basically the same as above, except we'll be a bit nicer.
run.Status = moduletest.Skip
return state, false
return
}

start := time.Now().UTC().UnixMilli()
@@ -459,35 +444,31 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(run.Config.Validate(config))
if run.Diagnostics.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

configDiags := graph.TransformConfigForTest(runner.EvalContext, run, file)
run.Diagnostics = run.Diagnostics.Append(configDiags)
if configDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

validateDiags := runner.validate(run, file, start)
run.Diagnostics = run.Diagnostics.Append(validateDiags)
if validateDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

references, referenceDiags := run.GetReferences()
run.Diagnostics = run.Diagnostics.Append(referenceDiags)
if referenceDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
}
// already validated during static analysis
references, _ := run.GetReferences()

variables, variableDiags := runner.GetVariables(run, references, true)
run.Diagnostics = run.Diagnostics.Append(variableDiags)
if variableDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

// FilterVariablesToModule only returns warnings, so we don't check the
@@ -498,7 +479,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
tfCtx, ctxDiags := terraform.NewContext(runner.Suite.Opts)
run.Diagnostics = run.Diagnostics.Append(ctxDiags)
if ctxDiags.HasErrors() {
return state, false
return
}

planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start)
@@ -508,7 +489,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

runner.AddVariablesToConfig(run, variables)
@@ -549,8 +530,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
// Now we've successfully validated this run block, let's add it into
// our prior run outputs so future run blocks can access it.
runner.EvalContext.SetOutput(run, outputVals)

return state, false
return
}

// Otherwise any error during the planning prevents our apply from
@@ -559,7 +539,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
run.Diagnostics = run.Diagnostics.Append(planDiags)
if planDiags.HasErrors() {
run.Status = moduletest.Error
return state, false
return
}

// Since we're carrying on and executing the apply operation as well, we're
@@ -579,14 +559,16 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
applyScope, updated, applyDiags := runner.apply(tfCtx, plan, state, run, file, moduletest.Running, start, variables)

// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
applyDiags = run.ValidateExpectedFailures(applyDiags)

run.Diagnostics = run.Diagnostics.Append(applyDiags)
if applyDiags.HasErrors() {
run.Status = moduletest.Error
// We'll also update the run status based on the presence of errors or missing expected failures.
failOrErr := runner.checkForMissingExpectedFailures(run, applyDiags)
if failOrErr {
// Even though the apply operation failed, the graph may have done
// partial updates and the returned state should reflect this.
return updated, true
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: updated,
})
return
}

runner.AddVariablesToConfig(run, variables)
@@ -628,7 +610,37 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
// our prior run outputs so future run blocks can access it.
runner.EvalContext.SetOutput(run, outputVals)

return updated, true
// Only update the most recent run and state if the state was
// actually updated by this change. We want to use the run that
// most recently updated the tracked state as the cleanup
// configuration.
runner.EvalContext.SetFileState(key, &graph.TestFileState{
Run: run,
State: updated,
})
}

// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics.
// It updates the run status based on the presence of errors or missing expected failures.
func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) {
// Retrieve and append diagnostics that are either unrelated to expected failures
// or report missing expected failures.
unexpectedDiags := run.ValidateExpectedFailures(diags)
run.Diagnostics = run.Diagnostics.Append(unexpectedDiags)
for _, diag := range unexpectedDiags {
// If any diagnostic indicates a missing expected failure, set the run status to fail.
if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok {
run.Status = run.Status.Merge(moduletest.Fail)
continue
}

// upgrade the run status to error if there still are other errors in the diagnostics
if diag.Severity() == tfdiags.Error {
run.Status = run.Status.Merge(moduletest.Error)
break
}
}
return run.Status > moduletest.Pass
}

func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics {