Commit f2adf23

continue to execute runs when exp failures did not fail during apply
1 parent ece61bc commit f2adf23

File tree: 9 files changed (+159 −71 lines changed)
@@ -1,5 +1,5 @@
 kind: ENHANCEMENTS
-body: Continue Test execution when a run fails due to an expected failure.
+body: Terraform Test: Continue subsequent test execution when an expected failure is not encountered.
 time: 2025-02-06T16:20:53.83763+01:00
 custom:
   Issue: "34969"
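
For context, a minimal sketch of the scenario this commit changes (condensed from the fixtures added later in this diff; names are illustrative). Previously, a run whose expect_failures never materialized during apply errored the whole test file; after this commit the run is only marked as failed, so later runs still execute:

run "test" {
  command = apply

  # This failure never occurs during apply, so under the new behavior
  # this run is marked as failed rather than erroring the file.
  expect_failures = [
    output.output
  ]
}

run "follow-up" {
  # Previously this run would never execute; now it does, and only the
  # overall file is reported as failed.
  command = apply
}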

internal/backend/local/test.go

+41 −27
@@ -300,7 +300,6 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
 // walkGraph goes through the graph and execute each run it finds.
 func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics {
 	sem := runner.Suite.semaphore
-	collectRunStatus, updateFileStatus := runner.trackRunStatuses(file)

 	// Walk the graph.
 	walkFn := func(v dag.Vertex) (diags tfdiags.Diagnostics) {
@@ -377,13 +376,9 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.Fil
 			}

 			startTime := time.Now().UTC()
-			deferFileStatus := runner.run(run, file, startTime)
+			runner.run(run, file, startTime)
 			runner.Suite.View.Run(run, file, moduletest.Complete, 0)
-			// If the run block is done, but it was due to an expected failure, we
-			// don't want to update the file status immediately. We'll collect the
-			// status of this run block and update the file status at the end of the
-			// file execution.
-			collectRunStatus(run, deferFileStatus)
+			file.UpdateStatus(run.Status)
 		case graph.GraphNodeExecutable:
 			diags = v.Execute(runner.EvalContext)
 			return diags
@@ -394,12 +389,10 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.Fil
 		return
 	}

-	diags := g.AcyclicGraph.Walk(walkFn)
-	updateFileStatus()
-	return diags
+	return g.AcyclicGraph.Walk(walkFn)
 }

-func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) (deferFileStatus bool) {
+func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) {
 	log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
 	defer func() {
 		// If we got far enough to actually execute the run then we'll give
@@ -491,9 +484,9 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st

 	planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start)
 	if run.Config.Command == configs.PlanTestCommand {
+		// Then we want to assess our conditions and diagnostics differently.
 		planDiags = run.ValidateExpectedFailures(planDiags)
 		run.Diagnostics = run.Diagnostics.Append(planDiags)
-		// Then we want to assess our conditions and diagnostics differently.
 		if planDiags.HasErrors() {
 			run.Status = moduletest.Error
 			return
@@ -540,21 +533,12 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
 		return
 	}

-	// Otherwise any error (expected or unexpected) during the planning prevents our apply from
+	// Otherwise any error during the planning prevents our apply from
 	// continuing which is an error.
 	planDiags = run.ExplainExpectedFailures(planDiags)
 	run.Diagnostics = run.Diagnostics.Append(planDiags)
 	if planDiags.HasErrors() {
 		run.Status = moduletest.Error
-		// If the plan failed, but all the failures were expected, then we don't
-		// want to mark the overall file as a failure, so that subsequent runs can
-		// still be executed.
-		// We will collect the status of this run instead of updating the file status.
-		// At the end of the file execution, we will update the file status based on the
-		// statuses of all the runs.
-		if !run.ValidateExpectedFailures(planDiags).HasErrors() {
-			deferFileStatus = true
-		}
 		return
 	}

@@ -575,11 +559,9 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
 	applyScope, updated, applyDiags := runner.apply(tfCtx, plan, state, run, file, moduletest.Running, start, variables)

 	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
-	applyDiags = run.ValidateExpectedFailures(applyDiags)
-
-	run.Diagnostics = run.Diagnostics.Append(applyDiags)
-	if applyDiags.HasErrors() {
-		run.Status = moduletest.Error
+	// We'll also update the run status based on the presence of errors or missing expected failures.
+	failOrErr := runner.checkForMissingExpectedFailures(run, applyDiags)
+	if failOrErr {
 		// Even though the apply operation failed, the graph may have done
 		// partial updates and the returned state should reflect this.
 		runner.EvalContext.SetFileState(key, &graph.TestFileState{
@@ -639,6 +621,38 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
 	return
 }

+// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics.
+// It updates the run status based on the presence of errors or missing expected failures.
+func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) {
+	// If there are errors in the diagnostics, update the run status to error.
+	if diags.HasErrors() {
+		run.Status = moduletest.Error
+	}
+
+	// Retrieve diagnostics that are either unrelated to expected failures or report missing expected failures.
+	unexpectedDiags := run.ValidateExpectedFailures(diags)
+	var nonFailureDiags tfdiags.Diagnostics
+	for _, diag := range unexpectedDiags {
+		switch {
+		// If any diagnostic indicates a missing expected failure, update the run status to fail.
+		case diag.Description().Summary == moduletest.MissingFailureSummary:
+			diag.ExtraInfo()
+			run.Status = moduletest.Fail
+		default:
+			// Append other diagnostics.
+			nonFailureDiags = nonFailureDiags.Append(diag)
+		}
+	}
+	// If there are other errors, update the run status to error.
+	if nonFailureDiags.HasErrors() {
+		run.Status = moduletest.Error
+	}
+
+	// Append all diagnostics that are not expected failures to the run diagnostics.
+	run.Diagnostics = run.Diagnostics.Append(unexpectedDiags)
+	return run.Status > moduletest.Pass
+}
+
 func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics {
 	log.Printf("[TRACE] TestFileRunner: called validate for %s/%s", file.Name, run.Name)

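The helper's final line, return run.Status > moduletest.Pass, leans on the ordering of the status enum: both Fail and Error must compare greater than Pass. A minimal self-contained sketch of that assumption follows; the real constants live in internal/moduletest and their exact iota ordering is assumed here, not shown in this diff:

package main

import "fmt"

// Status mirrors the assumed ordering of moduletest.Status:
// statuses "worse" than Pass compare greater than Pass.
type Status int

const (
	Pending Status = iota
	Skip
	Pass
	Fail
	Error
)

func main() {
	for _, s := range []Status{Skip, Pass, Fail, Error} {
		// Only Fail and Error make failOrErr true, which is what lets
		// the caller treat "missing expected failure" (Fail) and real
		// errors (Error) through the same branch.
		fmt.Printf("status %d => failOrErr %t\n", s, s > Pass)
	}
}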
internal/command/test_test.go

+69 −8
@@ -104,12 +104,6 @@ func TestTest_Runs(t *testing.T) {
 			expectedOut: []string{"1 passed, 0 failed."},
 			code:        0,
 		},
-		"expect_failures_continue": {
-			expectedOut: []string{"1 passed, 1 failed.", "Expected failure while planning"},
-			code:        1,
-			expectedErr: []string{"Module output value precondition failed"},
-			description: "continue test execution after an expected failure",
-		},
 		"expect_failures_resources": {
 			expectedOut: []string{"1 passed, 0 failed."},
 			code:        0,
@@ -399,14 +393,15 @@ func TestTest_Runs(t *testing.T) {
 				Meta: meta,
 			}

-			code := c.Run(tc.args)
+			code := c.Run(append(tc.args, "-no-color"))
 			output := done(t)

 			if code != tc.code {
 				t.Errorf("expected status code %d but got %d:\n\n%s", tc.code, code, output.All())
 			}

 			if len(tc.expectedOut) > 0 {
+				fmt.Println(output.All())
 				for _, expectedOut := range tc.expectedOut {
 					if !strings.Contains(output.Stdout(), expectedOut) {
 						t.Errorf("output didn't contain expected string:\n\n%s", output.Stdout())
@@ -1837,6 +1832,7 @@ the apply operation could not be executed and so the overall test case will
 be marked as a failure and the original diagnostic included in the test
 report.

+  run "no_run"... skip
 input.tftest.hcl... tearing down
 input.tftest.hcl... fail
 output.tftest.hcl... in progress
@@ -1869,7 +1865,7 @@ test report.
 resource.tftest.hcl... tearing down
 resource.tftest.hcl... fail

-Failure! 1 passed, 3 failed.
+Failure! 1 passed, 3 failed, 1 skipped.
 `
 	actualOut := output.Stdout()
 	if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 {
@@ -1916,6 +1912,71 @@ input must contain the character 'b'
 	}
 }

+func TestTest_MissingExpectedFailuresDuringApply(t *testing.T) {
+	// Test asserting that the test run fails, but does not error out, when expected failures are not present during apply.
+	// This lets subsequent runs continue to execute and the file be marked as failed.
+	td := t.TempDir()
+	testCopyDir(t, testFixturePath(path.Join("test", "expect_failures_during_apply")), td)
+	defer testChdir(t, td)()
+
+	provider := testing_command.NewProvider(nil)
+	view, done := testView(t)
+
+	c := &TestCommand{
+		Meta: Meta{
+			testingOverrides: metaOverridesForProvider(provider.Provider),
+			View:             view,
+		},
+	}
+
+	code := c.Run([]string{"-no-color"})
+	output := done(t)
+
+	if code == 0 {
+		t.Errorf("expected a non-zero status code but got %d", code)
+	}
+
+	expectedOut := `main.tftest.hcl... in progress
+  run "test"... fail
+  run "follow-up"... pass
+
+Warning: Value for undeclared variable
+
+  on main.tftest.hcl line 16, in run "follow-up":
+  16:     input = "does not matter"
+
+The module under test does not declare a variable named "input", but it is
+declared in run block "follow-up".
+
+main.tftest.hcl... tearing down
+main.tftest.hcl... fail
+
+Failure! 1 passed, 1 failed.
+`
+	actualOut := output.Stdout()
+	if diff := cmp.Diff(expectedOut, actualOut); len(diff) > 0 {
+		t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedOut, actualOut, diff)
+	}
+
+	expectedErr := `
+Error: Missing expected failure
+
+  on main.tftest.hcl line 7, in run "test":
+   7:     output.output
+
+The checkable object, output.output, was expected to report an error but did
+not.
+`
+	actualErr := output.Stderr()
+	if diff := cmp.Diff(actualErr, expectedErr); len(diff) > 0 {
+		t.Errorf("output didn't match expected:\nexpected:\n%s\nactual:\n%s\ndiff:\n%s", expectedErr, actualErr, diff)
+	}
+
+	if provider.ResourceCount() > 0 {
+		t.Errorf("should have deleted all resources on completion but left %v", provider.ResourceString())
+	}
+}
+
 func TestTest_UnknownAndNulls(t *testing.T) {

 	tcs := map[string]struct {

internal/command/testdata/test/expect_failures_continue/main.tf

-13
This file was deleted.

internal/command/testdata/test/expect_failures_continue/main.tftest.hcl

-20
This file was deleted.
internal/command/testdata/test/expect_failures_during_apply/main.tf

+13
@@ -0,0 +1,13 @@
+
+locals {
+  input = uuid() # using UUID to ensure that plan phase will return an unknown value
+}
+
+output "output" {
+  value = local.input
+
+  precondition {
+    condition     = local.input != ""
+    error_message = "this should not fail during the apply phase"
+  }
+}
internal/command/testdata/test/expect_failures_during_apply/main.tftest.hcl

+18
@@ -0,0 +1,18 @@
+run "test" {
+
+  command = apply
+
+  // We are expecting the output to fail during apply, but it will not, so the test will fail.
+  expect_failures = [
+    output.output
+  ]
+}
+
+// this should still run
+run "follow-up" {
+  command = apply
+
+  variables {
+    input = "does not matter"
+  }
+}

internal/command/testdata/test/expected_failures_during_planning/input.tftest.hcl

+14
@@ -14,3 +14,17 @@ run "input_failure" {
   ]

 }
+
+
+// This should not run because the previous run block is expected to error, thus
+// terminating the test file.
+run "no_run" {
+
+  variables {
+    input = "abc"
+  }
+  assert {
+    condition     = var.input == "abc"
+    error_message = "should not run"
+  }
+}

internal/moduletest/run.go

+3 −2
@@ -20,7 +20,8 @@ import (
 )

 const (
-	MainStateIdentifier = ""
+	MainStateIdentifier   = ""
+	MissingFailureSummary = "Missing expected failure"
 )

 type Run struct {
@@ -545,7 +546,7 @@ func (run *Run) ValidateExpectedFailures(originals tfdiags.Diagnostics) tfdiags.
 	// diagnostics.
 	diags = diags.Append(&hcl.Diagnostic{
 		Severity: hcl.DiagError,
-		Summary:  "Missing expected failure",
+		Summary:  MissingFailureSummary,
 		Detail:   fmt.Sprintf("The checkable object, %s, was expected to report an error but did not.", addr.String()),
 		Subject:  sourceRanges.Get(addr).ToHCL().Ptr(),
 	})
