@@ -282,7 +282,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
	}

	// walk and execute the graph
-	diags = runner.walkGraph(graph)
+	diags = runner.walkGraph(graph, file)

	// If the graph walk was terminated, we don't want to add the diagnostics.
	// The error the user receives will just be:
@@ -298,7 +298,7 @@ func (runner *TestFileRunner) Test(file *moduletest.File) {
}

// walkGraph goes through the graph and execute each run it finds.
-func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics {
+func (runner *TestFileRunner) walkGraph(g *terraform.Graph, file *moduletest.File) tfdiags.Diagnostics {
	sem := runner.Suite.semaphore

	// Walk the graph.
@@ -346,7 +346,7 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
		defer sem.Release()

		switch v := v.(type) {
-		case *graph.NodeTestRun:
+		case *graph.NodeTestRun: // NodeTestRun is also executable, so it has to be first.
			file := v.File()
			run := v.Run()
			if file.GetStatus() == moduletest.Error {
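
The new comment on the `*graph.NodeTestRun` case is load-bearing: in a Go type switch the first matching case wins, so a concrete type that also satisfies `graph.GraphNodeExecutable` must be listed before the interface case or it would never be selected. A minimal standalone sketch of that ordering rule, using toy types rather than the real graph package:

package main

import "fmt"

type executable interface{ Execute() string }

// testRun is a concrete type that also satisfies executable,
// mirroring how NodeTestRun also implements GraphNodeExecutable.
type testRun struct{}

func (testRun) Execute() string { return "generic execute path" }

func dispatch(v any) string {
	switch n := v.(type) {
	case testRun: // concrete type first, otherwise the interface case below would catch it
		return fmt.Sprintf("test-run specific path for %T", n)
	case executable:
		return n.Execute()
	default:
		return "skipped"
	}
}

func main() {
	fmt.Println(dispatch(testRun{})) // prints the test-run specific path
}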
@@ -374,83 +374,68 @@ func (runner *TestFileRunner) walkGraph(g *terraform.Graph) tfdiags.Diagnostics
			if diags.HasErrors() {
				return diags
			}
-			// continue the execution of the test run.
+
+			startTime := time.Now().UTC()
+			runner.run(run, file, startTime)
+			runner.Suite.View.Run(run, file, moduletest.Complete, 0)
+			file.UpdateStatus(run.Status)
		case graph.GraphNodeExecutable:
			diags = v.Execute(runner.EvalContext)
			return diags
		default:
			// If the vertex isn't a test run or executable, we'll just skip it.
			return
		}
+		return
+	}

-		// We already know that the vertex is a test run
-		runNode := v.(*graph.NodeTestRun)
-
-		file := runNode.File()
-		run := runNode.Run()
-
-		key := run.GetStateKey()
-		if run.Config.ConfigUnderTest != nil {
-			if key == moduletest.MainStateIdentifier {
-				// This is bad. It means somehow the module we're loading has
-				// the same key as main state and we're about to corrupt things.
-
-				run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
-					Severity: hcl.DiagError,
-					Summary:  "Invalid module source",
-					Detail:   fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
-					Subject:  run.Config.Module.DeclRange.Ptr(),
-				})
-
-				run.Status = moduletest.Error
-				file.UpdateStatus(moduletest.Error)
-				return
-			}
-		}
-
-		startTime := time.Now().UTC()
-		state, updatedState := runner.run(run, file, runner.EvalContext.GetFileState(key).State)
-		runDuration := time.Since(startTime)
-		if updatedState {
-			// Only update the most recent run and state if the state was
-			// actually updated by this change. We want to use the run that
-			// most recently updated the tracked state as the cleanup
-			// configuration.
-			runner.EvalContext.SetFileState(key, &graph.TestFileState{
-				Run:   run,
-				State: state,
-			})
-		}
+	return g.AcyclicGraph.Walk(walkFn)
+}

+func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, startTime time.Time) {
+	log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
+	defer func() {
		// If we got far enough to actually execute the run then we'll give
		// the view some additional metadata about the execution.
		run.ExecutionMeta = &moduletest.RunExecutionMeta{
			Start:    startTime,
-			Duration: runDuration,
+			Duration: time.Since(startTime),
		}
-		runner.Suite.View.Run(run, file, moduletest.Complete, 0)
-		file.UpdateStatus(run.Status)
-		return
-	}

-	return g.AcyclicGraph.Walk(walkFn)
-}
+	}()

-func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, state *states.State) (*states.State, bool) {
-	log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
+	key := run.GetStateKey()
+	if run.Config.ConfigUnderTest != nil {
+		if key == moduletest.MainStateIdentifier {
+			// This is bad. It means somehow the module we're loading has
+			// the same key as main state and we're about to corrupt things.
+
+			run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid module source",
+				Detail:   fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in Terraform - please report it!", key),
+				Subject:  run.Config.Module.DeclRange.Ptr(),
+			})
+
+			run.Status = moduletest.Error
+			file.UpdateStatus(moduletest.Error)
+			return
+		}
+	}
+	state := runner.EvalContext.GetFileState(key).State

	config := run.ModuleConfig
	if runner.Suite.Cancelled {
		// Don't do anything, just give up and return immediately.
		// The surrounding functions should stop this even being called, but in
		// case of race conditions or something we can still verify this.
-		return state, false
+		return
	}

	if runner.Suite.Stopped {
		// Basically the same as above, except we'll be a bit nicer.
		run.Status = moduletest.Skip
-		return state, false
+		return
	}

	start := time.Now().UTC().UnixMilli()
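
A side effect of the new `run(run, file, startTime)` signature is that the execution metadata is now filled in by a deferred closure, so the duration is recorded on every return path, including the early returns for cancellation, stop, and validation errors. A small self-contained sketch of that pattern, using toy types rather than the real moduletest structs:

package main

import (
	"fmt"
	"time"
)

// runMeta and testRun are stand-ins for moduletest.RunExecutionMeta / moduletest.Run.
type runMeta struct {
	Start    time.Time
	Duration time.Duration
}

type testRun struct{ Meta *runMeta }

// execute records timing metadata in a defer, so even an early return
// (for example a failed validation) still leaves Meta populated.
func execute(run *testRun, failEarly bool) {
	startTime := time.Now().UTC()
	defer func() {
		run.Meta = &runMeta{
			Start:    startTime,
			Duration: time.Since(startTime),
		}
	}()

	if failEarly {
		return // the deferred closure still runs here
	}
	time.Sleep(5 * time.Millisecond)
}

func main() {
	r := &testRun{}
	execute(r, true)
	fmt.Printf("meta recorded: start set=%v duration=%v\n", !r.Meta.Start.IsZero(), r.Meta.Duration)
}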
@@ -459,35 +444,31 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	run.Diagnostics = run.Diagnostics.Append(run.Config.Validate(config))
	if run.Diagnostics.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

	configDiags := graph.TransformConfigForTest(runner.EvalContext, run, file)
	run.Diagnostics = run.Diagnostics.Append(configDiags)
	if configDiags.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

	validateDiags := runner.validate(run, file, start)
	run.Diagnostics = run.Diagnostics.Append(validateDiags)
	if validateDiags.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

-	references, referenceDiags := run.GetReferences()
-	run.Diagnostics = run.Diagnostics.Append(referenceDiags)
-	if referenceDiags.HasErrors() {
-		run.Status = moduletest.Error
-		return state, false
-	}
+	// already validated during static analysis
+	references, _ := run.GetReferences()

	variables, variableDiags := runner.GetVariables(run, references, true)
	run.Diagnostics = run.Diagnostics.Append(variableDiags)
	if variableDiags.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

	// FilterVariablesToModule only returns warnings, so we don't check the
@@ -498,7 +479,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	tfCtx, ctxDiags := terraform.NewContext(runner.Suite.Opts)
	run.Diagnostics = run.Diagnostics.Append(ctxDiags)
	if ctxDiags.HasErrors() {
-		return state, false
+		return
	}

	planScope, plan, planDiags := runner.plan(tfCtx, config, state, run, file, setVariables, references, start)
@@ -508,7 +489,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	run.Diagnostics = run.Diagnostics.Append(planDiags)
	if planDiags.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

	runner.AddVariablesToConfig(run, variables)
@@ -549,8 +530,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
		// Now we've successfully validated this run block, lets add it into
		// our prior run outputs so future run blocks can access it.
		runner.EvalContext.SetOutput(run, outputVals)
-
-		return state, false
+		return
	}

	// Otherwise any error during the planning prevents our apply from
@@ -559,7 +539,7 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	run.Diagnostics = run.Diagnostics.Append(planDiags)
	if planDiags.HasErrors() {
		run.Status = moduletest.Error
-		return state, false
+		return
	}

	// Since we're carrying on an executing the apply operation as well, we're
@@ -579,14 +559,16 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	applyScope, updated, applyDiags := runner.apply(tfCtx, plan, state, run, file, moduletest.Running, start, variables)

	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
-	applyDiags = run.ValidateExpectedFailures(applyDiags)
-
-	run.Diagnostics = run.Diagnostics.Append(applyDiags)
-	if applyDiags.HasErrors() {
-		run.Status = moduletest.Error
+	// We'll also update the run status based on the presence of errors or missing expected failures.
+	failOrErr := runner.checkForMissingExpectedFailures(run, applyDiags)
+	if failOrErr {
		// Even though the apply operation failed, the graph may have done
		// partial updates and the returned state should reflect this.
-		return updated, true
+		runner.EvalContext.SetFileState(key, &graph.TestFileState{
+			Run:   run,
+			State: updated,
+		})
+		return
	}

	runner.AddVariablesToConfig(run, variables)
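
The failure branch now persists the partially-updated state via `SetFileState` before returning, so the eventual cleanup pass still knows about resources a failed apply managed to create. A rough sketch of that idea with hypothetical helpers (the real code stores a `*states.State` keyed by the run's state key):

package main

import (
	"errors"
	"fmt"
)

// applyAndTrack sketches the pattern: persist whatever state the apply
// produced before acting on the error, so a later cleanup can still see it.
// trackState is a hypothetical stand-in for EvalContext.SetFileState.
func applyAndTrack(apply func() (string, error), trackState func(string)) error {
	updated, err := apply()
	trackState(updated) // recorded even when err != nil
	return err
}

func main() {
	var tracked string
	err := applyAndTrack(
		func() (string, error) { return "partial state", errors.New("apply failed") },
		func(s string) { tracked = s },
	)
	fmt.Println(tracked, err) // "partial state apply failed"
}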
@@ -628,7 +610,37 @@ func (runner *TestFileRunner) run(run *moduletest.Run, file *moduletest.File, st
	// our prior run outputs so future run blocks can access it.
	runner.EvalContext.SetOutput(run, outputVals)

-	return updated, true
+	// Only update the most recent run and state if the state was
+	// actually updated by this change. We want to use the run that
+	// most recently updated the tracked state as the cleanup
+	// configuration.
+	runner.EvalContext.SetFileState(key, &graph.TestFileState{
+		Run:   run,
+		State: updated,
+	})
+}
+
+// checkForMissingExpectedFailures checks for missing expected failures in the diagnostics.
+// It updates the run status based on the presence of errors or missing expected failures.
+func (runner *TestFileRunner) checkForMissingExpectedFailures(run *moduletest.Run, diags tfdiags.Diagnostics) (failOrErr bool) {
+	// Retrieve and append diagnostics that are either unrelated to expected failures
+	// or report missing expected failures.
+	unexpectedDiags := run.ValidateExpectedFailures(diags)
+	run.Diagnostics = run.Diagnostics.Append(unexpectedDiags)
+	for _, diag := range unexpectedDiags {
+		// If any diagnostic indicates a missing expected failure, set the run status to fail.
+		if ok := moduletest.DiagnosticFromMissingExpectedFailure(diag); ok {
+			run.Status = run.Status.Merge(moduletest.Fail)
+			continue
+		}
+
+		// Upgrade the run status to error if there are still other errors in the diagnostics.
+		if diag.Severity() == tfdiags.Error {
+			run.Status = run.Status.Merge(moduletest.Error)
+			break
+		}
+	}
+	return run.Status > moduletest.Pass
}

func (runner *TestFileRunner) validate(run *moduletest.Run, file *moduletest.File, start int64) tfdiags.Diagnostics {
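
For readers puzzling over `return run.Status > moduletest.Pass`: the helper relies on statuses being ordered so that anything above Pass means the run did not succeed, with `Merge` keeping the worse of the two statuses. A simplified stand-in (the ordering here is an assumption for illustration, not a copy of the real moduletest enum):

package main

import "fmt"

// status is a simplified stand-in for moduletest.Status; the ordering is
// assumed for illustration: anything above pass means the run did not succeed.
type status int

const (
	pass status = iota
	fail
	errored
)

// merge keeps the worse of the two statuses, mirroring the Merge idea.
func (s status) merge(other status) status {
	if other > s {
		return other
	}
	return s
}

func main() {
	run := pass
	run = run.merge(fail)    // a missing expected failure marks the run as failed
	run = run.merge(errored) // an unrelated error diagnostic escalates it further
	fmt.Println(run > pass)  // true: this is what the failOrErr result reports
}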