@@ -4663,10 +4663,6 @@ void Compiler::fgDoReversePostOrderLayout()
     }
 #endif // DEBUG
 
-    // Compute DFS of all blocks in the method, using profile data to determine the order successors are visited in.
-    //
-    m_dfsTree = fgComputeDfs</* useProfile */ true>();
-
     // If LSRA didn't create any new blocks, we can reuse its loop-aware RPO traversal,
     // which is cached in Compiler::fgBBs.
     // If the cache isn't available, we need to recompute the loop-aware RPO.
@@ -4675,15 +4671,21 @@ void Compiler::fgDoReversePostOrderLayout()
 
     if (rpoSequence == nullptr)
     {
-        rpoSequence = new (this, CMK_BasicBlock) BasicBlock*[m_dfsTree->GetPostOrderCount()];
+        assert(m_dfsTree == nullptr);
+        m_dfsTree = fgComputeDfs</* useProfile */ true>();
         FlowGraphNaturalLoops* const loops = FlowGraphNaturalLoops::Find(m_dfsTree);
-        unsigned index = 0;
-        auto addToSequence = [rpoSequence, &index](BasicBlock* block) {
+        rpoSequence = new (this, CMK_BasicBlock) BasicBlock*[m_dfsTree->GetPostOrderCount()];
+        unsigned index = 0;
+        auto addToSequence = [rpoSequence, &index](BasicBlock* block) {
             rpoSequence[index++] = block;
         };
 
         fgVisitBlocksInLoopAwareRPO(m_dfsTree, loops, addToSequence);
     }
+    else
+    {
+        assert(m_dfsTree != nullptr);
+    }
 
     // Fast path: We don't have any EH regions, so just reorder the blocks
     //
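In short, this change moves the profile-aware DFS computation from the top of fgDoReversePostOrderLayout into the cache-miss branch, and adds asserts documenting that m_dfsTree exists exactly when the cached loop-aware RPO does. The else-branch assert encodes that, when the cached RPO is available, the DFS tree was already built elsewhere, so the previous unconditional fgComputeDfs call was redundant work on that path. Below is a minimal, self-contained sketch of the same pattern; all names in it (DfsTree, Block, computeDfs, g_cachedRpo, getRpoSequence) are hypothetical stand-ins for illustration, not the real JIT types.

#include <cassert>
#include <cstddef>
#include <vector>

struct Block {};                        // stand-in for BasicBlock (hypothetical)
struct DfsTree                          // stand-in for the flowgraph DFS tree (hypothetical)
{
    size_t postOrderCount = 0;
};

static DfsTree*             g_dfsTree   = nullptr; // analogous to m_dfsTree
static std::vector<Block*>* g_cachedRpo = nullptr; // analogous to the cached traversal

static DfsTree* computeDfs()            // stand-in for fgComputeDfs</* useProfile */ true>()
{
    return new DfsTree{};
}

static std::vector<Block*>* getRpoSequence()
{
    std::vector<Block*>* rpoSequence = g_cachedRpo;
    if (rpoSequence == nullptr)
    {
        // Slow path: no cached traversal, so the DFS must not exist yet;
        // build it here instead of unconditionally up front.
        assert(g_dfsTree == nullptr);
        g_dfsTree   = computeDfs();
        rpoSequence = new std::vector<Block*>();
        rpoSequence->reserve(g_dfsTree->postOrderCount);
        // ... populate rpoSequence via a loop-aware RPO visit ...
    }
    else
    {
        // Fast path: whoever populated the cache also built the DFS tree.
        assert(g_dfsTree != nullptr);
    }
    return rpoSequence;
}

int main()
{
    std::vector<Block*>* rpo = getRpoSequence(); // takes the slow path here
    assert(g_dfsTree != nullptr);
    delete rpo;
    delete g_dfsTree;
    return 0;
}

The asserts cost nothing in release builds but turn the implicit coupling between the two caches into a checked invariant, so a future change that invalidates one without the other fails loudly in debug builds.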