@@ -1013,7 +1013,7 @@ StaticRuntime::Deallocator::~Deallocator() {
   // Assume cleanup cannot throw.
   cleanupImpl();
 #ifndef NDEBUG
-  runtime_.check_for_memory_leak(false);
+  runtime_.check_for_memory_leak(/* output_returned */ false);
 #endif
 }

@@ -1074,8 +1074,11 @@ c10::IValue StaticRuntime::run_impl(
   if (static_module_.num_outputs() > 1) {
     return move_outputs_to_tuple(static_module_.num_outputs());
   }
+
+  DCHECK(check_for_memory_leak(/* output_returned */ false));
   // The exact output tensor should never be managed.
   DCHECK(!isManagedOutputTensor(*outputs_[0]));
+
   // use move here. Otherwise, clean up outputs_[0] explicitly
   return std::move(*outputs_[0]);
 }
@@ -1437,9 +1440,7 @@ StaticRuntime::IndividualMetrics StaticRuntime::benchmark_individual_ops(
     output = move_outputs_to_tuple(static_module_.num_outputs());
   }

-#ifndef NDEBUG
-  check_for_memory_leak(false);
-#endif
+  DCHECK(check_for_memory_leak(/* output_returned */ false));

   // use move here. Otherwise, clean up outputs_[0] explicitly
   output = std::move(*outputs_[0]);
@@ -1478,9 +1479,9 @@ StaticRuntime::IndividualMetrics StaticRuntime::benchmark_individual_ops(
   return results;
 }

-void StaticRuntime::check_for_memory_leak(bool output_returned) {
+bool StaticRuntime::check_for_memory_leak(bool output_returned) {
   if (!static_module_.opts().cleanup_activations) {
-    return;
+    return true;
   }

   // check for inputs
@@ -1531,6 +1532,7 @@ void StaticRuntime::check_for_memory_leak(bool output_returned) {
     }
   }
   VLOG(1) << "Finished checking for memory leak";
+  return true;
 }

 void StaticRuntime::deallocateOutputTensors() {
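For reference, the diff converts check_for_memory_leak() from a void function fenced by #ifndef NDEBUG at each call site into a bool-returning function invoked inside DCHECK(...), so the leak check still runs only in debug builds while each call site collapses to a single line. Below is a minimal self-contained sketch of that pattern; the DCHECK macro here is a simplified stand-in for the c10/glog macro PyTorch uses, and check_for_consistency is a hypothetical placeholder for the real check, not a PyTorch API:

#include <cstdlib>
#include <iostream>

// Simplified stand-in for a glog/c10-style DCHECK: in debug builds it
// evaluates the condition and aborts on failure; with NDEBUG defined it
// expands to a no-op, so the checked expression is never evaluated.
#ifndef NDEBUG
#define DCHECK(cond)                                    \
  do {                                                  \
    if (!(cond)) {                                      \
      std::cerr << "DCHECK failed: " << #cond << '\n';  \
      std::abort();                                     \
    }                                                   \
  } while (false)
#else
#define DCHECK(cond) ((void)0)
#endif

// Returning bool (instead of void) lets the check sit inside DCHECK,
// so call sites no longer need their own #ifndef NDEBUG guards.
// Hypothetical example check, standing in for check_for_memory_leak.
bool check_for_consistency() {
  // ... real validation would go here; return true when everything passes.
  return true;
}

int main() {
  // Debug builds run the check; release builds strip it entirely.
  DCHECK(check_for_consistency());
  return 0;
}

The same idea applies to any invariant check that should vanish from release builds: have it return true on success and wrap the call in DCHECK rather than scattering #ifndef NDEBUG blocks across call sites.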