@@ -92,7 +92,7 @@ def _name_base_dir(self) -> Path:
         base_dir = Path(f"./tuning_{timestamp}")
         return base_dir
 
-    def _set_run_log(self, run_log: Path):
+    def set_run_log(self, run_log: Path):
        object.__setattr__(self, "run_log", run_log)
 
    def get_candidate_spec_filename(self, candidate_id: int) -> str:
@@ -334,10 +334,10 @@ def parse_arguments(
     return parser.parse_args()
 
 
-def setup_logging(args: argparse.Namespace, path_config: PathConfig):
+def setup_logging(args: argparse.Namespace, path_config: PathConfig) -> logging.Logger:
     log_file_name = f"autotune_{args.input_file.stem}.log"
     run_log_path = path_config.base_dir / log_file_name
-    path_config._set_run_log(run_log_path)
+    path_config.set_run_log(run_log_path)
 
     # Create file handler for logging to a file
     if path_config.run_log is None:
@@ -384,7 +384,9 @@ def format(self, record):
     # Log all arguments
     logging.debug(f"Input Arguments:")
     for arg, value in vars(args).items():
-        tune_logger.info(f"{arg}: {value}")
+        logging.debug(f"{arg}: {value}")
+
+    return logging.getLogger()
 
 
 def handle_error(
@@ -717,10 +719,18 @@ def generate_candidate_specs(
         tune_logger.exception("Error in candidate_gen.py:")
         raise
 
-    logging.info(f"Generated [{len(candidates) - 1}] candidates")
+    logging.debug(f"Generated [{len(candidates) - 1}] candidates")
     return candidates
 
 
+def get_compilation_success_rate(compiled_candidates: list[Optional[int]]) -> float:
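+    """Return the fraction of candidates that compiled successfully (non-None entries)."""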
+    if not compiled_candidates:
+        return 0.0
+    successful_candidates = [c for c in compiled_candidates if c is not None]
+    success_rate = float(len(successful_candidates)) / float(len(compiled_candidates))
+    return success_rate
+
+
 def collision_handler(index_hash_list: list[tuple[int, str]]) -> tuple[bool, list[int]]:
     """If a collision is found, generate a list of new indexes. If no collision, `unique_indexes = []`"""
     # Check if candidate produces the same .vmfb
@@ -800,11 +810,11 @@ def compile(
     compiled_candidates = multiprocess_progress_wrapper(
         num_worker=num_worker, task_list=task_list, function=run_iree_compile_command
     )
-    compiled_candidates = [c for c in compiled_candidates if c is not None]
-    success_rate = float(len(compiled_candidates)) / float(len(candidates))
-    logging.info(
+    success_rate = get_compilation_success_rate(compiled_candidates)
+    logging.debug(
         f"Successfully compiled [{len(compiled_candidates)}] candidates. Success rate: {success_rate:.2f}"
     )
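+    # Filter out failed compilations only after the overall success rate has been logged.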
+    compiled_candidates = [c for c in compiled_candidates if c is not None]
 
     # Remove duplicate vmfbs from the candidate list.
     compiled_candidate_hashes = []
@@ -818,7 +828,7 @@ def compile(
     if collision_detected:
         compiled_candidates = unique_compiled_candidates
 
-    logging.info(f"Produced [{len(compiled_candidates)}] unique vmfbs")
+    logging.debug(f"Produced [{len(compiled_candidates)}] unique vmfbs")
     return compiled_candidates
 
 
@@ -875,7 +885,8 @@ def get_speedup(result: BenchmarkResult) -> float:
             speedup = f"{round(get_speedup(r) * 100, 2)}% of baseline"
         else:
             speedup = "baseline unavailable"
-        logging.info(f"Candidate {r.candidate_id} time: {r.time:.2f} ({speedup})")
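+        # Include the time unit (ms) and the speedup relative to the baseline in the log.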
+        result = f"Candidate {r.candidate_id} time: {r.time:.2f} ms ({speedup})"
+        logging.info(result)
     return best_results
 
 