From f21215bf06bf453f0822abae3309a8f93fe1670d Mon Sep 17 00:00:00 2001
From: Alexander Dokuchaev
Date: Fri, 31 Jan 2025 17:52:03 +0200
Subject: [PATCH] Enable pyupgrade and flake8-errmsg rules in ruff (#3221)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Changes

https://docs.astral.sh/ruff/rules/#pyupgrade-up
https://docs.astral.sh/ruff/rules/#flake8-errmsg-em

Rules regarding annotations will be enabled in the next PR:

```
"UP006", # non-pep585-annotation
"UP007", # non-pep604-annotation-union
"UP035", # deprecated-import
"UP038", # non-pep604-isinstance
"UP045", # non-pep604-annotation-optional
```

The before/after shape of these rewrites is sketched after the losses.py diff below.

---
 custom_version.py | 3 +- examples/common/paths.py | 2 +- .../torch/classification/bootstrap_nas.py | 20 +-- .../classification/bootstrap_nas_search.py | 18 +-- .../main.py | 2 +- .../torch/ssd300_vgg16/main.py | 2 +- .../torch_fx/resnet18/main.py | 2 +- .../torch/resnet18/main.py | 2 +- .../classification/datasets/builder.py | 3 +- .../datasets/preprocessing/utils.py | 12 +- .../datasets/preprocessing_selector.py | 5 +- examples/tensorflow/classification/main.py | 15 +- examples/tensorflow/common/dataset_builder.py | 10 +- examples/tensorflow/common/distributed.py | 7 +- examples/tensorflow/common/model_loader.py | 3 +- examples/tensorflow/common/models.py | 6 +- .../object_detection/architecture/factory.py | 9 +- .../object_detection/architecture/fpn.py | 15 +- .../object_detection/architecture/heads.py | 21 +-- .../object_detection/architecture/nn_ops.py | 3 +- .../object_detection/architecture/resnet.py | 8 +- .../common/object_detection/base_model.py | 3 +- .../object_detection/checkpoint_utils.py | 5 +- .../datasets/preprocessing_selector.py | 3 +- .../evaluation/coco_evaluator.py | 6 +- .../object_detection/evaluation/coco_utils.py | 9 +- .../common/object_detection/losses.py | 4 +- .../common/object_detection/ops/roi_ops.py | 2 +- .../object_detection/utils/argmax_matcher.py | 11 +- .../balanced_positive_negative_sampler.py | 24 ++- .../object_detection/utils/box_coder.py | 7 +- .../common/object_detection/utils/box_list.py | 15 +- .../object_detection/utils/box_utils.py | 21 ++- .../common/object_detection/utils/matcher.py | 6 +- .../object_detection/utils/shape_utils.py | 3 +- .../object_detection/utils/target_assigner.py | 6 +- examples/tensorflow/common/optimizer.py | 3 +- .../tensorflow/common/prepare_checkpoint.py | 11 +- examples/tensorflow/common/scheduler.py | 15 +- examples/tensorflow/common/utils.py | 10 +- examples/tensorflow/object_detection/main.py | 34 +++-- .../object_detection/models/model_selector.py | 6 +- .../models/retinanet_model.py | 9 +- .../object_detection/models/yolo_v4_model.py | 2 +- .../tensorflow/segmentation/evaluation.py | 30 ++-- .../segmentation/models/maskrcnn_model.py | 10 +- .../segmentation/models/model_selector.py | 6 +- examples/tensorflow/segmentation/train.py | 20 +-- examples/torch/classification/main.py | 12 +- .../models/mobilenet_v2_32x32.py | 6 +- .../staged_quantization_worker.py | 13 +- examples/torch/common/argparser.py | 3 +- examples/torch/common/distributed.py | 10 +- examples/torch/common/execution.py | 2 +- examples/torch/common/export.py | 3 +- examples/torch/common/model_loader.py | 10 +- .../classification/mobilenet_v2_tv_092.py | 6 +- .../classification/mobilenet_v3_tv_092.py | 15 +- .../models/classification/resnet_cifar10.py | 11 +- .../models/classification/rmnet_cifar.py | 2 +- .../torch/common/models/segmentation/enet.py | 15 +- .../torch/common/models/segmentation/icnet.py | 
3 +- .../torch/common/models/segmentation/unet.py | 3 +- examples/torch/common/optimizer.py | 6 +- .../torch/common/restricted_pickle_module.py | 3 +- examples/torch/common/utils.py | 8 +- .../torch/object_detection/datasets/coco.py | 5 +- .../object_detection/datasets/voc0712.py | 7 +- examples/torch/object_detection/eval.py | 34 ++--- .../layers/functions/detection.py | 3 +- .../layers/functions/prior_box.py | 5 +- examples/torch/object_detection/main.py | 12 +- .../object_detection/models/ssd_mobilenet.py | 3 +- .../semantic_segmentation/datasets/camvid.py | 9 +- .../datasets/cityscapes.py | 9 +- .../datasets/mapillary.py | 9 +- examples/torch/semantic_segmentation/main.py | 47 +++--- .../torch/semantic_segmentation/metric/iou.py | 3 +- examples/torch/semantic_segmentation/test.py | 2 +- examples/torch/semantic_segmentation/train.py | 2 +- .../semantic_segmentation/utils/checkpoint.py | 2 +- .../torch/semantic_segmentation/utils/data.py | 15 +- nncf/common/accuracy_aware_training/runner.py | 4 +- .../accuracy_aware_training/runner_factory.py | 6 +- .../accuracy_aware_training/training_loop.py | 17 ++- nncf/common/composite_compression.py | 14 +- nncf/common/compression.py | 6 +- nncf/common/deprecation.py | 3 +- nncf/common/factory.py | 30 ++-- nncf/common/graph/graph.py | 25 ++-- nncf/common/graph/operator_metatypes.py | 3 +- nncf/common/graph/patterns/manager.py | 6 +- nncf/common/graph/patterns/patterns.py | 6 +- nncf/common/hardware/config.py | 18 ++- .../initialization/batchnorm_adaptation.py | 3 +- nncf/common/logging/progress_bar.py | 6 +- nncf/common/logging/track_progress.py | 12 +- nncf/common/pruning/clusterization.py | 12 +- nncf/common/pruning/schedulers.py | 3 +- .../common/pruning/shape_pruning_processor.py | 3 +- nncf/common/pruning/symbolic_mask.py | 6 +- nncf/common/pruning/utils.py | 6 +- .../pruning/weights_flops_calculator.py | 6 +- .../quantization/initialization/range.py | 6 +- .../quantizer_propagation/graph.py | 69 +++++---- .../quantizer_propagation/grouping.py | 8 +- .../quantizer_propagation/solver.py | 41 +++--- .../quantizer_propagation/visualizer.py | 2 +- nncf/common/quantization/quantizer_setup.py | 21 ++- nncf/common/quantization/structs.py | 7 +- nncf/common/schedulers.py | 3 +- nncf/common/sparsity/schedulers.py | 6 +- nncf/common/stateful_classes_registry.py | 24 +-- nncf/common/statistics.py | 5 +- nncf/common/strip.py | 3 +- nncf/common/tensor.py | 6 +- nncf/common/tensor_statistics/aggregator.py | 3 +- .../tensor_statistics/statistic_point.py | 3 +- .../statistics_serializer.py | 3 +- .../tensor_statistics/statistics_validator.py | 11 +- nncf/common/utils/backend.py | 3 +- nncf/common/utils/helpers.py | 4 +- nncf/common/utils/os.py | 3 +- nncf/common/utils/registry.py | 6 +- nncf/config/config.py | 3 +- nncf/config/extractors.py | 30 ++-- nncf/config/schema.py | 12 +- nncf/data/generators.py | 14 +- .../experimental/common/pruning/operations.py | 6 +- .../common/pruning/propagation_data.py | 2 +- .../common/tensor_statistics/collectors.py | 16 +- .../statistical_functions.py | 3 +- .../common/tensor_statistics/statistics.py | 3 +- .../algorithms/post_training/algorithm.py | 3 +- .../algorithms/range_estimator/algorithm.py | 3 +- .../quantizers/openvino_quantizer.py | 3 +- .../quantizers/torch_ao_adapter.py | 8 +- nncf/experimental/tensorflow/context.py | 3 +- .../tensorflow/graph/argprovider.py | 21 ++- .../tensorflow/graph/converter.py | 3 +- .../tensorflow/graph/model_transformer.py | 3 +- .../graph/transformations/layout.py | 3 +- 
nncf/experimental/tensorflow/patch_tf.py | 3 +- .../tensorflow/quantization/algorithm.py | 16 +- .../tensorflow/quantization/quantizers.py | 10 +- nncf/experimental/torch/fx/node_utils.py | 9 +- .../torch/fx/quantization/quantize_model.py | 6 +- .../torch/fx/quantization/quantize_pt2e.py | 3 +- nncf/experimental/torch/fx/transformations.py | 17 ++- .../bootstrapNAS/elasticity/elastic_kernel.py | 17 ++- .../bootstrapNAS/elasticity/elastic_width.py | 34 +++-- .../torch/nas/bootstrapNAS/search/search.py | 15 +- .../nas/bootstrapNAS/training/lr_scheduler.py | 3 +- .../training/progressive_shrinking_builder.py | 3 +- .../nas/bootstrapNAS/training/scheduler.py | 16 +- .../training/training_algorithm.py | 3 +- .../search_building_blocks/search_blocks.py | 5 +- .../sparsify_activations_impl.py | 16 +- .../sparsify_activations/torch_backend.py | 6 +- .../torch/sparsity/movement/algo.py | 12 +- .../torch/sparsity/movement/layers.py | 43 +++--- .../torch/sparsity/movement/scheduler.py | 15 +- .../movement/structured_mask_handler.py | 6 +- .../graph/graph_visualization.py | 6 +- .../nncf_graph/nncf_graph_builder.py | 9 +- .../torch2/function_hook/wrapper.py | 40 +++-- nncf/experimental/torch2/model_transformer.py | 3 +- .../torch2/quantization/quantize_model.py | 9 +- .../torch2/statistics/aggregator.py | 3 +- nncf/onnx/graph/model_transformer.py | 11 +- nncf/onnx/graph/nncf_graph_builder.py | 3 +- nncf/onnx/graph/node_utils.py | 3 +- nncf/onnx/graph/onnx_helper.py | 12 +- nncf/onnx/quantization/quantize_model.py | 9 +- .../onnx/quantization/quantizer_parameters.py | 11 +- .../graph/metatypes/openvino_metatypes.py | 3 +- nncf/openvino/graph/model_transformer.py | 18 ++- nncf/openvino/graph/nncf_graph_builder.py | 3 +- nncf/openvino/graph/node_utils.py | 8 +- nncf/openvino/optimized_functions/models.py | 47 +++--- nncf/openvino/quantization/quantize_model.py | 5 +- nncf/quantization/advanced_parameters.py | 21 ++- .../algorithms/accuracy_control/algorithm.py | 5 +- .../algorithms/accuracy_control/evaluator.py | 16 +- .../accuracy_control/rank_functions.py | 5 +- .../algorithms/bias_correction/algorithm.py | 8 +- .../bias_correction/torch_fx_backend.py | 3 +- .../channel_alignment/openvino_backend.py | 6 +- .../fast_bias_correction/algorithm.py | 8 +- .../algorithms/layerwise/engine.py | 8 +- .../algorithms/layerwise/openvino_iterator.py | 5 +- .../algorithms/min_max/algorithm.py | 61 ++++---- .../algorithms/min_max/onnx_backend.py | 3 +- .../algorithms/min_max/openvino_backend.py | 3 +- .../algorithms/min_max/torch_backend.py | 3 +- .../algorithms/min_max/torch_fx_backend.py | 3 +- .../algorithms/post_training/algorithm.py | 3 +- .../algorithms/smooth_quant/algorithm.py | 17 +-- .../smooth_quant/openvino_backend.py | 9 +- .../algorithms/smooth_quant/torch_backend.py | 3 +- .../smooth_quant/torch_fx_backend.py | 3 +- .../weight_compression/algorithm.py | 29 ++-- .../algorithms/weight_compression/awq.py | 5 +- .../algorithms/weight_compression/gptq.py | 17 ++- .../weight_compression/lora_correction.py | 6 +- .../weight_compression/mixed_precision.py | 16 +- .../weight_compression/openvino_backend.py | 6 +- .../weight_compression/scale_estimation.py | 8 +- .../weight_compression/torch_backend.py | 13 +- .../weight_compression/torch_fx_backend.py | 9 +- .../weight_compression/weight_lowering.py | 22 +-- nncf/quantization/fake_quantize.py | 6 +- nncf/quantization/quantize_model.py | 60 ++++---- nncf/scopes.py | 3 +- nncf/tensor/functions/dispatcher.py | 6 +- nncf/tensor/functions/io.py | 3 +- 
nncf/tensor/functions/numeric.py | 6 +- nncf/tensor/functions/numpy_numeric.py | 3 +- nncf/tensor/functions/torch_numeric.py | 15 +- nncf/tensor/tensor.py | 3 +- .../keras_model_utils.py | 3 +- .../accuracy_aware_training/runner.py | 3 +- nncf/tensorflow/api/composite_compression.py | 3 +- nncf/tensorflow/api/compression.py | 3 +- nncf/tensorflow/batchnorm_adaptation.py | 3 +- .../callbacks/checkpoint_callback.py | 3 +- .../callbacks/statistics_callback.py | 8 +- nncf/tensorflow/exporter.py | 3 +- nncf/tensorflow/graph/converter.py | 9 +- .../graph/metatypes/keras_layers.py | 6 +- nncf/tensorflow/graph/model_transformer.py | 46 +++--- .../graph/transformations/commands.py | 15 +- nncf/tensorflow/graph/utils.py | 5 +- nncf/tensorflow/helpers/model_creation.py | 6 +- nncf/tensorflow/helpers/utils.py | 3 +- nncf/tensorflow/layers/wrapper.py | 13 +- nncf/tensorflow/pruning/base_algorithm.py | 3 +- .../pruning/filter_pruning/algorithm.py | 11 +- nncf/tensorflow/pruning/utils.py | 9 +- nncf/tensorflow/quantization/algorithm.py | 20 ++- nncf/tensorflow/quantization/init_range.py | 14 +- .../tensorflow/quantization/quantize_model.py | 15 +- nncf/tensorflow/quantization/quantizers.py | 16 +- nncf/tensorflow/quantization/utils.py | 3 +- .../sparsity/magnitude/algorithm.py | 8 +- .../sparsity/magnitude/operation.py | 3 +- nncf/tensorflow/sparsity/rb/algorithm.py | 6 +- nncf/tensorflow/sparsity/rb/loss.py | 3 +- nncf/tensorflow/sparsity/rb/operation.py | 5 +- nncf/tensorflow/sparsity/utils.py | 3 +- .../tensor_statistics/statistics.py | 6 +- nncf/tensorflow/utils/state.py | 5 +- nncf/torch/accuracy_aware_training/runner.py | 3 +- nncf/torch/automl/agent/ddpg/ddpg.py | 8 +- nncf/torch/automl/agent/ddpg/memory.py | 6 +- .../automl/environment/quantization_env.py | 29 ++-- nncf/torch/checkpoint_loading.py | 2 +- nncf/torch/composite_compression.py | 3 +- nncf/torch/compression_method_api.py | 6 +- nncf/torch/dynamic_graph/context.py | 4 +- nncf/torch/dynamic_graph/io_handling.py | 9 +- .../layer_attributes_handlers.py | 6 +- nncf/torch/dynamic_graph/patch_pytorch.py | 9 +- nncf/torch/dynamic_graph/scope.py | 8 +- nncf/torch/dynamic_graph/scope_access.py | 8 +- nncf/torch/dynamic_graph/trace_functions.py | 16 +- nncf/torch/dynamic_graph/trace_tensor.py | 2 +- nncf/torch/dynamic_graph/wrappers.py | 2 +- nncf/torch/exporter.py | 6 +- nncf/torch/extensions/__init__.py | 3 +- nncf/torch/extractor.py | 9 +- nncf/torch/graph/graph.py | 3 +- nncf/torch/graph/transformations/commands.py | 7 +- .../graph/transformations/serialization.py | 9 +- nncf/torch/initialization.py | 6 +- nncf/torch/knowledge_distillation/algo.py | 3 +- nncf/torch/layers.py | 39 ++--- nncf/torch/model_creation.py | 12 +- nncf/torch/model_graph_manager.py | 17 ++- nncf/torch/module_operations.py | 6 +- nncf/torch/nncf_module_replacement.py | 11 +- nncf/torch/nncf_network.py | 39 +++-- nncf/torch/pruning/base_algo.py | 6 +- nncf/torch/pruning/filter_pruning/algo.py | 25 ++-- nncf/torch/pruning/filter_pruning/layers.py | 7 +- nncf/torch/pruning/operations.py | 7 +- nncf/torch/pruning/utils.py | 3 +- nncf/torch/quantization/adjust_padding.py | 3 +- nncf/torch/quantization/algo.py | 49 ++++--- nncf/torch/quantization/debug_interface.py | 12 +- nncf/torch/quantization/extensions.py | 5 +- nncf/torch/quantization/init_range.py | 16 +- nncf/torch/quantization/layers.py | 32 ++-- .../quantization/precision_init/autoq_init.py | 18 +-- .../precision_init/bitwidth_graph.py | 13 +- .../quantization/precision_init/hawq_init.py | 28 ++-- 
.../precision_init/manual_init.py | 7 +- .../precision_init/traces_order.py | 6 +- nncf/torch/quantization/quantize_functions.py | 6 +- nncf/torch/quantization/quantize_model.py | 9 +- nncf/torch/quantization/reference.py | 3 +- nncf/torch/quantization/strip.py | 3 +- nncf/torch/sparsity/magnitude/algo.py | 5 +- nncf/torch/sparsity/rb/algo.py | 3 +- nncf/torch/sparsity/rb/loss.py | 13 +- nncf/torch/tensor_statistics/statistics.py | 6 +- nncf/torch/utils.py | 14 +- pyproject.toml | 12 +- .../test_reducers_and_aggregators.py | 3 +- .../common/pruning/test_pruning_operations.py | 4 +- tests/common/quantization/data_generators.py | 3 +- tests/common/quantization/mock_graphs.py | 2 +- tests/common/test_logging.py | 2 +- tests/common/test_statistics_aggregator.py | 6 +- tests/cross_fw/install/common.py | 3 +- .../cross_fw/install/install_checks_torch.py | 8 +- tests/cross_fw/install/test_install.py | 3 +- tests/cross_fw/shared/command.py | 2 - tests/cross_fw/shared/helpers.py | 8 +- tests/cross_fw/shared/json.py | 2 +- tests/cross_fw/shared/nx_graph.py | 14 +- .../test_templates/test_channel_alignment.py | 3 +- tests/onnx/benchmarking/run_ptq.py | 5 +- tests/onnx/models.py | 2 +- .../test_classification_models_graph.py | 3 +- .../quantization/test_fast_bias_correction.py | 3 +- tests/onnx/test_e2e_ptq.py | 2 +- tests/onnx/test_model_transformer.py | 2 +- tests/onnx/test_statistics_caching.py | 2 +- tests/openvino/native/common.py | 2 +- tests/openvino/native/models.py | 12 +- .../openvino/native/quantization/test_gptq.py | 2 +- .../native/test_fast_bias_correction.py | 3 +- .../openvino/native/test_layer_attributes.py | 2 +- .../openvino/native/test_model_transformer.py | 4 +- .../native/test_statistics_caching.py | 2 +- tests/openvino/native/test_tensor.py | 6 +- tests/openvino/tools/calibrate.py | 76 ++++++---- tests/openvino/tools/config.py | 33 +++-- .../sparsify_activations/model_scope.py | 6 +- .../sparsify_activations/pipelines.py | 11 +- .../test_sparsify_activations_conformance.py | 3 +- tests/post_training/model_scope.py | 3 +- tests/post_training/pipelines/base.py | 3 +- .../pipelines/image_classification_base.py | 4 +- .../pipelines/lm_weight_compression.py | 8 +- .../test_quantize_conformance.py | 6 +- .../experimental/test_models/resnet.py | 11 +- tests/tensorflow/helpers.py | 3 +- .../test_algorithm_quantization.py | 2 +- .../quantization/test_unified_scales.py | 8 +- .../tensorflow/sparsity/rb/test_components.py | 2 +- tests/tensorflow/test_models/mobilenet.py | 16 +- tests/tensorflow/test_models/mobilenet_v2.py | 4 +- tests/tensorflow/test_models/nasnet.py | 138 +++++++++--------- tests/tensorflow/test_sanity_sample.py | 9 +- tests/tensorflow/test_sota_checkpoints.py | 6 +- tests/tensorflow/test_weekly.py | 5 +- .../composite/test_sparsity_quantization.py | 2 +- tests/torch/extensions_build_checks.py | 6 +- tests/torch/fx/helpers.py | 2 +- tests/torch/fx/test_fast_bias_correction.py | 3 +- tests/torch/fx/test_quantizer.py | 6 +- tests/torch/fx/test_statistics_caching.py | 2 +- tests/torch/helpers.py | 8 +- tests/torch/models_hub_test/common.py | 7 +- tests/torch/modules/test_rnn.py | 4 +- tests/torch/nas/test_elastic_depth.py | 2 +- .../test_transformation_layout.py | 3 +- .../torch/pruning/filter_pruning/test_legr.py | 4 +- tests/torch/pruning/test_onnx_export.py | 6 +- tests/torch/ptq/test_fast_bias_correction.py | 6 +- .../ptq/test_reducers_and_aggregators.py | 3 +- tests/torch/ptq/test_statistics_caching.py | 2 +- tests/torch/ptq/test_weights_compression.py | 2 +- 
tests/torch/qat/helpers.py | 9 +- tests/torch/qat/test_qat_object_detection.py | 4 +- tests/torch/qat/test_qat_segmentation.py | 11 +- .../quantization/test_algo_quantization.py | 8 +- .../quantization/test_autoq_precision_init.py | 2 +- .../quantization/test_hawq_precision_init.py | 6 +- .../quantization/test_logarithm_scale.py | 6 +- .../test_manual_precision_init.py | 4 +- tests/torch/quantization/test_range_init.py | 89 +++++------ tests/torch/quantization/test_strip.py | 3 +- tests/torch/sample_test_validator.py | 4 +- .../torch/sparsity/movement/test_training.py | 2 +- .../movement/training_scripts/run_glue.py | 2 +- tests/torch/test_compressed_graph.py | 8 +- tests/torch/test_compression_lr_multiplier.py | 9 +- tests/torch/test_compression_training.py | 6 +- .../test_distributed_data_parallel_mode.py | 3 +- tests/torch/test_extensions_build.py | 6 +- tests/torch/test_graph_building.py | 4 +- tests/torch/test_knowledge_distillation.py | 2 +- tests/torch/test_layer_attributes.py | 2 +- tests/torch/test_model_transformer.py | 11 +- tests/torch/test_models/preact_resnet.py | 2 +- tests/torch/test_models/squeezenet.py | 3 +- tests/torch/test_models/ssd_mobilenet.py | 3 +- tests/torch/test_models/swin.py | 2 +- tests/torch/test_sota_checkpoints.py | 3 +- .../graph/test_build_graph_mode.py | 3 +- tools/add_new_quantization_parameters.py | 2 +- tools/benchmark.py | 9 +- tools/clip_dot.py | 3 +- tools/correct_checkpoint.py | 2 +- tools/extract_ov_subgraph.py | 6 +- tools/ir_utils.py | 4 +- tools/memory_monitor.py | 11 +- tools/visualize_compression_results.py | 8 +- 411 files changed, 2195 insertions(+), 1679 deletions(-) diff --git a/custom_version.py b/custom_version.py index 070e2b60a20..1e925f996a0 100644 --- a/custom_version.py +++ b/custom_version.py @@ -60,7 +60,8 @@ def get_custom_version() -> str: r"^__version__ = ['\"]((\d+\.\d+\.\d+)([^'\"]*))['\"]", Path(NNCF_VERSION_FILE).read_text(), re.M ) if not version_match: - raise RuntimeError("Unable to find version string.") + msg = "Unable to find version string." 
+ raise RuntimeError(msg) version_full = version_match.group(1) version_value = version_match.group(2) diff --git a/examples/common/paths.py b/examples/common/paths.py index 27eed6a82af..7fdde58a329 100644 --- a/examples/common/paths.py +++ b/examples/common/paths.py @@ -17,7 +17,7 @@ def configure_paths(config: SampleConfig, run_name: str): config.name = run_name d = datetime.datetime.now() - run_id = "{:%Y-%m-%d__%H-%M-%S}".format(d) + run_id = f"{d:%Y-%m-%d__%H-%M-%S}" log_dir = Path(config.log_dir) / run_name / run_id log_dir.mkdir(parents=True, exist_ok=True) config.log_dir = str(log_dir) diff --git a/examples/experimental/torch/classification/bootstrap_nas.py b/examples/experimental/torch/classification/bootstrap_nas.py index ec83f636a30..b7c548d2215 100644 --- a/examples/experimental/torch/classification/bootstrap_nas.py +++ b/examples/experimental/torch/classification/bootstrap_nas.py @@ -204,31 +204,23 @@ def validate_model_fn_top1(model_, loader_): validate_model_fn_top1, val_loader, config.checkpoint_save_dir, tensorboard_writer=config.tb ) - logger.info("Best config: {best_config}".format(best_config=best_config)) - logger.info("Performance metrics: {performance_metrics}".format(performance_metrics=performance_metrics)) + logger.info(f"Best config: {best_config}") + logger.info(f"Performance metrics: {performance_metrics}") search_algo.visualize_search_progression() # Maximal subnet elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet() search_algo.bn_adaptation.run(nncf_network) top1_acc = validate_model_fn_top1(nncf_network, val_loader) - logger.info( - "Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format( - top1_acc=top1_acc, - macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000, - ) - ) + macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000 + logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}") # Best found subnet elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config) search_algo.bn_adaptation.run(nncf_network) top1_acc = validate_model_fn_top1(nncf_network, val_loader) - logger.info( - "Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format( - top1_acc=top1_acc, - macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000, - ) - ) + macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000 + logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}") elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx")) if "test" in config.mode: diff --git a/examples/experimental/torch/classification/bootstrap_nas_search.py b/examples/experimental/torch/classification/bootstrap_nas_search.py index d53011bcc3f..2bb7a85896c 100644 --- a/examples/experimental/torch/classification/bootstrap_nas_search.py +++ b/examples/experimental/torch/classification/bootstrap_nas_search.py @@ -147,7 +147,7 @@ def validate_model_fn_top1(model_, loader_): load_state(model, model_weights, is_resume=True) top1_acc = validate_model_fn_top1(model, val_loader) - logger.info("SuperNetwork Top 1: {top1_acc}".format(top1_acc=top1_acc)) + logger.info(f"SuperNetwork Top 1: {top1_acc}") search_algo = BaseSearchAlgorithm.from_config(model, elasticity_ctrl, nncf_config) @@ -163,23 +163,15 @@ def validate_model_fn_top1(model_, loader_): elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet() 
search_algo.bn_adaptation.run(nncf_network) top1_acc = validate_model_fn_top1(nncf_network, val_loader) - logger.info( - "Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format( - top1_acc=top1_acc, - macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000, - ) - ) + macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000 + logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}") # Best found subnet elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config) search_algo.bn_adaptation.run(nncf_network) top1_acc = validate_model_fn_top1(nncf_network, val_loader) - logger.info( - "Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format( - top1_acc=top1_acc, - macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000, - ) - ) + macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000 + logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}") elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx")) search_algo.search_progression_to_csv() diff --git a/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py b/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py index e1f1c90c64a..59cbb9e0228 100644 --- a/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py +++ b/examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py @@ -132,7 +132,7 @@ def run_example(): download_and_extract(MODEL_PATH, MODEL_INFO) ov_model = ov.Core().read_model(MODEL_PATH / "stfpm_capsule.xml") - with open(MODEL_PATH / "meta_data_stfpm_capsule.json", "r", encoding="utf-8") as f: + with open(MODEL_PATH / "meta_data_stfpm_capsule.json", encoding="utf-8") as f: validation_params = json.load(f) ############################################################################### diff --git a/examples/post_training_quantization/torch/ssd300_vgg16/main.py b/examples/post_training_quantization/torch/ssd300_vgg16/main.py index c99002786d5..dfb5ba78139 100644 --- a/examples/post_training_quantization/torch/ssd300_vgg16/main.py +++ b/examples/post_training_quantization/torch/ssd300_vgg16/main.py @@ -86,7 +86,7 @@ def __getitem__(self, item: int) -> Tuple[torch.Tensor, Dict]: target = dict(image_id=[image_id], boxes=[], labels=[]) label_filepath = self.labels_path / f"{image_id:012d}.txt" if label_filepath.exists(): - with open(label_filepath, "r", encoding="utf-8") as f: + with open(label_filepath, encoding="utf-8") as f: for box_descr in f.readlines(): category_id, rel_x, rel_y, rel_w, rel_h = tuple(map(float, box_descr.split(" "))) box_x1, box_y1 = img_w * (rel_x - rel_w / 2), img_h * (rel_y - rel_h / 2) diff --git a/examples/post_training_quantization/torch_fx/resnet18/main.py b/examples/post_training_quantization/torch_fx/resnet18/main.py index 662e7c7aea8..00300b79730 100644 --- a/examples/post_training_quantization/torch_fx/resnet18/main.py +++ b/examples/post_training_quantization/torch_fx/resnet18/main.py @@ -153,7 +153,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path): return val_annotations_file = val_data_dir / "val_annotations.txt" - with open(val_annotations_file, "r") as f: + with open(val_annotations_file) as f: val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines()) for 
image_filename, image_label in val_annotation_data: from_image_filepath = val_images_dir / image_filename diff --git a/examples/quantization_aware_training/torch/resnet18/main.py b/examples/quantization_aware_training/torch/resnet18/main.py index f52e0a54d12..7fc0ad4f665 100644 --- a/examples/quantization_aware_training/torch/resnet18/main.py +++ b/examples/quantization_aware_training/torch/resnet18/main.py @@ -195,7 +195,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path): return val_annotations_file = val_data_dir / "val_annotations.txt" - with open(val_annotations_file, "r") as f: + with open(val_annotations_file) as f: val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines()) for image_filename, image_label in val_annotation_data: from_image_filepath = val_images_dir / image_filename diff --git a/examples/tensorflow/classification/datasets/builder.py b/examples/tensorflow/classification/datasets/builder.py index 3f67ec9766b..48283c50224 100644 --- a/examples/tensorflow/classification/datasets/builder.py +++ b/examples/tensorflow/classification/datasets/builder.py @@ -65,7 +65,8 @@ def dtype(self): } dtype = dtype_map.get(self._dtype, None) if dtype is None: - raise nncf.ValidationError("Invalid DType provided. Supported types: {}".format(dtype_map.keys())) + msg = f"Invalid DType provided. Supported types: {dtype_map.keys()}" + raise nncf.ValidationError(msg) return dtype diff --git a/examples/tensorflow/classification/datasets/preprocessing/utils.py b/examples/tensorflow/classification/datasets/preprocessing/utils.py index de36df28549..2e863ed7eda 100644 --- a/examples/tensorflow/classification/datasets/preprocessing/utils.py +++ b/examples/tensorflow/classification/datasets/preprocessing/utils.py @@ -41,10 +41,12 @@ def mean_image_subtraction( :return: the centered image. """ if image.get_shape().ndims != 3: - raise nncf.ValidationError("Input must be of size [height, width, C>0]") + msg = "Input must be of size [height, width, C>0]" + raise nncf.ValidationError(msg) if len(means) != num_channels: - raise nncf.ValidationError("len(means) must match the number of channels") + msg = "len(means) must match the number of channels" + raise nncf.ValidationError(msg) means = tf.broadcast_to(means, tf.shape(image)) if dtype is not None: @@ -66,10 +68,12 @@ def standardize_image( :return: the centered image. 
""" if image.get_shape().ndims != 3: - raise nncf.ValidationError("Input must be of size [height, width, C>0]") + msg = "Input must be of size [height, width, C>0]" + raise nncf.ValidationError(msg) if len(stddev) != num_channels: - raise nncf.ValidationError("len(stddev) must match the number of channels") + msg = "len(stddev) must match the number of channels" + raise nncf.ValidationError(msg) stddev = tf.broadcast_to(stddev, tf.shape(image)) if dtype is not None: diff --git a/examples/tensorflow/classification/datasets/preprocessing_selector.py b/examples/tensorflow/classification/datasets/preprocessing_selector.py index daaad510a28..6577377beb3 100644 --- a/examples/tensorflow/classification/datasets/preprocessing_selector.py +++ b/examples/tensorflow/classification/datasets/preprocessing_selector.py @@ -30,9 +30,8 @@ def get_preprocessing(dataset_name, model_name, preset=None): if not preset: preset = dataset_name if preset not in PREPROCESSING_FN_MAP: - raise nncf.ValidationError( - "Preprocessing for dataset {} and model {} was not recognized".format(dataset_name, model_name) - ) + msg = f"Preprocessing for dataset {dataset_name} and model {model_name} was not recognized" + raise nncf.ValidationError(msg) ext_kwargs = {} if preset == "imagenet2012": diff --git a/examples/tensorflow/classification/main.py b/examples/tensorflow/classification/main.py index c00882e8728..3ca9c20cce4 100644 --- a/examples/tensorflow/classification/main.py +++ b/examples/tensorflow/classification/main.py @@ -96,7 +96,7 @@ def get_num_classes(dataset): else: num_classes = 1000 - logger.info("The sample is started with {} classes".format(num_classes)) + logger.info(f"The sample is started with {num_classes} classes") return num_classes @@ -104,17 +104,18 @@ def load_checkpoint(checkpoint, ckpt_path): logger.info("Load from checkpoint is enabled.") if tf.io.gfile.isdir(ckpt_path): path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path) - logger.info("Latest checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Latest checkpoint: {path_to_checkpoint}") else: path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None - logger.info("Provided checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Provided checkpoint: {path_to_checkpoint}") if not path_to_checkpoint: logger.info("No checkpoint detected.") if ckpt_path: - raise nncf.ValidationError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}") + msg = f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}" + raise nncf.ValidationError(msg) - logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint)) + logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint") status = checkpoint.restore(path_to_checkpoint) status.expect_partial() @@ -284,7 +285,7 @@ def run(config): if "export" in config.mode: save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") def export(config): @@ -319,7 +320,7 @@ def export(config): save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") def main(argv): diff --git a/examples/tensorflow/common/dataset_builder.py b/examples/tensorflow/common/dataset_builder.py index 187f823e4bf..6ed66f1e465 100644 --- 
a/examples/tensorflow/common/dataset_builder.py +++ b/examples/tensorflow/common/dataset_builder.py @@ -88,7 +88,8 @@ def build(self): builder = dataset_builders.get(self._dataset_type, None) if builder is None: - raise nncf.UnknownDatasetError("Unknown dataset type {}".format(self._dataset_type)) + msg = f"Unknown dataset type {self._dataset_type}" + raise nncf.UnknownDatasetError(msg) dataset = builder() dataset = self._pipeline(dataset) @@ -96,7 +97,7 @@ def build(self): return dataset def _load_tfds(self): - logger.info("Using TFDS to load {} data.".format(self._split)) + logger.info(f"Using TFDS to load {self._split} data.") set_hard_limit_num_open_files() @@ -119,13 +120,14 @@ def _load_tfds(self): return dataset def _load_tfrecords(self): - logger.info("Using TFRecords to load {} data.".format(self._split)) + logger.info(f"Using TFRecords to load {self._split} data.") dataset_key = self._dataset_name.replace("/", "") if dataset_key in self._tfrecord_datasets: self._dataset_loader = self._tfrecord_datasets[dataset_key](config=self._config, is_train=self._is_train) else: - raise nncf.UnknownDatasetError("Unknown dataset name: {}".format(self._dataset_name)) + msg = f"Unknown dataset name: {self._dataset_name}" + raise nncf.UnknownDatasetError(msg) dataset = self._dataset_loader.as_dataset() diff --git a/examples/tensorflow/common/distributed.py b/examples/tensorflow/common/distributed.py index 27bf3610521..edf3c416c15 100644 --- a/examples/tensorflow/common/distributed.py +++ b/examples/tensorflow/common/distributed.py @@ -28,12 +28,13 @@ def get_distribution_strategy(config): if "CUDA_VISIBLE_DEVICES" not in os.environ or _gpu_id in os.environ["CUDA_VISIBLE_DEVICES"].split(","): os.environ["CUDA_VISIBLE_DEVICES"] = _gpu_id else: - raise nncf.ValidationError( - "GPU with id = {id} was not found in the specified " + msg = ( + f"GPU with id = {_gpu_id} was not found in the specified " "CUDA_VISIBLE_DEVICES environment variable. " "Please do not export the CUDA_VISIBLE_DEVICES environment variable " - "or specify GPU with id = {id} in it".format(id=_gpu_id) + f"or specify GPU with id = {_gpu_id} in it" ) + raise nncf.ValidationError(msg) gpus = tf.config.list_physical_devices("GPU") diff --git a/examples/tensorflow/common/model_loader.py b/examples/tensorflow/common/model_loader.py index fab20bd9230..b8569c7aee8 100644 --- a/examples/tensorflow/common/model_loader.py +++ b/examples/tensorflow/common/model_loader.py @@ -21,7 +21,8 @@ def get_model(model_name, input_shape=None, pretrained=True, num_classes=1000, w if model_name in AVAILABLE_MODELS: model = AVAILABLE_MODELS[model_name] else: - raise Exception("Undefined model name: {}".format(model_name)) + msg = f"Undefined model name: {model_name}" + raise Exception(msg) model_params = {"classes": num_classes} if weights is not None: diff --git a/examples/tensorflow/common/models.py b/examples/tensorflow/common/models.py index 6ce1c739c40..5f7a0284ff3 100644 --- a/examples/tensorflow/common/models.py +++ b/examples/tensorflow/common/models.py @@ -81,7 +81,7 @@ def MobileNetV3(stack_fn, last_point_ch, input_shape=None, model_type="large", * x = tf.keras.layers.Activation(activation="softmax", name="Predictions")(x) # Create model. 
- model = tf.keras.Model(img_input, x, name="MobilenetV3{}".format(model_type)) + model = tf.keras.Model(img_input, x, name=f"MobilenetV3{model_type}") BASE_WEIGHT_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/" WEIGHTS_HASHES = { @@ -89,7 +89,7 @@ def MobileNetV3(stack_fn, last_point_ch, input_shape=None, model_type="large", * "small": "8768d4c2e7dee89b9d02b2d03d65d862", } - file_name = "weights_mobilenet_v3_{}_224_1.0_float.h5".format(model_type) + file_name = f"weights_mobilenet_v3_{model_type}_224_1.0_float.h5" file_hash = WEIGHTS_HASHES[model_type] weights_path = tf.keras.utils.get_file( @@ -185,7 +185,7 @@ def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio, ac infilters = tf.keras.backend.int_shape(x)[channel_axis] if block_id: # Expand - prefix = "expanded_conv_{}/".format(block_id) + prefix = f"expanded_conv_{block_id}/" x = tf.keras.layers.Conv2D( _depth(infilters * expansion), kernel_size=1, padding="same", use_bias=False, name=prefix + "expand" )(x) diff --git a/examples/tensorflow/common/object_detection/architecture/factory.py b/examples/tensorflow/common/object_detection/architecture/factory.py index 43274dbd5a0..a94a0feb74b 100644 --- a/examples/tensorflow/common/object_detection/architecture/factory.py +++ b/examples/tensorflow/common/object_detection/architecture/factory.py @@ -37,14 +37,17 @@ def backbone_generator(params): norm_activation=norm_activation_generator(params.model_params.norm_activation), ) else: - raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model)) + msg = f"Backbone {backbone_name} is not supported for {params.model} model." + raise ValueError(msg) elif params.model == "YOLOv4": if backbone_name == "darknet": backbone_fn = darknet.CSPDarknet53() else: - raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model)) + msg = f"Backbone {backbone_name} is not supported for {params.model} model." + raise ValueError(msg) else: - raise ValueError("Model {} is not supported.".format(params.model)) + msg = f"Model {params.model} is not supported." + raise ValueError(msg) return backbone_fn diff --git a/examples/tensorflow/common/object_detection/architecture/fpn.py b/examples/tensorflow/common/object_detection/architecture/fpn.py index 96d17976ebe..834b3746ae7 100644 --- a/examples/tensorflow/common/object_detection/architecture/fpn.py +++ b/examples/tensorflow/common/object_detection/architecture/fpn.py @@ -60,7 +60,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." 
+ raise ValueError(msg) self._use_batch_norm = use_batch_norm self._norm_activation = norm_activation @@ -72,10 +73,10 @@ def __init__( for level in range(self._min_level, self._max_level + 1): if self._use_batch_norm: - self._norm_activations[level] = norm_activation(use_activation=False, name="p%d-bn" % level) + self._norm_activations[level] = norm_activation(use_activation=False, name=f"p{level}-bn") self._lateral_conv2d_op[level] = self._conv2d_op( - filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name="l%d" % level + filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name=f"l{level}" ) self._post_hoc_conv2d_op[level] = self._conv2d_op( @@ -83,11 +84,11 @@ def __init__( strides=(1, 1), kernel_size=(3, 3), padding="same", - name="post_hoc_d%d" % level, + name=f"post_hoc_d{level}", ) self._coarse_conv2d_op[level] = self._conv2d_op( - filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name="p%d" % level + filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name=f"p{level}" ) def __call__(self, multilevel_features, is_training=None): @@ -108,8 +109,8 @@ def __call__(self, multilevel_features, is_training=None): input_levels = list(multilevel_features.keys()) if min(input_levels) > self._min_level: raise ValueError( - "The minimum backbone level {} should be ".format(min(input_levels)) - + "less or equal to FPN minimum level {}.".format(self._min_level) + f"The minimum backbone level {min(input_levels)} should be " + + f"less or equal to FPN minimum level {self._min_level}." ) backbone_max_level = min(max(input_levels), self._max_level) diff --git a/examples/tensorflow/common/object_detection/architecture/heads.py b/examples/tensorflow/common/object_detection/architecture/heads.py index f210f12150d..bd6ba14ad73 100644 --- a/examples/tensorflow/common/object_detection/architecture/heads.py +++ b/examples/tensorflow/common/object_detection/architecture/heads.py @@ -63,10 +63,10 @@ def __init__( self._build_box_net_layers(norm_activation) def _class_net_batch_norm_name(self, i, level): - return "class-%d-%d" % (i, level) + return f"class-{i}-{level}" def _box_net_batch_norm_name(self, i, level): - return "box-%d-%d" % (i, level) + return f"box-{i}-{level}" def _build_class_net_layers(self, norm_activation): """Build re-usable layers for class prediction network.""" @@ -254,7 +254,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." + raise ValueError(msg) self._use_batch_norm = use_batch_norm if use_separable_conv: @@ -286,7 +287,7 @@ def __init__( self._norm_activations = {} if self._use_batch_norm: for level in range(self._min_level, self._max_level + 1): - self._norm_activations[level] = norm_activation(name="rpn-l%d-bn" % level) + self._norm_activations[level] = norm_activation(name=f"rpn-l{level}-bn") def _shared_rpn_heads(self, features, anchors_per_location, level, is_training): """Shared RPN heads.""" @@ -372,7 +373,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." 
+ raise ValueError(msg) self._use_batch_norm = use_batch_norm self._norm_activation = norm_activation @@ -387,7 +389,7 @@ def __init__( padding="same", dilation_rate=(1, 1), activation=(None if self._use_batch_norm else self._activation_op), - name="conv_{}".format(i), + name=f"conv_{i}", ) ) if self._use_batch_norm: @@ -400,7 +402,7 @@ def __init__( tf.keras.layers.Dense( units=self._fc_dims, activation=(None if self._use_batch_norm else self._activation_op), - name="fc{}".format(i), + name=f"fc{i}", ) ) if self._use_batch_norm: @@ -511,7 +513,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." + raise ValueError(msg) self._use_batch_norm = use_batch_norm self._norm_activation = norm_activation self._conv2d_ops = [] @@ -524,7 +527,7 @@ def __init__( padding="same", dilation_rate=(1, 1), activation=(None if self._use_batch_norm else self._activation_op), - name="mask-conv-l%d" % i, + name=f"mask-conv-l{i}", ) ) diff --git a/examples/tensorflow/common/object_detection/architecture/nn_ops.py b/examples/tensorflow/common/object_detection/architecture/nn_ops.py index 2855577034a..7b598b7289a 100644 --- a/examples/tensorflow/common/object_detection/architecture/nn_ops.py +++ b/examples/tensorflow/common/object_detection/architecture/nn_ops.py @@ -69,7 +69,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." + raise ValueError(msg) def __call__(self, inputs, is_training=None): """Builds the normalization layer followed by an optional activation layer. diff --git a/examples/tensorflow/common/object_detection/architecture/resnet.py b/examples/tensorflow/common/object_detection/architecture/resnet.py index 4063a4767b7..784e18cec84 100644 --- a/examples/tensorflow/common/object_detection/architecture/resnet.py +++ b/examples/tensorflow/common/object_detection/architecture/resnet.py @@ -40,7 +40,8 @@ def __init__( elif activation == "swish": self._activation_op = tf.nn.swish else: - raise ValueError("Unsupported activation `{}`.".format(activation)) + msg = f"Unsupported activation `{activation}`." + raise ValueError(msg) self._norm_activation = norm_activation self._data_format = data_format @@ -57,8 +58,9 @@ def __init__( if resnet_depth not in model_params: valid_resnet_depths = ", ".join([str(depth) for depth in sorted(model_params.keys())]) + msg = f"The resnet_depth should be in [{valid_resnet_depths}]. Not a valid resnet_depth:" raise ValueError( - "The resnet_depth should be in [%s]. Not a valid resnet_depth:" % (valid_resnet_depths), + msg, self._resnet_depth, ) @@ -78,7 +80,7 @@ def __call__(self, inputs, is_training=None): The values are corresponding feature hierarchy in ResNet with shape [batch_size, height_l, width_l, num_filters]. 
""" - with tf.name_scope("resnet%s" % self._resnet_depth): + with tf.name_scope(f"resnet{self._resnet_depth}"): return self._resnet_fn(inputs, is_training) def fixed_padding(self, inputs, kernel_size): diff --git a/examples/tensorflow/common/object_detection/base_model.py b/examples/tensorflow/common/object_detection/base_model.py index 84b9a166e06..b6b9669acfb 100644 --- a/examples/tensorflow/common/object_detection/base_model.py +++ b/examples/tensorflow/common/object_detection/base_model.py @@ -98,4 +98,5 @@ def make_restore_checkpoint_fn(self): def eval_metrics(self): """Returns tuple of metric function and its inputs for evaluation.""" - raise NotImplementedError("Unimplemented eval_metrics") + msg = "Unimplemented eval_metrics" + raise NotImplementedError(msg) diff --git a/examples/tensorflow/common/object_detection/checkpoint_utils.py b/examples/tensorflow/common/object_detection/checkpoint_utils.py index a6d4b37e304..de58600d1c1 100644 --- a/examples/tensorflow/common/object_detection/checkpoint_utils.py +++ b/examples/tensorflow/common/object_detection/checkpoint_utils.py @@ -64,7 +64,7 @@ def _build_assignment_map(keras_model, prefix="", skip_variables_regex=None, var try: if match_names: - assert len(match_names) == 1, "more then on matches for {}: {}".format(var_name, match_names) + assert len(match_names) == 1, f"more then on matches for {var_name}: {match_names}" checkpoint_names.remove(match_names[0]) assignment_map[match_names[0]] = var else: @@ -119,7 +119,8 @@ def _restore_checkpoint_fn(keras_model): ) if not vars_to_load: - raise ValueError("Variables to load is empty.") + msg = "Variables to load is empty." + raise ValueError(msg) tf.compat.v1.train.init_from_checkpoint(checkpoint_path, vars_to_load) diff --git a/examples/tensorflow/common/object_detection/datasets/preprocessing_selector.py b/examples/tensorflow/common/object_detection/datasets/preprocessing_selector.py index 7801cf942cb..02c60aea8cf 100644 --- a/examples/tensorflow/common/object_detection/datasets/preprocessing_selector.py +++ b/examples/tensorflow/common/object_detection/datasets/preprocessing_selector.py @@ -23,6 +23,7 @@ def get_preprocess_input_fn(config, is_train): elif model_name == "YOLOv4": tfds_decoder, preprocess_input_fn = YOLOv4Preprocessor(config, is_train).create_preprocess_input_fn() else: - raise ValueError("Unknown model name {}".format(model_name)) + msg = f"Unknown model name {model_name}" + raise ValueError(msg) return tfds_decoder, preprocess_input_fn diff --git a/examples/tensorflow/common/object_detection/evaluation/coco_evaluator.py b/examples/tensorflow/common/object_detection/evaluation/coco_evaluator.py index 5a1ceeee30b..ddc2d5463ac 100644 --- a/examples/tensorflow/common/object_detection/evaluation/coco_evaluator.py +++ b/examples/tensorflow/common/object_detection/evaluation/coco_evaluator.py @@ -214,7 +214,8 @@ def update(self, predictions, groundtruths=None): for k in self._required_prediction_fields: if k not in predictions: - raise ValueError("Missing the required key `{}` in predictions!".format(k)) + msg = f"Missing the required key `{k}` in predictions!" + raise ValueError(msg) if self._need_rescale_bboxes: self._process_predictions(predictions) @@ -235,7 +236,8 @@ def update(self, predictions, groundtruths=None): for k in self._required_groundtruth_fields: if k not in groundtruths: - raise ValueError("Missing the required key `{}` in groundtruths!".format(k)) + msg = f"Missing the required key `{k}` in groundtruths!" 
+ raise ValueError(msg) for k, v in groundtruths.items(): if k not in self._groundtruths: diff --git a/examples/tensorflow/common/object_detection/evaluation/coco_utils.py b/examples/tensorflow/common/object_detection/evaluation/coco_utils.py index 446ebc49d67..488e7def33d 100644 --- a/examples/tensorflow/common/object_detection/evaluation/coco_utils.py +++ b/examples/tensorflow/common/object_detection/evaluation/coco_utils.py @@ -40,10 +40,12 @@ def __init__(self, eval_type="box", annotation_file=None, gt_dataset=None): gt_dataset: the groundtruth eval datatset in COCO API format. """ if (annotation_file and gt_dataset) or ((not annotation_file) and (not gt_dataset)): - raise ValueError("One and only one of `annotation_file` and `gt_dataset` needs to be specified.") + msg = "One and only one of `annotation_file` and `gt_dataset` needs to be specified." + raise ValueError(msg) if eval_type not in ["box", "mask"]: - raise ValueError("The `eval_type` can only be either `box` or `mask`.") + msg = "The `eval_type` can only be either `box` or `mask`." + raise ValueError(msg) coco.COCO.__init__(self, annotation_file=annotation_file) self._eval_type = eval_type @@ -72,7 +74,8 @@ def load_res(self, predictions): image_ids = [ann["image_id"] for ann in predictions] if set(image_ids) != (set(image_ids) & set(self.getImgIds())): - raise ValueError("Results do not correspond to the current dataset!") + msg = "Results do not correspond to the current dataset!" + raise ValueError(msg) for ann in predictions: x1, x2, y1, y2 = [ ann["bbox"][0], diff --git a/examples/tensorflow/common/object_detection/losses.py b/examples/tensorflow/common/object_detection/losses.py index ae4c6cd6bbd..333b6b3d5c3 100644 --- a/examples/tensorflow/common/object_detection/losses.py +++ b/examples/tensorflow/common/object_detection/losses.py @@ -142,7 +142,7 @@ class RpnBoxLoss: """Region Proposal Network box regression loss function.""" def __init__(self, params): - logger.info("RpnBoxLoss huber_loss_delta {}".format(params.huber_loss_delta)) + logger.info(f"RpnBoxLoss huber_loss_delta {params.huber_loss_delta}") # The delta is typically around the mean value of regression target. # for instances, the regression targets of 512x512 input with 6 anchors on # P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. @@ -230,7 +230,7 @@ class FastrcnnBoxLoss: """Fast R-CNN box regression loss function.""" def __init__(self, params): - logger.info("FastrcnnBoxLoss huber_loss_delta {}".format(params.huber_loss_delta)) + logger.info(f"FastrcnnBoxLoss huber_loss_delta {params.huber_loss_delta}") # The delta is typically around the mean value of regression target. # for instances, the regression targets of 512x512 input with 6 anchors on # P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2]. 
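The hunks above and below all apply the same two mechanical rewrites. As a minimal sketch of their shape (not part of this patch; the function names, paths, and messages are invented for illustration), this is what flake8-errmsg (EM101/EM102) and pyupgrade (UP015, UP031, UP032) enforce:

```python
from pathlib import Path


# flake8-errmsg (EM101/EM102): do not pass a string literal or f-string
# directly to an exception constructor; bind it to a variable first.
def load_checkpoint(path: Path) -> bytes:
    if not path.exists():
        # before (EM102): raise FileNotFoundError(f"Checkpoint not found: {path}")
        msg = f"Checkpoint not found: {path}"
        raise FileNotFoundError(msg)
    return path.read_bytes()


# pyupgrade: replace %-formatting (UP031) and str.format() calls (UP032)
# with f-strings.
def layer_name(level: int) -> str:
    # before: "p%d-bn" % level  or  "p{}-bn".format(level)
    return f"p{level}-bn"


# pyupgrade (UP015): drop the redundant "r" mode argument to open().
def read_annotations(annotations_file: str) -> list:
    # before: open(annotations_file, "r", encoding="utf-8")
    with open(annotations_file, encoding="utf-8") as f:
        return f.readlines()
```

The errmsg convention exists because Python tracebacks print the raising source line: with `raise RuntimeError("some long message")` the message text appears twice in the output, whereas `raise RuntimeError(msg)` shows it once. The pyproject.toml hunk (outside this excerpt) presumably enables the "UP" and "EM" rule groups in ruff's lint configuration, keeping the five annotation-related rules listed in the commit message ignored until the follow-up PR.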
diff --git a/examples/tensorflow/common/object_detection/ops/roi_ops.py b/examples/tensorflow/common/object_detection/ops/roi_ops.py index 912e08ce612..9cd1c446c6b 100644 --- a/examples/tensorflow/common/object_detection/ops/roi_ops.py +++ b/examples/tensorflow/common/object_detection/ops/roi_ops.py @@ -91,7 +91,7 @@ def multilevel_propose_rois( roi_scores = [] image_shape = tf.expand_dims(image_shape, axis=1) for level in sorted(rpn_scores.keys()): - with tf.name_scope("level_{}".format(level)): + with tf.name_scope(f"level_{level}"): _, feature_h, feature_w, num_anchors_per_location = rpn_scores[level].get_shape().as_list() num_boxes = feature_h * feature_w * num_anchors_per_location diff --git a/examples/tensorflow/common/object_detection/utils/argmax_matcher.py b/examples/tensorflow/common/object_detection/utils/argmax_matcher.py index 3071f064a09..d53d68dbf6b 100644 --- a/examples/tensorflow/common/object_detection/utils/argmax_matcher.py +++ b/examples/tensorflow/common/object_detection/utils/argmax_matcher.py @@ -65,22 +65,25 @@ def __init__( or if unmatched_threshold > matched_threshold. """ if (matched_threshold is None) and (unmatched_threshold is not None): - raise ValueError("Need to also define matched_threshold when unmatched_threshold is defined") + msg = "Need to also define matched_threshold when unmatched_threshold is defined" + raise ValueError(msg) self._matched_threshold = matched_threshold if unmatched_threshold is None: self._unmatched_threshold = matched_threshold else: if unmatched_threshold > matched_threshold: - raise ValueError("unmatched_threshold needs to be smaller or equal to matched_threshold") + msg = "unmatched_threshold needs to be smaller or equal to matched_threshold" + raise ValueError(msg) self._unmatched_threshold = unmatched_threshold if not negatives_lower_than_unmatched and self._unmatched_threshold == self._matched_threshold: - raise ValueError( + msg = ( "When negatives are in between matched and " "unmatched thresholds, these cannot be of equal " - "value. matched: {}, unmatched: {}".format(self._matched_threshold, self._unmatched_threshold) + f"value. matched: {self._matched_threshold}, unmatched: {self._unmatched_threshold}" ) + raise ValueError(msg) self._force_match_for_each_row = force_match_for_each_row self._negatives_lower_than_unmatched = negatives_lower_than_unmatched diff --git a/examples/tensorflow/common/object_detection/utils/balanced_positive_negative_sampler.py b/examples/tensorflow/common/object_detection/utils/balanced_positive_negative_sampler.py index f2b0df9ac53..4941a727bfe 100644 --- a/examples/tensorflow/common/object_detection/utils/balanced_positive_negative_sampler.py +++ b/examples/tensorflow/common/object_detection/utils/balanced_positive_negative_sampler.py @@ -48,7 +48,8 @@ def __init__(self, positive_fraction=0.5, is_static=False): """ super().__init__() if positive_fraction < 0 or positive_fraction > 1: - raise ValueError("positive_fraction should be in range [0,1]. Received: {}.".format(positive_fraction)) + msg = f"positive_fraction should be in range [0,1]. Received: {positive_fraction}." + raise ValueError(msg) self._positive_fraction = positive_fraction self._is_static = is_static @@ -126,13 +127,16 @@ def _static_subsample(self, indicator, batch_size, labels): # Check if indicator and labels have a static size. 
if not indicator.shape.is_fully_defined(): - raise ValueError("indicator must be static in shape when is_static is True") + msg = "indicator must be static in shape when is_static is True" + raise ValueError(msg) if not labels.shape.is_fully_defined(): - raise ValueError("labels must be static in shape when is_static is True") + msg = "labels must be static in shape when is_static is True" + raise ValueError(msg) if not isinstance(batch_size, int): - raise ValueError("batch_size has to be an integer when is_static is True.") + msg = "batch_size has to be an integer when is_static is True." + raise ValueError(msg) input_length = tf.shape(input=indicator)[0] @@ -210,13 +214,17 @@ def subsample(self, indicator, batch_size, labels, scope=None): """ if len(indicator.get_shape().as_list()) != 1: - raise ValueError("indicator must be 1 dimensional, got a tensor of shape {}".format(indicator.get_shape())) + msg = f"indicator must be 1 dimensional, got a tensor of shape {indicator.get_shape()}" + raise ValueError(msg) if len(labels.get_shape().as_list()) != 1: - raise ValueError("labels must be 1 dimensional, got a tensor of shape {}".format(labels.get_shape())) + msg = f"labels must be 1 dimensional, got a tensor of shape {labels.get_shape()}" + raise ValueError(msg) if labels.dtype != tf.bool: - raise ValueError("labels should be of type bool. Received: {}".format(labels.dtype)) + msg = f"labels should be of type bool. Received: {labels.dtype}" + raise ValueError(msg) if indicator.dtype != tf.bool: - raise ValueError("indicator should be of type bool. Received: {}".format(indicator.dtype)) + msg = f"indicator should be of type bool. Received: {indicator.dtype}" + raise ValueError(msg) scope = scope or "BalancedPositiveNegativeSampler" with tf.name_scope(scope): diff --git a/examples/tensorflow/common/object_detection/utils/box_coder.py b/examples/tensorflow/common/object_detection/utils/box_coder.py index c1216710206..a7902a203bc 100644 --- a/examples/tensorflow/common/object_detection/utils/box_coder.py +++ b/examples/tensorflow/common/object_detection/utils/box_coder.py @@ -118,12 +118,13 @@ def batch_decode(encoded_boxes, box_coder, anchors): """ encoded_boxes.get_shape().assert_has_rank(3) if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static(): - raise ValueError( + msg = ( "The number of anchors inferred from encoded_boxes" " and anchors are inconsistent: shape[1] of encoded_boxes" - " %s should be equal to the number of anchors: %s." - % (encoded_boxes.get_shape()[1].value, anchors.num_boxes_static()) + f" {encoded_boxes.get_shape()[1].value} should be equal to the number of anchors:" + f" {anchors.num_boxes_static()}." ) + raise ValueError(msg) decoded_boxes = tf.stack([box_coder.decode(boxes, anchors).get() for boxes in tf.unstack(encoded_boxes)]) diff --git a/examples/tensorflow/common/object_detection/utils/box_list.py b/examples/tensorflow/common/object_detection/utils/box_list.py index 788305b4e86..cba15d0e581 100644 --- a/examples/tensorflow/common/object_detection/utils/box_list.py +++ b/examples/tensorflow/common/object_detection/utils/box_list.py @@ -26,9 +26,11 @@ def __init__(self, boxes): float32 format. """ if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: - raise ValueError("Invalid dimensions for box data.") + msg = "Invalid dimensions for box data." 
+ raise ValueError(msg) if boxes.dtype != tf.float32: - raise ValueError("Invalid tensor type: should be tf.float32") + msg = "Invalid tensor type: should be tf.float32" + raise ValueError(msg) self.data = {"boxes": boxes} def num_boxes(self): @@ -91,7 +93,8 @@ def set(self, boxes): ValueError: if invalid dimensions for bbox data """ if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4: - raise ValueError("Invalid dimensions for box data.") + msg = "Invalid dimensions for box data." + raise ValueError(msg) self.data["boxes"] = boxes def get_field(self, field): @@ -127,7 +130,8 @@ def set_field(self, field, value): ValueError: if the box_list does not have specified field. """ if not self.has_field(field): - raise ValueError("field %s does not exist" % field) + msg = f"field {field} does not exist" + raise ValueError(msg) self.data[field] = value def get_center_coordinates_and_sizes(self, scope=None): @@ -180,6 +184,7 @@ def as_tensor_dict(self, fields=None): fields = self.get_all_fields() for field in fields: if not self.has_field(field): - raise ValueError("boxlist must contain all specified fields") + msg = "boxlist must contain all specified fields" + raise ValueError(msg) tensor_dict[field] = self.get_field(field) return tensor_dict diff --git a/examples/tensorflow/common/object_detection/utils/box_utils.py b/examples/tensorflow/common/object_detection/utils/box_utils.py index 6753d3fd6eb..0bd7d33554c 100644 --- a/examples/tensorflow/common/object_detection/utils/box_utils.py +++ b/examples/tensorflow/common/object_detection/utils/box_utils.py @@ -30,7 +30,8 @@ def yxyx_to_xywh(boxes): ValueError: If the last dimension of boxes is not 4. """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) boxes_ymin = boxes[..., 0] boxes_xmin = boxes[..., 1] @@ -60,7 +61,8 @@ def normalize_boxes(boxes, image_shape): """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) with tf.name_scope("normalize_boxes"): if isinstance(image_shape, (list, tuple)): @@ -131,7 +133,8 @@ def clip_boxes(boxes, image_shape): ValueError: If the last dimension of boxes is not 4. """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) with tf.name_scope("clip_boxes"): if isinstance(image_shape, (list, tuple)): @@ -165,7 +168,8 @@ def encode_boxes(boxes, anchors, weights=None): """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) with tf.name_scope("encode_boxes"): boxes = tf.cast(boxes, anchors.dtype) @@ -218,7 +222,8 @@ def decode_boxes(encoded_boxes, anchors, weights=None): decoded box targets. """ if encoded_boxes.shape[-1] != 4: - raise ValueError("encoded_boxes.shape[-1] is {:d}, but must be 4.".format(encoded_boxes.shape[-1])) + msg = f"encoded_boxes.shape[-1] is {encoded_boxes.shape[-1]:d}, but must be 4." 
+ raise ValueError(msg) with tf.name_scope("decode_boxes"): encoded_boxes = tf.cast(encoded_boxes, anchors.dtype) @@ -280,7 +285,8 @@ def filter_boxes(boxes, scores, image_shape, min_size_threshold): the positinon of the filtered boxes filled with 0. """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) with tf.name_scope("filter_boxes"): if isinstance(image_shape, (list, tuple)): @@ -333,7 +339,8 @@ def filter_boxes_by_scores(boxes, scores, min_score_threshold): the """ if boxes.shape[-1] != 4: - raise ValueError("boxes.shape[1] is {:d}, but must be 4.".format(boxes.shape[-1])) + msg = f"boxes.shape[-1] is {boxes.shape[-1]:d}, but must be 4." + raise ValueError(msg) with tf.name_scope("filter_boxes_by_scores"): filtered_mask = tf.math.greater(scores, min_score_threshold) diff --git a/examples/tensorflow/common/object_detection/utils/matcher.py b/examples/tensorflow/common/object_detection/utils/matcher.py index adcf59587ee..dc70d39b42c 100644 --- a/examples/tensorflow/common/object_detection/utils/matcher.py +++ b/examples/tensorflow/common/object_detection/utils/matcher.py @@ -36,9 +36,11 @@ def __init__(self, match_results): integer int32 scalar tensor """ if match_results.shape.ndims != 1: - raise ValueError("match_results should have rank 1") + msg = "match_results should have rank 1" + raise ValueError(msg) if match_results.dtype != tf.int32: - raise ValueError("match_results should be an int32 or int64 scalar tensor") + msg = "match_results should be an int32 or int64 scalar tensor" + raise ValueError(msg) self._match_results = match_results @property diff --git a/examples/tensorflow/common/object_detection/utils/shape_utils.py b/examples/tensorflow/common/object_detection/utils/shape_utils.py index 8af9c4cd531..d86e2ec3632 100644 --- a/examples/tensorflow/common/object_detection/utils/shape_utils.py +++ b/examples/tensorflow/common/object_detection/utils/shape_utils.py @@ -34,7 +34,8 @@ def assert_shape_equal(shape_a, shape_b): """ if all(isinstance(dim, int) for dim in shape_a) and all(isinstance(dim, int) for dim in shape_b): if shape_a != shape_b: - raise ValueError("Unequal shapes {}, {}".format(shape_a, shape_b)) + msg = f"Unequal shapes {shape_a}, {shape_b}" + raise ValueError(msg) return tf.no_op() return tf.assert_equal(shape_a, shape_b) diff --git a/examples/tensorflow/common/object_detection/utils/target_assigner.py b/examples/tensorflow/common/object_detection/utils/target_assigner.py index a02fc16d600..a8983d3a98f 100644 --- a/examples/tensorflow/common/object_detection/utils/target_assigner.py +++ b/examples/tensorflow/common/object_detection/utils/target_assigner.py @@ -95,9 +95,11 @@ def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, groundtrut box_list.BoxList """ if not isinstance(anchors, box_list.BoxList): - raise ValueError("anchors must be an BoxList") + msg = "anchors must be a BoxList" + raise ValueError(msg) if not isinstance(groundtruth_boxes, box_list.BoxList): - raise ValueError("groundtruth_boxes must be an BoxList") + msg = "groundtruth_boxes must be a BoxList" + raise ValueError(msg) if groundtruth_labels is None: groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0)) diff --git a/examples/tensorflow/common/optimizer.py b/examples/tensorflow/common/optimizer.py index 04c58fdff71..9f064bbaee4 100644 --- a/examples/tensorflow/common/optimizer.py +++
b/examples/tensorflow/common/optimizer.py @@ -62,6 +62,7 @@ def build_optimizer(config, scheduler): else: optimizer = tf.keras.optimizers.Adam(**common_params) else: - raise ValueError("Unknown optimizer %s" % optimizer_type) + msg = f"Unknown optimizer {optimizer_type}" + raise ValueError(msg) return optimizer diff --git a/examples/tensorflow/common/prepare_checkpoint.py b/examples/tensorflow/common/prepare_checkpoint.py index 3022d9c7ea8..2ee6abaf60d 100644 --- a/examples/tensorflow/common/prepare_checkpoint.py +++ b/examples/tensorflow/common/prepare_checkpoint.py @@ -41,7 +41,8 @@ def get_config_and_model_type_from_argv(argv, parser): elif args.model_type == ModelType.segmentation: predefined_config = get_predefined_seg_config(config_from_json.model) else: - raise nncf.ValidationError("Wrong model type specified") + msg = "Wrong model type specified" + raise nncf.ValidationError(msg) predefined_config.update(config_from_json) return predefined_config, args.model_type @@ -51,16 +52,16 @@ def load_checkpoint(checkpoint, ckpt_path): logger.info("Load from checkpoint is enabled") if tf.io.gfile.isdir(ckpt_path): path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path) - logger.info("Latest checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Latest checkpoint: {path_to_checkpoint}") else: path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None - logger.info("Provided checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Provided checkpoint: {path_to_checkpoint}") if not path_to_checkpoint: logger.info("No checkpoint detected") return 0 - logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint)) + logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint") status = checkpoint.restore(path_to_checkpoint) status.expect_partial() logger.info("Completed loading from checkpoint") @@ -114,7 +115,7 @@ def load_and_save_checkpoint(checkpoint, config): config.checkpoint_save_dir = config.log_dir checkpoint_manager = tf.train.CheckpointManager(checkpoint, config.checkpoint_save_dir, max_to_keep=None) save_path = checkpoint_manager.save() - logger.info("Saved checkpoint: {}".format(save_path)) + logger.info(f"Saved checkpoint: {save_path}") def main(argv): diff --git a/examples/tensorflow/common/scheduler.py b/examples/tensorflow/common/scheduler.py index 54a9caec83e..f4523eab139 100644 --- a/examples/tensorflow/common/scheduler.py +++ b/examples/tensorflow/common/scheduler.py @@ -75,7 +75,8 @@ def get_config(self): def schedule_base_lr_check(schedule_type, base_lr): schedules_with_base_lr = ["exponential", "multistep", "step", "cosine"] if schedule_type in schedules_with_base_lr and base_lr is None: - raise ValueError("`base_lr` parameter must be specified for the %s scheduler" % schedule_type) + msg = f"`base_lr` parameter must be specified for the {schedule_type} scheduler" + raise ValueError(msg) def build_scheduler(config, steps_per_epoch): @@ -104,11 +105,13 @@ def build_scheduler(config, steps_per_epoch): elif schedule_type == "piecewise_constant": boundaries = schedule_params.get("boundaries", optimizer_config.get("boundaries", None)) if boundaries is None: - raise ValueError("`boundaries` parameter must be specified for the `piecewise_constant` scheduler") + msg = "`boundaries` parameter must be specified for the `piecewise_constant` scheduler" + raise ValueError(msg) values = schedule_params.get("values", optimizer_config.get("values", None)) if values is None: - raise 
ValueError("`values` parameter must be specified for the `piecewise_constant` scheduler") + msg = "`values` parameter must be specified for the `piecewise_constant` scheduler" + raise ValueError(msg) logger.info( "Using Piecewise constant decay with warmup. Parameters: boundaries: %s, values: %s", boundaries, values @@ -120,7 +123,8 @@ def build_scheduler(config, steps_per_epoch): logger.info("Using MultiStep learning rate.") steps = schedule_params.get("steps", optimizer_config.get("steps", None)) if steps is None: - raise ValueError("`steps` parameter must be specified for the `multistep` scheduler") + msg = "`steps` parameter must be specified for the `multistep` scheduler" + raise ValueError(msg) steps = [steps_per_epoch * x for x in steps] lr = MultiStepLearningRate(base_lr, steps, gamma=gamma) @@ -144,6 +148,7 @@ def build_scheduler(config, steps_per_epoch): lr = tf.keras.experimental.CosineDecay(initial_learning_rate=base_lr, decay_steps=decay_steps) else: - raise KeyError(f"Unknown learning rate scheduler type: {schedule_type}") + msg = f"Unknown learning rate scheduler type: {schedule_type}" + raise KeyError(msg) return lr diff --git a/examples/tensorflow/common/utils.py b/examples/tensorflow/common/utils.py index defebe5d45a..77eb82e1c69 100644 --- a/examples/tensorflow/common/utils.py +++ b/examples/tensorflow/common/utils.py @@ -55,11 +55,11 @@ def get_run_name(config: SampleConfig) -> str: weights = algo_dict.get("weights", {}) w_bits = weights.get("bits", QUANTIZATION_BITS) if a_bits == w_bits: - retval += "_int{}".format(a_bits) + retval += f"_int{a_bits}" else: - retval += "_a_int{}_w_int{}".format(a_bits, w_bits) + retval += f"_a_int{a_bits}_w_int{w_bits}" else: - retval += "_{}".format(algo_name) + retval += f"_{algo_name}" return retval @@ -82,7 +82,7 @@ def create_code_snapshot(root, dst_path, extensions=(".py", ".json", ".cpp", ".c def print_args(config, logger=default_logger): args = "Command line arguments\n" - args += "\n".join(["{: <27s}: {}".format(arg, config.get(arg)) for arg in sorted(config)]) + args += "\n".join([f"{arg: <27s}: {config.get(arg)}" for arg in sorted(config)]) logger.info(args) @@ -125,7 +125,7 @@ def set_memory_growth(devices): try: tf.config.experimental.set_memory_growth(device, True) except (ValueError, RuntimeError) as e: - default_logger.info("{}: {}".format(device, e)) + default_logger.info(f"{device}: {e}") def get_learning_rate(optimizer, step=0): diff --git a/examples/tensorflow/object_detection/main.py b/examples/tensorflow/object_detection/main.py index 46acbfa5916..f332f1c89e6 100644 --- a/examples/tensorflow/object_detection/main.py +++ b/examples/tensorflow/object_detection/main.py @@ -83,17 +83,18 @@ def load_checkpoint(checkpoint, ckpt_path): logger.info("Load from checkpoint is enabled") if tf.io.gfile.isdir(ckpt_path): path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path) - logger.info("Latest checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Latest checkpoint: {path_to_checkpoint}") else: path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None - logger.info("Provided checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Provided checkpoint: {path_to_checkpoint}") if not path_to_checkpoint: logger.info("No checkpoint detected.") if ckpt_path: - raise nncf.ValidationError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}") + msg = f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}" + raise nncf.ValidationError(msg) - 
logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint)) + logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint") status = checkpoint.restore(path_to_checkpoint) status.expect_partial() logger.info("Completed loading from checkpoint") @@ -185,7 +186,7 @@ def train_epoch( continue if step == steps_per_epoch: save_path = checkpoint_manager.save() - logger.info("Saved checkpoint for epoch={}: {}".format(epoch, save_path)) + logger.info(f"Saved checkpoint for epoch={epoch}: {save_path}") break compression_ctrl.scheduler.step() @@ -193,7 +194,8 @@ def train_epoch( train_metric_result = tf.nest.map_structure(lambda s: s.numpy().astype(float), train_loss) if np.isnan(train_metric_result["total_loss"]): - raise ValueError("total loss is NaN") + msg = "total loss is NaN" + raise ValueError(msg) train_metric_result.update({"learning_rate": get_learning_rate(optimizer, optimizer.iterations)}) @@ -201,8 +203,8 @@ def train_epoch( if step % print_freq == 0: time = timer.toc(average=False) - logger.info("Step: {}/{} Time: {:.3f} sec".format(step, steps_per_epoch, time)) - logger.info("Training metric = {}".format(train_metric_result)) + logger.info(f"Step: {step}/{steps_per_epoch} Time: {time:.3f} sec") + logger.info(f"Training metric = {train_metric_result}") timer.tic() @@ -235,7 +237,7 @@ def train( logger.info("Training...") for epoch in range(initial_epoch, epochs): - logger.info("Epoch: {}/{}".format(epoch, epochs)) + logger.info(f"Epoch: {epoch}/{epochs}") train_epoch( train_step, @@ -255,7 +257,7 @@ def train( test_metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_test_batches, print_freq) validation_summary_writer(metrics=test_metric_result, step=optimizer.iterations.numpy()) eval_metric.reset_states() - logger.info("Validation metric = {}".format(test_metric_result)) + logger.info(f"Validation metric = {test_metric_result}") statistics = compression_ctrl.statistics() logger.info(statistics.to_str()) @@ -281,10 +283,10 @@ def evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq): if batch_idx % print_freq == 0: time = timer.toc(average=False) - logger.info("Predict for batch: {}/{} Time: {:.3f} sec".format(batch_idx, num_batches, time)) + logger.info(f"Predict for batch: {batch_idx}/{num_batches} Time: {time:.3f} sec") timer.tic() - logger.info("Total time: {:.3f} sec".format(timer.total_time)) + logger.info(f"Total time: {timer.total_time:.3f} sec") timer.reset() @@ -292,7 +294,7 @@ def evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq): timer.tic() result = metric.result() timer.toc(average=False) - logger.info("Total time: {:.3f} sec".format(timer.total_time)) + logger.info(f"Total time: {timer.total_time:.3f} sec") return result @@ -442,7 +444,7 @@ def validate_fn(model, **kwargs): logger.info(compression_ctrl.statistics().to_str()) metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_test_batches, config.print_freq) - logger.info("Validation metric = {}".format(metric_result)) + logger.info(f"Validation metric = {metric_result}") if config.metrics_dump is not None: write_metrics(metric_result["AP"], config.metrics_dump) @@ -450,7 +452,7 @@ def validate_fn(model, **kwargs): if "export" in config.mode: save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") def export(config): @@ -469,7 +471,7 
@@ def export(config): save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") def main(argv): diff --git a/examples/tensorflow/object_detection/models/model_selector.py b/examples/tensorflow/object_detection/models/model_selector.py index 529e4643ce8..a59cf8178e5 100644 --- a/examples/tensorflow/object_detection/models/model_selector.py +++ b/examples/tensorflow/object_detection/models/model_selector.py @@ -23,7 +23,8 @@ def get_predefined_config(model_name): elif model_name == "YOLOv4": predefined_config = YOLOV4_CONFIG else: - raise ValueError("Model {} is not supported.".format(model_name)) + msg = f"Model {model_name} is not supported." + raise ValueError(msg) return copy.deepcopy(predefined_config) @@ -36,6 +37,7 @@ def get_model_builder(config): elif model_name == "YOLOv4": model_builder = YOLOv4Model(config) else: - raise ValueError("Model {} is not supported.".format(model_name)) + msg = f"Model {model_name} is not supported." + raise ValueError(msg) return model_builder diff --git a/examples/tensorflow/object_detection/models/retinanet_model.py b/examples/tensorflow/object_detection/models/retinanet_model.py index 89f66254d74..a62ad4fb5dd 100644 --- a/examples/tensorflow/object_detection/models/retinanet_model.py +++ b/examples/tensorflow/object_detection/models/retinanet_model.py @@ -98,7 +98,7 @@ def build_model(self, weights=None, is_training=None): init_checkpoint_fn(keras_model) if weights: - logger.info("Loaded pretrained weights from {}".format(weights)) + logger.info(f"Loaded pretrained weights from {weights}") keras_model.load_weights(weights) return keras_model @@ -108,11 +108,8 @@ def post_processing(self, labels, outputs): for field in required_output_fields: if field not in outputs: - raise ValueError( - '"{}" is missing in outputs, requried {} found {}'.format( - field, required_output_fields, outputs.keys() - ) - ) + msg = f'"{field}" is missing in outputs, required {required_output_fields} found {outputs.keys()}' + raise ValueError(msg) boxes, scores, classes, valid_detections = self._generate_detections_fn( outputs["box_outputs"], outputs["cls_outputs"], labels["anchor_boxes"], labels["image_info"][:, 1:2, :] diff --git a/examples/tensorflow/object_detection/models/yolo_v4_model.py b/examples/tensorflow/object_detection/models/yolo_v4_model.py index efe3f9bf737..ca8627ca914 100644 --- a/examples/tensorflow/object_detection/models/yolo_v4_model.py +++ b/examples/tensorflow/object_detection/models/yolo_v4_model.py @@ -63,7 +63,7 @@ def build_model(self, weights=None, is_training=None): outputs = self.model_outputs(self._input_layer, is_training) keras_model = tf.keras.models.Model(inputs=self._input_layer, outputs=outputs, name="yolo_v4") if weights: - logger.info("Loaded pretrained weights from {}".format(weights)) + logger.info(f"Loaded pretrained weights from {weights}") keras_model.load_weights(weights, by_name=True) return keras_model diff --git a/examples/tensorflow/segmentation/evaluation.py b/examples/tensorflow/segmentation/evaluation.py index 930bd9a1f35..5662f0976b6 100644 --- a/examples/tensorflow/segmentation/evaluation.py +++ b/examples/tensorflow/segmentation/evaluation.py @@ -101,17 +101,18 @@ def load_checkpoint(checkpoint, ckpt_path): logger.info("Load from checkpoint is enabled") if tf.io.gfile.isdir(ckpt_path): path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path) - logger.info("Latest 
checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Latest checkpoint: {path_to_checkpoint}") else: path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None - logger.info("Provided checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Provided checkpoint: {path_to_checkpoint}") if not path_to_checkpoint: logger.info("No checkpoint detected.") if ckpt_path: - raise nncf.ValidationError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}") + msg = f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}" + raise nncf.ValidationError(msg) - logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint)) + logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint") status = checkpoint.restore(path_to_checkpoint) status.expect_partial() logger.info("Completed loading from checkpoint") @@ -135,10 +136,10 @@ def evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq): if batch_idx % print_freq == 0: time = timer.toc(average=False) - logger.info("Predict for batch: {}/{} Time: {:.3f} sec".format(batch_idx, num_batches, time)) + logger.info(f"Predict for batch: {batch_idx}/{num_batches} Time: {time:.3f} sec") timer.tic() - logger.info("Total time: {:.3f} sec".format(timer.total_time)) + logger.info(f"Total time: {timer.total_time:.3f} sec") timer.reset() @@ -146,7 +147,7 @@ def evaluate(test_step, metric, test_dist_dataset, num_batches, print_freq): timer.tic() result = metric.result() timer.toc(average=False) - logger.info("Total time: {:.3f} sec".format(timer.total_time)) + logger.info(f"Total time: {timer.total_time:.3f} sec") return result @@ -229,12 +230,12 @@ def run_evaluation(config, eval_timeout=None): logger.info(statistics.to_str()) metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq) eval_metric.reset_states() - logger.info("Test metric = {}".format(metric_result)) + logger.info(f"Test metric = {metric_result}") if "export" in config.mode: save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") elif "train" in config.mode: validation_summary_writer = SummaryWriter(config.log_dir, "validation") @@ -250,15 +251,15 @@ def run_evaluation(config, eval_timeout=None): else: checkpoint.restore(checkpoint_path).expect_partial() - logger.info("Checkpoint file {} found and restoring from checkpoint".format(checkpoint_path)) - logger.info("Checkpoint step: {}".format(checkpoint.step.numpy())) + logger.info(f"Checkpoint file {checkpoint_path} found and restoring from checkpoint") + logger.info(f"Checkpoint step: {checkpoint.step.numpy()}") metric_result = evaluate(test_step, eval_metric, test_dist_dataset, num_batches, config.print_freq) current_step = checkpoint.step.numpy() validation_summary_writer(metrics=metric_result, step=current_step) eval_metric.reset_states() - logger.info("Validation metric = {}".format(metric_result)) + logger.info(f"Validation metric = {metric_result}") validation_summary_writer.close() @@ -274,7 +275,7 @@ def export(config): save_path, save_format = get_saving_parameters(config) export_model(compression_ctrl.strip(), save_path, save_format) - logger.info("Saved to {}".format(save_path)) + logger.info(f"Saved to {save_path}") def main(argv): @@ -285,7 +286,8 @@ def main(argv): 
patch_if_experimental_quantization(config.nncf_config) if config.dataset_type != "tfrecords": - raise nncf.ValidationError("The train.py does not support TensorFlow Datasets (TFDS). Please use TFRecords.") + msg = "The train.py does not support TensorFlow Datasets (TFDS). Please use TFRecords." + raise nncf.ValidationError(msg) if "train" in config.mode or "test" in config.mode: run_evaluation(config) diff --git a/examples/tensorflow/segmentation/models/maskrcnn_model.py b/examples/tensorflow/segmentation/models/maskrcnn_model.py index aa22dd71b0e..aa435584d68 100644 --- a/examples/tensorflow/segmentation/models/maskrcnn_model.py +++ b/examples/tensorflow/segmentation/models/maskrcnn_model.py @@ -44,7 +44,8 @@ def _restore_baseline_weights(keras_model, checkpoint_path): match_names.append(x) if len(match_names) != 1: - raise Exception("More than one matches for {}: {}".format(v, match_names)) + msg = f"More than one match for {v}: {match_names}" + raise Exception(msg) assignment_map[match_names[0]] = v @@ -314,7 +315,7 @@ def build_model(self, weights=None, is_training=None): init_checkpoint_fn(keras_model) if weights: - logger.info("Loaded pretrained weights from {}".format(weights)) + logger.info(f"Loaded pretrained weights from {weights}") _restore_baseline_weights(keras_model, weights) return keras_model @@ -323,9 +324,8 @@ def post_processing(self, labels, outputs): required_output_fields = ["class_outputs", "box_outputs"] for field in required_output_fields: if field not in outputs: - raise ValueError( - '"%s" is missing in outputs, requried %s found %s' % (field, required_output_fields, outputs.keys()) - ) + msg = f'"{field}" is missing in outputs, required {required_output_fields} found {outputs.keys()}' + raise ValueError(msg) predictions = { "image_info": labels["image_info"], diff --git a/examples/tensorflow/segmentation/models/model_selector.py b/examples/tensorflow/segmentation/models/model_selector.py index 3f5c5cd0fd8..93bf45f67fa 100644 --- a/examples/tensorflow/segmentation/models/model_selector.py +++ b/examples/tensorflow/segmentation/models/model_selector.py @@ -19,7 +19,8 @@ def get_predefined_config(model_name): if model_name == "MaskRCNN": predefined_config = MASKRCNN_CONFIG else: - raise ValueError("Model {} is not supported.".format(model_name)) + msg = f"Model {model_name} is not supported." + raise ValueError(msg) return copy.deepcopy(predefined_config) @@ -30,6 +31,7 @@ def get_model_builder(config): if model_name == "MaskRCNN": model_builder = MaskrcnnModel(config) else: - raise ValueError("Model {} is not supported.".format(model_name)) + msg = f"Model {model_name} is not supported." 
+ raise ValueError(msg) return model_builder diff --git a/examples/tensorflow/segmentation/train.py b/examples/tensorflow/segmentation/train.py index 75f0e63fc8a..d4d6b822900 100644 --- a/examples/tensorflow/segmentation/train.py +++ b/examples/tensorflow/segmentation/train.py @@ -98,16 +98,16 @@ def load_checkpoint(checkpoint, ckpt_path): logger.info("Load from checkpoint is enabled") if tf.io.gfile.isdir(ckpt_path): path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path) - logger.info("Latest checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Latest checkpoint: {path_to_checkpoint}") else: path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None - logger.info("Provided checkpoint: {}".format(path_to_checkpoint)) + logger.info(f"Provided checkpoint: {path_to_checkpoint}") if not path_to_checkpoint: logger.info("No checkpoint detected") return 0 - logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint)) + logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint") status = checkpoint.restore(path_to_checkpoint) status.expect_partial() logger.info("Completed loading from checkpoint") @@ -182,7 +182,7 @@ def train( logger.info("Training...") for epoch in range(initial_epoch, epochs): - logger.info("Epoch: {}/{}".format(epoch, epochs)) + logger.info(f"Epoch: {epoch}/{epochs}") compression_ctrl.scheduler.epoch_step(epoch) for step, x in enumerate(train_dist_dataset): @@ -193,7 +193,7 @@ def train( if step == steps_per_epoch: save_path = checkpoint_manager.save() - logger.info("Saved checkpoint for epoch={}: {}".format(epoch, save_path)) + logger.info(f"Saved checkpoint for epoch={epoch}: {save_path}") break compression_ctrl.scheduler.step() @@ -201,7 +201,8 @@ def train( train_metric_result = tf.nest.map_structure(lambda s: s.numpy().astype(float), train_loss) if np.isnan(train_metric_result["total_loss"]): - raise ValueError("total loss is NaN") + msg = "total loss is NaN" + raise ValueError(msg) train_metric_result.update({"learning_rate": get_learning_rate(optimizer, optimizer.iterations)}) @@ -209,8 +210,8 @@ def train( if step % print_freq == 0: time = timer.toc(average=False) - logger.info("Step: {}/{} Time: {:.3f} sec".format(step, steps_per_epoch, time)) - logger.info("Training metric = {}".format(train_metric_result)) + logger.info(f"Step: {step}/{steps_per_epoch} Time: {time:.3f} sec") + logger.info(f"Training metric = {train_metric_result}") timer.tic() statistics = compression_ctrl.statistics() @@ -325,7 +326,8 @@ def main(argv): create_code_snapshot(nncf_root, os.path.join(config.log_dir, "snapshot.tar.gz")) if config.dataset_type != "tfrecords": - raise nncf.ValidationError("The train.py does not support TensorFlow Datasets (TFDS). Please use TFRecords.") + msg = "The train.py does not support TensorFlow Datasets (TFDS). Please use TFRecords." 
+ raise nncf.ValidationError(msg) run_train(config) diff --git a/examples/torch/classification/main.py b/examples/torch/classification/main.py index 6d852d1b917..ae569322e6c 100644 --- a/examples/torch/classification/main.py +++ b/examples/torch/classification/main.py @@ -256,7 +256,7 @@ def model_eval_fn(model): ) ) else: - logger.info("=> loaded checkpoint '{}'".format(resuming_checkpoint_path)) + logger.info(f"=> loaded checkpoint '{resuming_checkpoint_path}'") if config.execution_mode != ExecutionMode.CPU_ONLY: cudnn.benchmark = True @@ -391,7 +391,7 @@ def train( make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config) for key, value in prepare_for_tensorboard(statistics).items(): - config.tb.add_scalar("compression/statistics/{0}".format(key), value, len(train_loader) * epoch) + config.tb.add_scalar(f"compression/statistics/{key}", value, len(train_loader) * epoch) def get_dataset(dataset_config, config, transform, is_train): @@ -644,7 +644,7 @@ def train_epoch( loss=losses, top1=top1, top5=top5, - rank="{}:".format(config.rank) if config.multiprocessing_distributed else "", + rank=f"{config.rank}:" if config.multiprocessing_distributed else "", ) ) @@ -659,7 +659,7 @@ def train_epoch( statistics = compression_ctrl.statistics(quickly_collected_only=True) for stat_name, stat_value in prepare_for_tensorboard(statistics).items(): - config.tb.add_scalar("train/statistics/{}".format(stat_name), stat_value, i + global_step) + config.tb.add_scalar(f"train/statistics/{stat_name}", stat_value, i + global_step) if i >= train_iters: break @@ -710,7 +710,7 @@ def validate(val_loader, model, criterion, config, epoch=0, log_validation_info= loss=losses, top1=top1, top5=top5, - rank="{}:".format(config.rank) if config.multiprocessing_distributed else "", + rank=f"{config.rank}:" if config.multiprocessing_distributed else "", ) ) @@ -719,7 +719,7 @@ def validate(val_loader, model, criterion, config, epoch=0, log_validation_info= config.tb.add_scalar("val/top1", top1.avg, len(val_loader) * epoch) config.tb.add_scalar("val/top5", top5.avg, len(val_loader) * epoch) - logger.info(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}\n".format(top1=top1, top5=top5)) + logger.info(f" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}\n") if is_main_process() and config.metrics_dump is not None: acc = top1.avg / 100 diff --git a/examples/torch/classification/models/mobilenet_v2_32x32.py b/examples/torch/classification/models/mobilenet_v2_32x32.py index 0075aacda29..6ed6ee96a52 100644 --- a/examples/torch/classification/models/mobilenet_v2_32x32.py +++ b/examples/torch/classification/models/mobilenet_v2_32x32.py @@ -152,10 +152,8 @@ def __init__( # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: - raise ValueError( - "inverted_residual_setting should be non-empty " - "or a 4-element list, got {}".format(inverted_residual_setting) - ) + msg = f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}" + raise ValueError(msg) # building first layer input_channel = _make_divisible(input_channel * width_mult, round_nearest) diff --git a/examples/torch/classification/staged_quantization_worker.py b/examples/torch/classification/staged_quantization_worker.py index 8c29236cfc5..c3ab45638c1 100644 --- a/examples/torch/classification/staged_quantization_worker.py +++ b/examples/torch/classification/staged_quantization_worker.py @@ -186,9 +186,8 @@ def 
autoq_eval_fn(model, eval_loader): load_state(model, model_state_dict, is_resume=True) if not isinstance(compression_ctrl, QuantizationController): - raise nncf.InternalError( - "The stage quantization sample worker may only be run with the quantization algorithms!" - ) + msg = "The staged quantization sample worker may only be run with the quantization algorithms!" + raise nncf.InternalError(msg) model, _ = prepare_model_for_execution(model, config) original_model.to(config.device) @@ -219,7 +218,7 @@ def autoq_eval_fn(model, eval_loader): ) ) else: - logger.info("=> loaded checkpoint '{}'".format(resuming_checkpoint_path)) + logger.info(f"=> loaded checkpoint '{resuming_checkpoint_path}'") if is_export_only: export_model(compression_ctrl, config) @@ -340,7 +339,7 @@ def train_staged( make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config) for key, value in prepare_for_tensorboard(statistics).items(): - config.tb.add_scalar("compression/statistics/{0}".format(key), value, len(train_loader) * epoch) + config.tb.add_scalar(f"compression/statistics/{key}", value, len(train_loader) * epoch) def train_epoch_staged( @@ -437,7 +436,7 @@ def train_epoch_staged( loss=losses, top1=top1, top5=top5, - rank="{}:".format(config.rank) if config.multiprocessing_distributed else "", + rank=f"{config.rank}:" if config.multiprocessing_distributed else "", ) ) @@ -452,7 +451,7 @@ def train_epoch_staged( statistics = compression_ctrl.statistics(quickly_collected_only=True) for stat_name, stat_value in prepare_for_tensorboard(statistics).items(): - config.tb.add_scalar("train/statistics/{}".format(stat_name), stat_value, i + global_step) + config.tb.add_scalar(f"train/statistics/{stat_name}", stat_value, i + global_step) def get_wd(optimizer): diff --git a/examples/torch/common/argparser.py b/examples/torch/common/argparser.py index b17b9888d0f..059ddb1578a 100644 --- a/examples/torch/common/argparser.py +++ b/examples/torch/common/argparser.py @@ -205,5 +205,6 @@ def get_common_argument_parser(): def parse_args(parser, argv): args = parser.parse_args(argv) if "export" in args.mode and args.export_model_path is None: - raise nncf.ValidationError("--mode export requires --export-model-path argument to be set") + msg = "--mode export requires --export-model-path argument to be set" + raise nncf.ValidationError(msg) return args diff --git a/examples/torch/common/distributed.py b/examples/torch/common/distributed.py index 50b2d90314e..0812e37fb63 100644 --- a/examples/torch/common/distributed.py +++ b/examples/torch/common/distributed.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import os import torch @@ -33,7 +31,7 @@ def configure_distributed(config): # default device (E.g.
NMS kernel - https://github.com/facebookresearch/maskrcnn-benchmark/issues/74) torch.cuda.set_device(config.current_gpu) - logger.info("| distributed init (rank {}): {}".format(config.rank, config.dist_url)) + logger.info(f"| distributed init (rank {config.rank}): {config.dist_url}") dist.init_process_group( backend=config.dist_backend, init_method=config.dist_url, world_size=config.world_size, rank=config.rank ) @@ -45,11 +43,13 @@ def __init__(self, dataset, rank=None, world_size=None): super().__init__(dataset) if world_size is None: if not dist.is_available(): - raise nncf.ValidationError("Requires distributed package to be available") + msg = "Requires distributed package to be available" + raise nncf.ValidationError(msg) world_size = dist.get_world_size() if rank is None: if not dist.is_available(): - raise nncf.ValidationError("Requires distributed package to be available") + msg = "Requires distributed package to be available" + raise nncf.ValidationError(msg) rank = dist.get_rank() self.world_size = world_size self.rank = rank diff --git a/examples/torch/common/execution.py b/examples/torch/common/execution.py index aef39107755..3ded41f5c64 100644 --- a/examples/torch/common/execution.py +++ b/examples/torch/common/execution.py @@ -47,7 +47,7 @@ def get_device(config): if config.execution_mode == ExecutionMode.CPU_ONLY: return "cpu" if config.current_gpu is not None: - return "cuda:{}".format(config.current_gpu) + return f"cuda:{config.current_gpu}" return "cuda" diff --git a/examples/torch/common/export.py b/examples/torch/common/export.py index 9bb97f92e96..195e4cc55e2 100644 --- a/examples/torch/common/export.py +++ b/examples/torch/common/export.py @@ -66,5 +66,6 @@ def export_model(ctrl: CompressionAlgorithmController, config: SampleConfig) -> input_node.node.set_friendly_name(input_name) ov.save_model(ov_model, model_path) else: - raise ValueError(f"--export-model-path argument should have suffix `.xml` or `.onnx` but got {extension}") + msg = f"--export-model-path argument should have suffix `.xml` or `.onnx` but got {extension}" + raise ValueError(msg) logger.info(f"Saved to {model_path}") diff --git a/examples/torch/common/model_loader.py b/examples/torch/common/model_loader.py index ffaed6de3bc..46aec5602b9 100644 --- a/examples/torch/common/model_loader.py +++ b/examples/torch/common/model_loader.py @@ -36,7 +36,7 @@ def load_model( arbitrary code execution during unpickling. Only load the data you trust. 
""" - logger.info("Loading model: {}".format(model)) + logger.info(f"Loading model: {model}") if model_params is None: model_params = {} if model in torchvision.models.__dict__: @@ -50,7 +50,8 @@ def load_model( elif model == "mobilenet_v2_32x32": load_model_fn = partial(MobileNetV2For32x32, num_classes=100) else: - raise Exception("Undefined model name") + msg = "Undefined model name" + raise Exception(msg) loaded_model = safe_thread_call(load_model_fn) if not pretrained and weights_path is not None: # Check if provided path is a url and download the checkpoint if yes @@ -69,10 +70,11 @@ def load_model( def load_resuming_checkpoint(resuming_checkpoint_path: str): if osp.isfile(resuming_checkpoint_path): - logger.info("=> loading checkpoint '{}'".format(resuming_checkpoint_path)) + logger.info(f"=> loading checkpoint '{resuming_checkpoint_path}'") checkpoint = torch.load(resuming_checkpoint_path, map_location="cpu", pickle_module=restricted_pickle_module) return checkpoint - raise FileNotFoundError("no checkpoint found at '{}'".format(resuming_checkpoint_path)) + msg = f"no checkpoint found at '{resuming_checkpoint_path}'" + raise FileNotFoundError(msg) def extract_model_and_compression_states(resuming_checkpoint: Optional[Dict] = None): diff --git a/examples/torch/common/models/classification/mobilenet_v2_tv_092.py b/examples/torch/common/models/classification/mobilenet_v2_tv_092.py index 912e6415d04..a733d5f2c85 100644 --- a/examples/torch/common/models/classification/mobilenet_v2_tv_092.py +++ b/examples/torch/common/models/classification/mobilenet_v2_tv_092.py @@ -155,10 +155,8 @@ def __init__( # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: - raise ValueError( - "inverted_residual_setting should be non-empty " - "or a 4-element list, got {}".format(inverted_residual_setting) - ) + msg = f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}" + raise ValueError(msg) # building first layer input_channel = _make_divisible(input_channel * width_mult, round_nearest) diff --git a/examples/torch/common/models/classification/mobilenet_v3_tv_092.py b/examples/torch/common/models/classification/mobilenet_v3_tv_092.py index dfa4f65986f..e67a21fc045 100644 --- a/examples/torch/common/models/classification/mobilenet_v3_tv_092.py +++ b/examples/torch/common/models/classification/mobilenet_v3_tv_092.py @@ -86,7 +86,8 @@ def __init__( ): super().__init__() if not 1 <= cnf.stride <= 2: - raise ValueError("illegal stride value") + msg = "illegal stride value" + raise ValueError(msg) self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels @@ -166,12 +167,14 @@ def __init__( super().__init__() if not inverted_residual_setting: - raise ValueError("The inverted_residual_setting should not be empty") + msg = "The inverted_residual_setting should not be empty" + raise ValueError(msg) if not ( isinstance(inverted_residual_setting, Sequence) and all(isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting) ): - raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]") + msg = "The inverted_residual_setting should be List[InvertedResidualConfig]" + raise TypeError(msg) if block is None: block = InvertedResidual @@ -290,7 +293,8 @@ def _mobilenet_v3_conf(arch: str, params: Dict[str, Any]): ] last_channel = adjust_channels(1024 // reduce_divider) # C5 else: - raise ValueError("Unsupported 
model type {}".format(arch)) + msg = f"Unsupported model type {arch}" + raise ValueError(msg) return inverted_residual_setting, last_channel @@ -306,7 +310,8 @@ def _mobilenet_v3_model( model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if pretrained: if model_urls.get(arch) is None: - raise ValueError("No checkpoint is available for model type {}".format(arch)) + msg = f"No checkpoint is available for model type {arch}" + raise ValueError(msg) state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) return model diff --git a/examples/torch/common/models/classification/resnet_cifar10.py b/examples/torch/common/models/classification/resnet_cifar10.py index 618b4170dba..0ebe4466595 100644 --- a/examples/torch/common/models/classification/resnet_cifar10.py +++ b/examples/torch/common/models/classification/resnet_cifar10.py @@ -59,9 +59,11 @@ def __init__( if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: - raise ValueError("BasicBlock only supports groups=1 and base_width=64") + msg = "BasicBlock only supports groups=1 and base_width=64" + raise ValueError(msg) if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + msg = "Dilation > 1 not supported in BasicBlock" + raise NotImplementedError(msg) # Both self.conv1 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) @@ -166,10 +168,11 @@ def __init__( # the 2x2 stride with a dilated convolution instead replace_stride_with_dilation = [False, False, False] if len(replace_stride_with_dilation) != 3: - raise ValueError( + msg = ( "replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation) + f"or a 3-element tuple, got {replace_stride_with_dilation}" ) + raise ValueError(msg) self.groups = groups self.base_width = width_per_group diff --git a/examples/torch/common/models/classification/rmnet_cifar.py b/examples/torch/common/models/classification/rmnet_cifar.py index a941e295fd5..f9dc5d4ade3 100644 --- a/examples/torch/common/models/classification/rmnet_cifar.py +++ b/examples/torch/common/models/classification/rmnet_cifar.py @@ -110,7 +110,7 @@ def __init__( stage.append(block(w, wb, w)) stages.append(nn.Sequential(*stage)) - self.stages = nn.Sequential(OrderedDict([("stage_{}".format(i), stage) for i, stage in enumerate(stages)])) + self.stages = nn.Sequential(OrderedDict([(f"stage_{i}", stage) for i, stage in enumerate(stages)])) self.init_weights() diff --git a/examples/torch/common/models/segmentation/enet.py b/examples/torch/common/models/segmentation/enet.py index 69ee2d57243..5ebeda26eb4 100644 --- a/examples/torch/common/models/segmentation/enet.py +++ b/examples/torch/common/models/segmentation/enet.py @@ -135,10 +135,11 @@ def __init__( # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > channels: - raise nncf.ValidationError( + msg = ( "Value out of range. Expected value in the " - "interval [1, {0}], got internal_scale={1}.".format(channels, internal_ratio) + f"interval [1, {channels}], got internal_scale={internal_ratio}." 
) + raise nncf.ValidationError(msg) internal_channels = channels // internal_ratio @@ -293,10 +294,11 @@ def __init__( # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > in_channels: - raise nncf.ValidationError( + msg = ( "Value out of range. Expected value in the " - "interval [1, {0}], got internal_scale={1}. ".format(in_channels, internal_ratio) + f"interval [1, {in_channels}], got internal_scale={internal_ratio}. " ) + raise nncf.ValidationError(msg) internal_channels = in_channels // internal_ratio @@ -427,10 +429,11 @@ def __init__( # Check in the internal_scale parameter is within the expected range # [1, channels] if internal_ratio <= 1 or internal_ratio > in_channels: - raise nncf.ValidationError( + msg = ( "Value out of range. Expected value in the " - "interval [1, {0}], got internal_scale={1}. ".format(in_channels, internal_ratio) + f"interval [1, {in_channels}], got internal_scale={internal_ratio}. " ) + raise nncf.ValidationError(msg) internal_channels = in_channels // internal_ratio diff --git a/examples/torch/common/models/segmentation/icnet.py b/examples/torch/common/models/segmentation/icnet.py index de81a30f5f2..e57f2f12b36 100644 --- a/examples/torch/common/models/segmentation/icnet.py +++ b/examples/torch/common/models/segmentation/icnet.py @@ -334,7 +334,8 @@ def __init__(self, input_size_hw, in_channels=3, n_classes=20, backbone="icnet") for bin_dim in self.ppm.bin_dimensions: required_alignment = lcm(required_alignment, bin_dim) if (input_size_hw[0] % required_alignment) or (input_size_hw[1] % required_alignment): - raise ValueError("ICNet may only operate on {}-aligned input resolutions".format(required_alignment)) + msg = f"ICNet may only operate on {required_alignment}-aligned input resolutions" + raise ValueError(msg) # Weight initialization # for module in self.modules(): # if isinstance(module, nn.Conv2d): diff --git a/examples/torch/common/models/segmentation/unet.py b/examples/torch/common/models/segmentation/unet.py index 9dc96ad93e4..4919d5dae7e 100644 --- a/examples/torch/common/models/segmentation/unet.py +++ b/examples/torch/common/models/segmentation/unet.py @@ -57,7 +57,8 @@ def __init__( super().__init__() assert up_mode in ("upconv", "upsample") if (input_size_hw[0] % 2 ** (depth - 1)) or (input_size_hw[1] % 2 ** (depth - 1)): - raise ValueError("UNet may only operate on input resolutions aligned to 2**(depth - 1)") + msg = "UNet may only operate on input resolutions aligned to 2**(depth - 1)" + raise ValueError(msg) self.padding = padding self.depth = depth prev_channels = in_channels diff --git a/examples/torch/common/optimizer.py b/examples/torch/common/optimizer.py index c9505c31840..ca56ea30555 100644 --- a/examples/torch/common/optimizer.py +++ b/examples/torch/common/optimizer.py @@ -53,7 +53,8 @@ def make_optimizer(params_to_optimize, config): optim_params = optim_config.get("optimizer_params", {"momentum": 0.9}) optim = SGD(params_to_optimize, **optim_params) else: - raise KeyError("Unknown optimizer type: {}".format(optim_type)) + msg = f"Unknown optimizer type: {optim_type}" + raise KeyError(msg) scheduler_type = optim_config.get("schedule_type", "step").lower() scheduler_params = optim_config.get("schedule_params", optim_config.get("scheduler_params", {})) @@ -76,7 +77,8 @@ def make_optimizer(params_to_optimize, config): elif scheduler_type == "exponential": scheduler = ExponentialLR(optim, gamma) else: - raise KeyError("Unknown scheduler type: 
{}".format(scheduler_type)) + msg = f"Unknown scheduler type: {scheduler_type}" + raise KeyError(msg) return optim, scheduler diff --git a/examples/torch/common/restricted_pickle_module.py b/examples/torch/common/restricted_pickle_module.py index d6c0af5239e..e3f43f657c5 100644 --- a/examples/torch/common/restricted_pickle_module.py +++ b/examples/torch/common/restricted_pickle_module.py @@ -57,4 +57,5 @@ def find_class(self, module_name, class_name): return getattr(module, class_name) # Forbid everything else. - raise pickle.UnpicklingError("global '%s.%s' is forbidden" % (module_name, class_name)) + msg = f"global '{module_name}.{class_name}' is forbidden" + raise pickle.UnpicklingError(msg) diff --git a/examples/torch/common/utils.py b/examples/torch/common/utils.py index 4ec77065e97..8c396403533 100644 --- a/examples/torch/common/utils.py +++ b/examples/torch/common/utils.py @@ -59,11 +59,11 @@ def get_run_name(config: SampleConfig) -> str: weights = algo_dict.get("weights", {}) w_bits = weights.get("bits", QUANTIZATION_BITS) if a_bits == w_bits: - retval += "_int{}".format(a_bits) + retval += f"_int{a_bits}" else: - retval += "_a_int{}_w_int{}".format(a_bits, w_bits) + retval += f"_a_int{a_bits}_w_int{w_bits}" else: - retval += "_{}".format(algo_name) + retval += f"_{algo_name}" return retval @@ -121,7 +121,7 @@ def create_code_snapshot(root, dst_path, extensions=(".py", ".json", ".cpp", ".c def print_args(config, logger=default_logger): logger.info("\nConfiguration parameters:") for arg in sorted(config): - logger.info("{: <27s}: {}".format(arg, config.get(arg))) + logger.info(f"{arg: <27s}: {config.get(arg)}") logger.info("\n") diff --git a/examples/torch/object_detection/datasets/coco.py b/examples/torch/object_detection/datasets/coco.py index feed62ceb7e..2a0d30e83d4 100644 --- a/examples/torch/object_detection/datasets/coco.py +++ b/examples/torch/object_detection/datasets/coco.py @@ -58,14 +58,15 @@ def _read_coco_annotation(annotation_file, images_folder): annotation = json_annotation["annotations"] for imgAnnotation in annotation: - img_path = images_folder / "{0:012d}.jpg".format(imgAnnotation["image_id"]) + img_path = images_folder / "{:012d}.jpg".format(imgAnnotation["image_id"]) name = str(imgAnnotation["category_id"]) label_idx = COCO_NAMES.index(name) bbox = imgAnnotation["bbox"] if bbox is None or bbox == "": - raise ValueError("No annotation for {}".format(img_path)) + msg = f"No annotation for {img_path}" + raise ValueError(msg) bbox[2] = bbox[0] + bbox[2] bbox[3] = bbox[1] + bbox[3] diff --git a/examples/torch/object_detection/datasets/voc0712.py b/examples/torch/object_detection/datasets/voc0712.py index 4cf31110ab0..d95dad507ad 100644 --- a/examples/torch/object_detection/datasets/voc0712.py +++ b/examples/torch/object_detection/datasets/voc0712.py @@ -10,23 +10,18 @@ # limitations under the License. 
import os import os.path -import sys from pathlib import Path from pathlib import PurePath from typing import Callable, Dict, Optional, Tuple import cv2 +import defusedxml.ElementTree as ET from PIL.Image import Image from torch.utils import data from torchvision import datasets from examples.torch.object_detection.utils.augmentations import Compose -if sys.version_info[0] == 2: - import defusedxml.cElementTree as ET -else: - import defusedxml.ElementTree as ET - VOC_CLASSES = ( # always index 0 "aeroplane", "bicycle", diff --git a/examples/torch/object_detection/eval.py b/examples/torch/object_detection/eval.py index 2e4b48aafd6..a851493bcd9 100644 --- a/examples/torch/object_detection/eval.py +++ b/examples/torch/object_detection/eval.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import json import os import pathlib @@ -91,9 +89,9 @@ def evaluate_detections(box_list, dataset, use_07=True): class_boxes, dataset, cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric ) aps += [ap] - logger.info("AP for {} = {:.4f}".format(cls, ap)) + logger.info(f"AP for {cls} = {ap:.4f}") mAp = np.mean(aps) - logger.info("Mean AP = {:.4f}".format(mAp)) + logger.info(f"Mean AP = {mAp:.4f}") return mAp @@ -195,7 +193,7 @@ def extract_gt_bboxes(classname, dataset, gt, imagenames): def load_detection_annotations(cachedir, dataset): - cachefile = os.path.join(cachedir, "annots_{}.json".format(dataset.name)) + cachefile = os.path.join(cachedir, f"annots_{dataset.name}.json") imagenames = dataset.get_img_names() if is_main_process() and not os.path.isfile(cachefile): # load annots @@ -204,15 +202,15 @@ def load_detection_annotations(cachedir, dataset): _, gt[imagename] = dataset.pull_anno(i) if i % 100 == 0: - logger.info("Reading annotation for {:d}/{:d}".format(i + 1, len(imagenames))) + logger.info(f"Reading annotation for {i + 1:d}/{len(imagenames):d}") # save - logger.info("Saving cached annotations to {:s}".format(cachefile)) + logger.info(f"Saving cached annotations to {cachefile:s}") pathlib.Path(cachedir).mkdir(parents=True, exist_ok=True) with open(cachefile, "w", encoding="utf8") as f: json.dump(gt, f) if is_dist_avail_and_initialized(): dist.barrier() - with open(cachefile, "r", encoding="utf8") as f: + with open(cachefile, encoding="utf8") as f: gt = json.load(f) return gt, imagenames @@ -293,7 +291,7 @@ def predict_detections(data_loader, device, net): batch_detections[..., 6] *= hs all_detections.append(batch_detections.cpu()) - logger.info("Detect for batch: {:d}/{:d} {:.3f}s".format(batch_ind + 1, num_batches, detect_time)) + logger.info(f"Detect for batch: {batch_ind + 1:d}/{num_batches:d} {detect_time:.3f}s") if all_detections: return torch.cat(all_detections) return None # No predictions @@ -331,17 +329,10 @@ def eval_net_loss(data_loader, device, net, criterion): if batch_ind % print_freq == 0: logger.info( - "Loss_inference: [{}/{}] || Time: {elapsed.val:.4f}s ({elapsed.avg:.4f}s)" - " || Conf Loss: {conf_loss.val:.3f} ({conf_loss.avg:.3f})" - " || Loc Loss: {loc_loss.val:.3f} ({loc_loss.avg:.3f})" - " || Model Loss: {model_loss.val:.3f} ({model_loss.avg:.3f})".format( - batch_ind, - num_batches, - elapsed=t_elapsed, - conf_loss=batch_loss_c, - loc_loss=batch_loss_l, - model_loss=batch_loss, - ) + f"Loss_inference: [{batch_ind}/{num_batches}] || Time: {t_elapsed.val:.4f}s ({t_elapsed.avg:.4f}s)" + f" || Conf Loss: {batch_loss_c.val:.3f} ({batch_loss_c.avg:.3f})" + f" || Loc 
Loss: {batch_loss_l.val:.3f} ({batch_loss_l.avg:.3f})" + f" || Model Loss: {batch_loss.val:.3f} ({batch_loss.avg:.3f})" ) model_loss = batch_loss_l.avg + batch_loss_c.avg @@ -370,7 +361,8 @@ def restore_bn_module_mode(module): if distributed: raise NotImplementedError if criterion is None: - raise ValueError("Missing loss inference function (criterion)") + msg = "Missing loss inference function (criterion)" + raise ValueError(msg) output = eval_net_loss(data_loader, device, net, criterion) net.apply(restore_bn_module_mode) return output diff --git a/examples/torch/object_detection/layers/functions/detection.py b/examples/torch/object_detection/layers/functions/detection.py index 728b7aaaadd..3781f702dfd 100644 --- a/examples/torch/object_detection/layers/functions/detection.py +++ b/examples/torch/object_detection/layers/functions/detection.py @@ -94,7 +94,8 @@ def forward(ctx, loc_data, conf_data, prior_data, detection_output_params): """ with no_jit_trace(), no_nncf_trace(): if detection_output_params.nms_threshold <= 0: - raise ValueError("nms_threshold must be non negative.") + msg = "nms_threshold must be positive." + raise ValueError(msg) device = loc_data.device batch_size = loc_data.size(0) # batch size num_priors = int(loc_data.size(1) / 4) diff --git a/examples/torch/object_detection/layers/functions/prior_box.py b/examples/torch/object_detection/layers/functions/prior_box.py index bbac779586d..5ca6c7917c7 100644 --- a/examples/torch/object_detection/layers/functions/prior_box.py +++ b/examples/torch/object_detection/layers/functions/prior_box.py @@ -9,8 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division - from itertools import product from math import sqrt from typing import Any @@ -87,7 +85,8 @@ def symbolic(g, input_fm, img_tensor, priorbox_params): def forward(ctx, input_fm, img_tensor, priorbox_params): for v in priorbox_params.variance: if v <= 0: - raise ValueError("Variances must be greater than 0") + msg = "Variances must be greater than 0" + raise ValueError(msg) mean = [] variance_channel = [] diff --git a/examples/torch/object_detection/main.py b/examples/torch/object_detection/main.py index 2077934ff30..76cc0efb25c 100644 --- a/examples/torch/object_detection/main.py +++ b/examples/torch/object_detection/main.py @@ -290,7 +290,7 @@ def configure_optimizers_fn(): loss_inference=True, criterion=criterion, ) - logger.info("Final model loss: {:.3f}".format(model_loss)) + logger.info(f"Final model loss: {model_loss:.3f}") else: mAp = test_net(val_net, config.device, test_data_loader, distributed=config.distributed) if config.metrics_dump is not None: @@ -303,7 +303,7 @@ def configure_optimizers_fn(): def create_dataloaders(config): logger.info("Loading Dataset...") train_dataset = get_training_dataset(config.dataset, config.train_anno, config.train_imgs, config) - logger.info("Loaded {} training images".format(len(train_dataset))) + logger.info(f"Loaded {len(train_dataset)} training images") if config.distributed: sampler_seed = 0 if config.seed is None else config.seed dist_sampler_shuffle = config.seed is None @@ -337,7 +337,7 @@ def create_train_data_loader(batch_size): init_data_loader = deepcopy(train_data_loader) test_dataset = get_testing_dataset(config.dataset, config.test_anno, config.test_imgs, config) - logger.info("Loaded {} testing images".format(len(test_dataset))) + logger.info(f"Loaded {len(test_dataset)} testing images") if config.distributed: test_sampler =
DistributedSampler(test_dataset, config.rank, config.world_size) else: @@ -415,7 +415,7 @@ def train(net, compression_ctrl, train_data_loader, test_data_loader, criterion, conf_loss = 0 epoch_size = len(train_data_loader) - logger.info("Training {} on {} dataset...".format(config.model, train_data_loader.dataset.name)) + logger.info(f"Training {config.model} on {train_data_loader.dataset.name} dataset...") best_mAp = 0 best_compression_stage = CompressionStage.UNCOMPRESSED @@ -461,9 +461,9 @@ def train(net, compression_ctrl, train_data_loader, test_data_loader, criterion, net.train() if is_on_first_rank(config): - logger.info("Saving state, epoch: {}".format(epoch)) + logger.info(f"Saving state, epoch: {epoch}") - checkpoint_file_path = osp.join(config.checkpoint_save_dir, "{}_last.pth".format(get_run_name(config))) + checkpoint_file_path = osp.join(config.checkpoint_save_dir, f"{get_run_name(config)}_last.pth") torch.save( { MODEL_STATE_ATTR: net.state_dict(), diff --git a/examples/torch/object_detection/models/ssd_mobilenet.py b/examples/torch/object_detection/models/ssd_mobilenet.py index 420e2f1fa16..cced1c92a84 100644 --- a/examples/torch/object_detection/models/ssd_mobilenet.py +++ b/examples/torch/object_detection/models/ssd_mobilenet.py @@ -98,7 +98,8 @@ def forward(self, x): def build_ssd_mobilenet(cfg, size, num_classes, config): if size != 300: - raise ValueError("Only Mobilenet-SSD with input size 300 is supported") + msg = "Only Mobilenet-SSD with input size 300 is supported" + raise ValueError(msg) mobilenet_ssd = MobileNetSSD(num_classes, cfg) if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None): diff --git a/examples/torch/semantic_segmentation/datasets/camvid.py b/examples/torch/semantic_segmentation/datasets/camvid.py index 51b757e23e4..9fdfd56e560 100644 --- a/examples/torch/semantic_segmentation/datasets/camvid.py +++ b/examples/torch/semantic_segmentation/datasets/camvid.py @@ -105,7 +105,8 @@ def __init__(self, root, image_set="train", transforms=None, loader=data_utils.p os.path.join(self.root_dir, self.test_lbl_folder), extension_filter=self.img_extension ) else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) def __getitem__(self, index): """ @@ -124,7 +125,8 @@ def __getitem__(self, index): elif self.mode.lower() == "test": data_path, label_path = self.test_data[index], self.test_labels[index] else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) img, label = self.loader(data_path, label_path) @@ -142,4 +144,5 @@ def __len__(self): if self.mode.lower() == "test": return len(self.test_data) - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. 
Supported modes are: train, val and test" + raise nncf.ValidationError(msg) diff --git a/examples/torch/semantic_segmentation/datasets/cityscapes.py b/examples/torch/semantic_segmentation/datasets/cityscapes.py index 67360451435..3f352d45c98 100644 --- a/examples/torch/semantic_segmentation/datasets/cityscapes.py +++ b/examples/torch/semantic_segmentation/datasets/cityscapes.py @@ -194,7 +194,8 @@ def __init__(self, root, image_set="train", transforms=None, loader=data_utils.p extension_filter=self.img_extension, ) else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) def __getitem__(self, index): """ @@ -213,7 +214,8 @@ def __getitem__(self, index): elif self.mode.lower() == "test": data_path, label_path = self.test_data[index], self.test_labels[index] else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) img, label = self.loader(data_path, label_path) @@ -234,4 +236,5 @@ def __len__(self): if self.mode.lower() == "test": return len(self.test_data) - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) diff --git a/examples/torch/semantic_segmentation/datasets/mapillary.py b/examples/torch/semantic_segmentation/datasets/mapillary.py index 7d0352e572b..fdac5f0485c 100644 --- a/examples/torch/semantic_segmentation/datasets/mapillary.py +++ b/examples/torch/semantic_segmentation/datasets/mapillary.py @@ -114,7 +114,8 @@ def __init__(self, root, image_set="train", transforms=None, loader=data_utils.p os.path.join(self.root_dir, self.test_lbl_folder), extension_filter=self.label_extension ) else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) def __getitem__(self, index): """ @@ -133,7 +134,8 @@ def __getitem__(self, index): elif self.mode.lower() == "test": data_path, label_path = self.test_data[index], self.test_labels[index] else: - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. Supported modes are: train, val and test" + raise nncf.ValidationError(msg) img, color_labels = self.loader(data_path, label_path) label = data_utils.color_to_label(color_labels, self.color_encoding) @@ -152,4 +154,5 @@ def __len__(self): if self.mode.lower() == "test": return len(self.test_data) - raise nncf.ValidationError("Unexpected dataset mode. Supported modes are: train, val and test") + msg = "Unexpected dataset mode. 
Supported modes are: train, val and test" + raise nncf.ValidationError(msg) diff --git a/examples/torch/semantic_segmentation/main.py b/examples/torch/semantic_segmentation/main.py index 597f3abcdb8..7c5d1ea91f9 100644 --- a/examples/torch/semantic_segmentation/main.py +++ b/examples/torch/semantic_segmentation/main.py @@ -113,7 +113,7 @@ def get_joint_transforms(is_train, config): def get_class_weights(train_set, num_classes, config): # Get class weights from the selected weighing technique - logger.info("\nWeighing technique: {}".format(config.weighing)) + logger.info(f"\nWeighing technique: {config.weighing}") weighing = config.get("weighing", "none") if isinstance(weighing, list): # Class weights were directly specified in config @@ -148,15 +148,16 @@ def get_dataset(dataset_name: str) -> torch.utils.data.Dataset: from examples.torch.semantic_segmentation.datasets import Mapillary as dataset else: # Should never happen...but just in case it does - raise nncf.UnsupportedDatasetError('"{0}" is not a supported dataset.'.format(dataset_name)) + msg = f'"{dataset_name}" is not a supported dataset.' + raise nncf.UnsupportedDatasetError(msg) return dataset def load_dataset(dataset, config): logger.info("\nLoading dataset...\n") - logger.info("Selected dataset: {}".format(config.dataset)) - logger.info("Dataset directory: {}".format(config.dataset_dir)) + logger.info(f"Selected dataset: {config.dataset}") + logger.info(f"Dataset directory: {config.dataset_dir}") transforms_train = get_joint_transforms(is_train=True, config=config) transforms_val = get_joint_transforms(is_train=False, config=config) @@ -222,18 +223,18 @@ def create_train_data_loader(batch_size_): num_classes = len(class_encoding) # Print information for debugging - logger.info("Number of classes to predict: {}".format(num_classes)) - logger.info("Train dataset size: {}".format(len(train_set))) - logger.info("Validation dataset size: {}".format(len(val_set))) + logger.info(f"Number of classes to predict: {num_classes}") + logger.info(f"Train dataset size: {len(train_set)}") + logger.info(f"Validation dataset size: {len(val_set)}") # Get a batch of samples to display if "test" in config.mode and "train" not in config.mode: images, labels = next(iter(val_loader)) else: images, labels = next(iter(train_loader)) - logger.info("Image size: {}".format(images.size())) - logger.info("Label size: {}".format(labels.size())) - logger.info("Class-color encoding: {}".format(class_encoding)) + logger.info(f"Image size: {images.size()}") + logger.info(f"Label size: {labels.size()}") + logger.info(f"Class-color encoding: {class_encoding}") # Show a batch of samples and labels if config.imshow_batch and "test" not in config.mode: @@ -252,7 +253,7 @@ def create_train_data_loader(batch_size_): ignore_index = list(class_encoding).index("unlabeled") class_weights[ignore_index] = 0 - logger.info("Class weights: {}".format(class_weights)) + logger.info(f"Class weights: {class_weights}") return (train_loader, val_loader, init_loader), class_weights @@ -331,7 +332,7 @@ def train( start_epoch = resuming_checkpoint["epoch"] best_miou = resuming_checkpoint["miou"] - logger.info("Resuming from model: Start epoch = {0} | Best mean IoU = {1:.4f}".format(start_epoch, best_miou)) + logger.info(f"Resuming from model: Start epoch = {start_epoch} | Best mean IoU = {best_miou:.4f}") config.start_epoch = start_epoch # Start Training @@ -340,7 +341,7 @@ def train( for epoch in range(config.start_epoch, config.epochs): compression_ctrl.scheduler.epoch_step() - 
logger.info(">>>> [Epoch: {0:d}] Training".format(epoch)) + logger.info(f">>>> [Epoch: {epoch:d}] Training") if config.distributed: train_loader.sampler.set_epoch(epoch) @@ -350,7 +351,7 @@ def train( # Learning rate scheduling should be applied after optimizerā€™s update lr_scheduler.step(epoch) - logger.info(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".format(epoch, epoch_loss, miou)) + logger.info(f">>>> [Epoch: {epoch:d}] Avg. loss: {epoch_loss:.4f} | Mean IoU: {miou:.4f}") if is_main_process(): config.tb.add_scalar("train/loss", epoch_loss, epoch) @@ -360,20 +361,20 @@ def train( statistics = compression_ctrl.statistics(quickly_collected_only=True) for key, value in prepare_for_tensorboard(statistics).items(): - config.tb.add_scalar("compression/statistics/{0}".format(key), value, epoch) + config.tb.add_scalar(f"compression/statistics/{key}", value, epoch) if (epoch + 1) % config.save_freq == 0 or epoch + 1 == config.epochs: - logger.info(">>>> [Epoch: {0:d}] Validation".format(epoch)) + logger.info(f">>>> [Epoch: {epoch:d}] Validation") loss, (iou, miou) = val_obj.run_epoch(config.print_step) - logger.info(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".format(epoch, loss, miou)) + logger.info(f">>>> [Epoch: {epoch:d}] Avg. loss: {loss:.4f} | Mean IoU: {miou:.4f}") if is_main_process(): config.tb.add_scalar("val/mIoU", miou, epoch) config.tb.add_scalar("val/loss", loss, epoch) for i, (key, class_iou) in enumerate(zip(class_encoding.keys(), iou)): - config.tb.add_scalar("{}/mIoU_Cls{}_{}".format(config.dataset, i, key), class_iou, epoch) + config.tb.add_scalar(f"{config.dataset}/mIoU_Cls{i}_{key}", class_iou, epoch) compression_stage = compression_ctrl.compression_stage() is_best_by_miou = miou > best_miou and compression_stage == best_compression_stage @@ -392,7 +393,7 @@ def train( # Print per class IoU on last epoch or if best iou if epoch + 1 == config.epochs or is_best: for key, class_iou in zip(class_encoding.keys(), iou): - logger.info("{0}: {1:.4f}".format(key, class_iou)) + logger.info(f"{key}: {class_iou:.4f}") # Save the model if it's the best thus far if is_main_process(): @@ -424,13 +425,13 @@ def test(model, test_loader, criterion, class_encoding, config): loss, (iou, miou) = test_obj.run_epoch(config.print_step) class_iou = dict(zip(class_encoding.keys(), iou)) - logger.info(">>>> Avg. loss: {0:.4f} | Mean IoU: {1:.4f}".format(loss, miou)) + logger.info(f">>>> Avg. 
loss: {loss:.4f} | Mean IoU: {miou:.4f}") if config.metrics_dump is not None: write_metrics(miou, config.metrics_dump) # Print per class IoU for key, class_iou in zip(class_encoding.keys(), iou): - logger.info("{0}: {1:.4f}".format(key, class_iou)) + logger.info(f"{key}: {class_iou:.4f}") # Show a batch of samples and labels if config.imshow_batch: @@ -610,7 +611,7 @@ def configure_optimizers_fn(): val_model = model model_parameters = filter(lambda p: p.requires_grad, val_model.parameters()) params = sum(np.prod(p.size()) for p in model_parameters) - logger.info("Trainable argument count:{params}".format(params=params)) + logger.info(f"Trainable argument count:{params}") val_model = val_model.to(config.device) test(val_model, val_loader, criterion, color_encoding, config) @@ -630,7 +631,7 @@ def main(argv): config.log_dir = str(config.log_dir) configure_paths(config, get_run_name(config)) - logger.info("Save directory: {}".format(config.log_dir)) + logger.info(f"Save directory: {config.log_dir}") config.execution_mode = get_execution_mode(config) start_worker(main_worker, config) diff --git a/examples/torch/semantic_segmentation/metric/iou.py b/examples/torch/semantic_segmentation/metric/iou.py index d678f7d7e2e..82b03ab31d2 100644 --- a/examples/torch/semantic_segmentation/metric/iou.py +++ b/examples/torch/semantic_segmentation/metric/iou.py @@ -45,7 +45,8 @@ def __init__(self, num_classes, normalized=False, ignore_index=None): try: self.ignore_index = tuple(ignore_index) except TypeError as e: - raise ValueError("'ignore_index' must be an int or iterable") from e + msg = "'ignore_index' must be an int or iterable" + raise ValueError(msg) from e def reset(self): self.conf_metric.reset() diff --git a/examples/torch/semantic_segmentation/test.py b/examples/torch/semantic_segmentation/test.py index 2e48caf4cda..48a1eb9f46a 100644 --- a/examples/torch/semantic_segmentation/test.py +++ b/examples/torch/semantic_segmentation/test.py @@ -76,6 +76,6 @@ def run_epoch(self, iteration_loss=False): self.metric.add(metric_outputs.detach(), labels.detach()) if iteration_loss: - logger.info("[Step: {}] Iteration loss: {:.4f}".format(step, loss.item())) + logger.info(f"[Step: {step}] Iteration loss: {loss.item():.4f}") return epoch_loss / len(self.data_loader), self.metric.value() diff --git a/examples/torch/semantic_segmentation/train.py b/examples/torch/semantic_segmentation/train.py index 6bc6059e07f..5124005d105 100644 --- a/examples/torch/semantic_segmentation/train.py +++ b/examples/torch/semantic_segmentation/train.py @@ -84,6 +84,6 @@ def run_epoch(self, iteration_loss=False): self.metric.add(metric_outputs.detach(), labels.detach()) if iteration_loss: - logger.info("[Step: {}] Iteration loss: {:.4f}".format(step, loss.item())) + logger.info(f"[Step: {step}] Iteration loss: {loss.item():.4f}") return epoch_loss / len(self.data_loader), self.metric.value() diff --git a/examples/torch/semantic_segmentation/utils/checkpoint.py b/examples/torch/semantic_segmentation/utils/checkpoint.py index 562efe4a75a..777bbfb27c7 100644 --- a/examples/torch/semantic_segmentation/utils/checkpoint.py +++ b/examples/torch/semantic_segmentation/utils/checkpoint.py @@ -35,7 +35,7 @@ def save_checkpoint(model, compression_ctrl, optimizer, epoch, miou, config): name = config.name save_dir = config.checkpoint_save_dir - assert os.path.isdir(save_dir), 'The directory "{0}" doesn\'t exist.'.format(save_dir) + assert os.path.isdir(save_dir), f'The directory "{save_dir}" doesn\'t exist.' 
# Save model checkpoint_path = os.path.join(save_dir, name) + "_last.pth" diff --git a/examples/torch/semantic_segmentation/utils/data.py b/examples/torch/semantic_segmentation/utils/data.py index 24be1784f7b..68784f8937b 100644 --- a/examples/torch/semantic_segmentation/utils/data.py +++ b/examples/torch/semantic_segmentation/utils/data.py @@ -36,7 +36,8 @@ def get_files(folder, name_filter=None, extension_filter=None): """ if not os.path.isdir(folder): - raise nncf.InvalidPathError('"{0}" is not a folder.'.format(folder)) + msg = f'"{folder}" is not a folder.' + raise nncf.InvalidPathError(msg) # Filename filter: if not specified don't filter (condition always true); # otherwise, use a lambda expression to filter out files that do not @@ -233,10 +234,12 @@ def __call__(self, tensor): """ # Check if label_tensor is a LongTensor if not isinstance(tensor, torch.LongTensor): - raise TypeError("label_tensor should be torch.LongTensor. Got {}".format(type(tensor))) + msg = f"label_tensor should be torch.LongTensor. Got {type(tensor)}" + raise TypeError(msg) # Check if encoding is a ordered dictionary if not isinstance(self.rgb_encoding, OrderedDict): - raise TypeError("encoding should be an OrderedDict. Got {}".format(type(self.rgb_encoding))) + msg = f"encoding should be an OrderedDict. Got {type(self.rgb_encoding)}" + raise TypeError(msg) # label_tensor might be an image without a channel dimension, in this # case unsqueeze it @@ -365,9 +368,11 @@ def downsample_labels(labels, target_size=None, downsample_factor=None): H = labels.size()[1] W = labels.size()[2] if target_size is None and downsample_factor is None: - raise ValueError("Either target_size or downsample_factor must be specified") + msg = "Either target_size or downsample_factor must be specified" + raise ValueError(msg) if target_size is not None and downsample_factor is not None: - raise ValueError("Only one of the target_size and downsample_factor must be specified") + msg = "Only one of the target_size and downsample_factor must be specified" + raise ValueError(msg) if downsample_factor is None: h = target_size[0] diff --git a/nncf/common/accuracy_aware_training/runner.py b/nncf/common/accuracy_aware_training/runner.py index 1cc4e81c132..ddd522b3456 100644 --- a/nncf/common/accuracy_aware_training/runner.py +++ b/nncf/common/accuracy_aware_training/runner.py @@ -265,7 +265,7 @@ def __init__( [ TModel, CompressionAlgorithmController, - "TrainingRunner", + TrainingRunner, Optional[Union[str, pathlib.Path]], ], None, @@ -307,7 +307,7 @@ def dump_statistics(self, model: TModel, compression_controller: CompressionAlgo for key, value in prepare_for_tensorboard(statistics).items(): if isinstance(value, (int, float)): self.add_tensorboard_scalar( - "compression/statistics/{0}".format(key), + f"compression/statistics/{key}", value, self.cumulative_epoch_count, ) diff --git a/nncf/common/accuracy_aware_training/runner_factory.py b/nncf/common/accuracy_aware_training/runner_factory.py index 90695f38d23..e9ea348aa55 100644 --- a/nncf/common/accuracy_aware_training/runner_factory.py +++ b/nncf/common/accuracy_aware_training/runner_factory.py @@ -80,7 +80,8 @@ def create_training_loop(self) -> BaseAccuracyAwareTrainingRunner: self.dump_checkpoints, self.lr_updates_needed, ) - raise nncf.UnsupportedBackendError("Got an unsupported value of nncf_backend") + msg = "Got an unsupported value of nncf_backend" + raise nncf.UnsupportedBackendError(msg) class AdaptiveCompressionLevelTrainingRunnerCreator(TrainingRunnerCreator): @@ -140,4 +141,5 @@ 
def create_training_loop(self) -> BaseAdaptiveCompressionLevelTrainingRunner: self.minimal_compression_rate, self.maximal_compression_rate, ) - raise nncf.UnsupportedBackendError("Got an unsupported value of nncf_backend") + msg = "Got an unsupported value of nncf_backend" + raise nncf.UnsupportedBackendError(msg) diff --git a/nncf/common/accuracy_aware_training/training_loop.py b/nncf/common/accuracy_aware_training/training_loop.py index 1170c3ce7f2..a32e446fc1d 100644 --- a/nncf/common/accuracy_aware_training/training_loop.py +++ b/nncf/common/accuracy_aware_training/training_loop.py @@ -321,10 +321,11 @@ def __init__( super().__init__(compression_controller) self.adaptive_controller = self._get_adaptive_compression_ctrl(compression_controller) if self.adaptive_controller is None: - raise nncf.InternalError( + msg = ( "No compression algorithm supported by the accuracy-aware training " "runner was specified in the config" ) + raise nncf.InternalError(msg) maximal_compression_rate = min(maximal_compression_rate, self.adaptive_controller.maximal_compression_rate) @@ -350,11 +351,12 @@ def remove_registry_prefix(algo_name: str) -> str: for prefix in ("pt_", "tf_"): if algo_name.startswith(prefix): return algo_name[len(prefix) :] - raise nncf.ValidationError( + msg = ( "Compression algorithm names in the adaptive controllers " 'registry should be prefixed with "pt_" or "tf_" depending on the ' "backend framework" ) + raise nncf.ValidationError(msg) return { remove_registry_prefix(algo_name): cast(CompressionAlgorithmController, controller_cls) @@ -374,9 +376,8 @@ def remove_registry_prefix(algo_name: str) -> str: ): return compression_controller - raise nncf.InternalError( - "No compression algorithm that supports adaptive compression accuracy-aware training was specified" - ) + msg = "No compression algorithm that supports adaptive compression accuracy-aware training was specified" + raise nncf.InternalError(msg) def run( self, @@ -570,7 +571,8 @@ def _determine_compression_rate_step_value( current_compression_rate=runner.compression_rate_target, ) else: - raise ValueError("Wrong stepping mode to determine compression rate step value provided") + msg = "Wrong stepping mode to determine compression rate step value provided" + raise ValueError(msg) return compression_step_updater(runner, **kwargs) @staticmethod @@ -660,4 +662,5 @@ def create_accuracy_aware_training_loop( uncompressed_model_accuracy, **additional_runner_args, ) - raise nncf.InternalError("Incorrect accuracy aware mode in the config file") + msg = "Incorrect accuracy aware mode in the config file" + raise nncf.InternalError(msg) diff --git a/nncf/common/composite_compression.py b/nncf/common/composite_compression.py index e45355fffd3..97d4be6d7c7 100644 --- a/nncf/common/composite_compression.py +++ b/nncf/common/composite_compression.py @@ -77,7 +77,8 @@ def calculate(self, *args: Any, **kwargs: Any) -> Any: """ if len(self._child_losses) == 0: - raise nncf.InternalError("Cannot calculate the loss value because the number of child loss is 0.") + msg = "Cannot calculate the loss value because the number of child loss is 0." 
+ raise nncf.InternalError(msg) result_loss = 0 for loss in self._child_losses: @@ -193,7 +194,8 @@ def child_ctrls(self) -> List[CompressionAlgorithmController]: @property def name(self) -> str: if self._name is None: - raise nncf.InternalError("Internal error: algorithm name is not set for the controller") + msg = "Internal error: algorithm name is not set for the controller" + raise nncf.InternalError(msg) return self._name def add(self, child_ctrl: CompressionAlgorithmController) -> None: @@ -203,9 +205,8 @@ def add(self, child_ctrl: CompressionAlgorithmController) -> None: :param child_ctrl: A `CompressionAlgorithmController` instance. """ if child_ctrl.model is not self.model: - raise nncf.InternalError( - "Cannot create a composite controller from controllers belonging to different models!" - ) + msg = "Cannot create a composite controller from controllers belonging to different models!" + raise nncf.InternalError(msg) self._child_ctrls.append(child_ctrl) self._loss.add(child_ctrl.loss) @@ -347,7 +348,8 @@ def disable_scheduler(self) -> None: def get_compression_state(self) -> Dict[str, Any]: if self._builder_state is None: - raise nncf.InternalError("Internal error: builder state is not set for the controller") + msg = "Internal error: builder state is not set for the controller" + raise nncf.InternalError(msg) return {self.BUILDER_STATE: self._builder_state, self.CONTROLLER_STATE: self.get_state()} diff --git a/nncf/common/compression.py b/nncf/common/compression.py index d0395d5cd38..a22c80b82f4 100644 --- a/nncf/common/compression.py +++ b/nncf/common/compression.py @@ -67,7 +67,8 @@ def __init__(self, target_model: TModel): @property def name(self) -> str: if self._name is None: - raise nncf.InternalError("Internal error: name of the controller is not set!") + msg = "Internal error: name of the controller is not set!" + raise nncf.InternalError(msg) return self._name @property @@ -178,7 +179,8 @@ def get_compression_state(self) -> Dict[str, Any]: :return: The compression state. """ if self._builder_state is None: - raise nncf.InternalError("Internal error: builder state is not set for the controller") + msg = "Internal error: builder state is not set for the controller" + raise nncf.InternalError(msg) return {self.BUILDER_STATE: self._builder_state, self.CONTROLLER_STATE: self.get_state()} diff --git a/nncf/common/deprecation.py b/nncf/common/deprecation.py index ad8b36ea524..74ccd7e1264 100644 --- a/nncf/common/deprecation.py +++ b/nncf/common/deprecation.py @@ -67,7 +67,8 @@ def wrapped_init(*args: Any, **kwargs: Any) -> Any: return cast(TObj, obj) - raise TypeError("The @deprecated decorator can only be used on functions or classes.") + err_msg = "The @deprecated decorator can only be used on functions or classes." + raise TypeError(err_msg) return decorator diff --git a/nncf/common/factory.py b/nncf/common/factory.py index 976842f6b33..d3a05add911 100644 --- a/nncf/common/factory.py +++ b/nncf/common/factory.py @@ -61,10 +61,10 @@ def create(model: TModel) -> NNCFGraph: return model.build_graph() if isinstance(model, NNCFNetwork): return model.nncf.get_graph() - raise nncf.InternalError(f"Unexpected type of model {type(model)} for TORCH backend") - raise nncf.UnsupportedBackendError( - f"Cannot create backend-specific graph because {model_backend.value} is not supported!" - ) + msg = f"Unexpected type of model {type(model)} for TORCH backend" + raise nncf.InternalError(msg) + msg = f"Cannot create backend-specific graph because {model_backend.value} is not supported!" 
+ raise nncf.UnsupportedBackendError(msg) class ModelTransformerFactory: @@ -108,9 +108,8 @@ def create(model: TModel, inplace: bool = False) -> ModelTransformer[Any]: from nncf.experimental.torch.fx.model_transformer import FXModelTransformer return FXModelTransformer(cast(GraphModule, model)) - raise nncf.UnsupportedBackendError( - "Cannot create backend-specific model transformer because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot create backend-specific model transformer because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) class EngineFactory: @@ -146,9 +145,8 @@ def create(model: TModel) -> Engine: else: pt_model = cast(Module, model) return PTEngine(pt_model) - raise nncf.UnsupportedBackendError( - f"Cannot create backend-specific engine because {model_backend.value} is not supported!" - ) + msg = f"Cannot create backend-specific engine because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) class CommandCreatorFactory: @@ -171,9 +169,8 @@ def create(model: TModel) -> CommandCreator: return ONNXCommandCreator() - raise nncf.UnsupportedBackendError( - "Cannot create backend-specific command creator because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot create backend-specific command creator because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) class StatisticsAggregatorFactory: @@ -206,8 +203,5 @@ def create(model: TModel, dataset: Dataset) -> aggregator.StatisticsAggregator: from nncf.experimental.torch.fx.statistics.aggregator import FXStatisticsAggregator return FXStatisticsAggregator(dataset) - raise nncf.UnsupportedBackendError( - "Cannot create backend-specific statistics aggregator because {} is not supported!".format( - model_backend.value - ) - ) + msg = f"Cannot create backend-specific statistics aggregator because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) diff --git a/nncf/common/graph/graph.py b/nncf/common/graph/graph.py index 350bcb98265..7848f8d0edc 100644 --- a/nncf/common/graph/graph.py +++ b/nncf/common/graph/graph.py @@ -371,14 +371,12 @@ def get_input_edge_by_port_id(self, node: NNCFNode, port_id: int) -> NNCFGraphEd """ edges = [e for e in self.get_input_edges(node) if e.input_port_id == port_id] if len(edges) == 0: - raise nncf.ValidationError( - f"Node {node.node_name} does not contain input edge connected to {port_id} port ID." - ) + msg = f"Node {node.node_name} does not contain input edge connected to {port_id} port ID." + raise nncf.ValidationError(msg) if len(edges) > 1: - raise nncf.InternalError( - "Unsupported graph. More than one edge was found for a given node by the specified input port ID." - ) + msg = "Unsupported graph. More than one edge was found for a given node by the specified input port ID." 
+ raise nncf.InternalError(msg) return edges[0] def get_output_edges(self, node: NNCFNode) -> List[NNCFGraphEdge]: @@ -507,7 +505,8 @@ def add_nncf_node( node_id = 0 if node_id in self._node_id_to_key_dict: - raise ValueError(f"NNCF node with id {node_id} is already in the NNCFGraph") + msg = f"NNCF node with id {node_id} is already in the NNCFGraph" + raise ValueError(msg) node_ids = self._node_name_to_node_id_map.setdefault(node_name, []) node_ids.append(node_id) @@ -587,7 +586,8 @@ def add_edge_between_nncf_nodes( err_reason = "cannot add edges *to* input nodes" if err_reason is not None: - raise ValueError(f"Cannot add edge from {from_node_key} to {to_node_key} - {err_reason}!") + msg = f"Cannot add edge from {from_node_key} to {to_node_key} - {err_reason}!" + raise ValueError(msg) attrs = { NNCFGraph.ACTIVATION_SHAPE_EDGE_ATTR: tensor_shape, @@ -690,9 +690,11 @@ def _get_graph_for_visualization(self) -> nx.DiGraph: def get_node_by_name(self, name: NNCFNodeName) -> NNCFNode: node_ids = self._node_name_to_node_id_map.get(name, None) if node_ids is None: - raise nncf.InternalError("Could not find a node {} in NNCFGraph!".format(name)) + msg = f"Could not find a node {name} in NNCFGraph!" + raise nncf.InternalError(msg) if len(node_ids) > 1: - raise nncf.InternalError(f"More than one node in NNCFGraph matches name {name}") + msg = f"More than one node in NNCFGraph matches name {name}" + raise nncf.InternalError(msg) node_key = f"{node_ids[0]} {name}" return self._nodes[node_key] @@ -745,7 +747,8 @@ def get_nncf_graph_pattern_io(self, match: List[str]) -> NNCFGraphPatternIO: elif to_node_key in match: input_nncf_edges.append(nncf_edge) else: - raise nncf.InternalError("Invalid graph expression supplied!") + msg = "Invalid graph expression supplied!" + raise nncf.InternalError(msg) return NNCFGraphPatternIO(input_nncf_edges, output_nncf_edges) diff --git a/nncf/common/graph/operator_metatypes.py b/nncf/common/graph/operator_metatypes.py index ae7f8311872..bedbb9cadc2 100644 --- a/nncf/common/graph/operator_metatypes.py +++ b/nncf/common/graph/operator_metatypes.py @@ -105,10 +105,11 @@ def wrap(obj: Type[OperatorMetatype]) -> Type[OperatorMetatype]: op_names = obj.get_all_aliases() for name in op_names: if name in self._op_name_to_op_meta_dict: - raise nncf.InternalError( + msg = ( "Inconsistent operator metatype registry - single patched " f"op name `{name}` maps to multiple metatypes!" ) + raise nncf.InternalError(msg) self._op_name_to_op_meta_dict[name] = obj return obj diff --git a/nncf/common/graph/patterns/manager.py b/nncf/common/graph/patterns/manager.py index 709e2e630d8..1a8f0a0d7ff 100644 --- a/nncf/common/graph/patterns/manager.py +++ b/nncf/common/graph/patterns/manager.py @@ -52,7 +52,8 @@ def _get_backend_hw_patterns_map(backend: BackendType) -> Dict[HWFusedPatternNam registry = cast(Dict[HWFusedPatternNames, Callable[[], GraphPattern]], PT_HW_FUSED_PATTERNS.registry_dict) return registry - raise ValueError(f"Hardware-fused patterns not implemented for {backend} backend.") + msg = f"Hardware-fused patterns not implemented for {backend} backend." + raise ValueError(msg) @staticmethod def _get_backend_ignored_patterns_map( @@ -82,7 +83,8 @@ def _get_backend_ignored_patterns_map( registry = cast(Dict[IgnoredPatternNames, Callable[[], GraphPattern]], PT_IGNORED_PATTERNS.registry_dict) return registry - raise ValueError(f"Ignored patterns not implemented for {backend} backend.") + msg = f"Ignored patterns not implemented for {backend} backend." 
+ raise ValueError(msg) @staticmethod def _filter_patterns( diff --git a/nncf/common/graph/patterns/patterns.py b/nncf/common/graph/patterns/patterns.py index e86d44a519b..5bfacf69ee4 100644 --- a/nncf/common/graph/patterns/patterns.py +++ b/nncf/common/graph/patterns/patterns.py @@ -46,7 +46,8 @@ def register(self, pattern: "GraphPattern", name: str, match: bool = True) -> No :param match: whether should the pattern used as fussing pattern """ if name in self._patterns_dict: - raise KeyError("{} is already registered".format(name)) + msg = f"{name} is already registered" + raise KeyError(msg) self._patterns_dict[name] = pattern if match: self._full_pattern_graph.add_pattern_alternative(pattern) @@ -260,7 +261,8 @@ def merge_two_types_of_operations(first_op: Dict[str, Any], second_op: Dict[str, res[GraphPattern.METATYPE_ATTR].extend(second_op[GraphPattern.METATYPE_ATTR]) res[GraphPattern.LABEL_ATTR] = label return res - raise nncf.InternalError("Incorrect dicts of operations") + msg = "Incorrect dicts of operations" + raise nncf.InternalError(msg) @dataclass diff --git a/nncf/common/hardware/config.py b/nncf/common/hardware/config.py index 799261ced0c..5ec7b819983 100644 --- a/nncf/common/hardware/config.py +++ b/nncf/common/hardware/config.py @@ -141,7 +141,8 @@ def get_quantization_mode_from_config_value(str_val: str) -> QuantizationMode: return QuantizationMode.SYMMETRIC if str_val == "asymmetric": return QuantizationMode.ASYMMETRIC - raise nncf.ValidationError("Invalid quantization type specified in HW config") + msg = "Invalid quantization type specified in HW config" + raise nncf.ValidationError(msg) @staticmethod def get_is_per_channel_from_config_value(str_val: str) -> bool: @@ -149,7 +150,8 @@ def get_is_per_channel_from_config_value(str_val: str) -> bool: return True if str_val == "pertensor": return False - raise nncf.ValidationError("Invalid quantization granularity specified in HW config") + msg = "Invalid quantization granularity specified in HW config" + raise nncf.ValidationError(msg) @staticmethod def get_qconf_from_hw_config_subdict(quantization_subdict: Dict[str, Any]) -> QuantizerConfig: @@ -204,8 +206,8 @@ def get_metatype_vs_quantizer_configs_map( metatypes = self._get_metatypes_for_hw_config_op(hw_config_op_name) if not metatypes: nncf_logger.debug( - "Operation name {} in HW config is not registered in NNCF under any supported operation " - "metatype - will be ignored".format(hw_config_op_name) + f"Operation name {hw_config_op_name} in HW config is not registered in NNCF" + " under any supported operation metatype - will be ignored" ) if self.QUANTIZATION_ALGORITHM_NAME in op_dict: @@ -239,8 +241,8 @@ def _get_operations_with_attribute_values( metatypes = self._get_metatypes_for_hw_config_op(hw_config_op_name) if not metatypes: nncf_logger.debug( - "Operation name {} in HW config is not registered in NNCF under any supported " - "operation metatype - will be ignored".format(hw_config_op_name) + f"Operation name {hw_config_op_name} in HW config is not registered in NNCF" + " under any supported operation metatype - will be ignored" ) result.update(metatypes) return result @@ -258,7 +260,7 @@ def _get_metatypes_for_hw_config_op(self, hw_config_op: HWConfigOpName) -> Set[T metatypes.add(op_meta) if not metatypes: nncf_logger.debug( - "Operation name {} in HW config is not registered in NNCF under any supported " - "operation metatype - will be ignored".format(hw_config_op) + f"Operation name {hw_config_op} in HW config is not registered in NNCF under any supported " + 
"operation metatype - will be ignored" ) return metatypes diff --git a/nncf/common/initialization/batchnorm_adaptation.py b/nncf/common/initialization/batchnorm_adaptation.py index febaef61bf7..eb51ddd7e58 100644 --- a/nncf/common/initialization/batchnorm_adaptation.py +++ b/nncf/common/initialization/batchnorm_adaptation.py @@ -70,7 +70,8 @@ def __init__(self, data_loader: NNCFDataLoader, num_bn_adaptation_samples: int, of the model parameters will be used. """ if num_bn_adaptation_samples < 0: - raise ValueError("Number of adaptation samples must be >= 0") + msg = "Number of adaptation samples must be >= 0" + raise ValueError(msg) self._device = device self._data_loader = data_loader diff --git a/nncf/common/logging/progress_bar.py b/nncf/common/logging/progress_bar.py index fb985ba5c88..2b27404f6e6 100644 --- a/nncf/common/logging/progress_bar.py +++ b/nncf/common/logging/progress_bar.py @@ -91,8 +91,4 @@ def _print_next(self) -> None: num_empty = self._width - num_filled filled = "ā–ˆ" * num_filled empty = " " * num_empty - self._logger.info( - "{desc} |{filled}{empty}| {index} / {total}".format( - desc=self._desc, filled=filled, empty=empty, index=self._index, total=self._total - ) - ) + self._logger.info(f"{self._desc} |{filled}{empty}| {self._index} / {self._total}") diff --git a/nncf/common/logging/track_progress.py b/nncf/common/logging/track_progress.py index 7c28284074c..c46cb45c802 100644 --- a/nncf/common/logging/track_progress.py +++ b/nncf/common/logging/track_progress.py @@ -103,7 +103,8 @@ def weighted_advance(task: Task, advance: float) -> float: Perform weighted advancement based on an integer step value. """ if advance % 1 != 0: - raise Exception(f"Unexpected `advance` value: {advance}.") + msg = f"Unexpected `advance` value: {advance}." + raise Exception(msg) advance = int(advance) current_step: int = task.fields["completed_steps"] weighted_advance: float = sum(task.fields["weights"][current_step : current_step + advance]) @@ -116,7 +117,8 @@ def get_weighted_completed(task: Task, completed: float) -> float: Get weighted `completed` corresponding to an integer `completed` field. """ if completed % 1 != 0: - raise Exception(f"Unexpected `completed` value: {completed}.") + msg = f"Unexpected `completed` value: {completed}." + raise Exception(msg) return float(sum(task.fields["weights"][: int(completed)])) @@ -227,7 +229,8 @@ def __init__( def __iter__(self) -> Iterator[ProgressType]: if self.sequence is None: - raise RuntimeError("__iter__ called without set sequence.") + msg = "__iter__ called without set sequence." + raise RuntimeError(msg) with self: yield from self.progress.track( self.sequence, @@ -254,5 +257,6 @@ def __exit__(self, *args: Any) -> None: def update(self, advance: float, **kwargs: Any) -> None: if self.task is None: - raise RuntimeError("update is available only inside context manager.") + msg = "update is available only inside context manager." + raise RuntimeError(msg) self.progress.update(self.task, advance=advance, **kwargs) diff --git a/nncf/common/pruning/clusterization.py b/nncf/common/pruning/clusterization.py index eed1d0466ed..83aa7fb156a 100644 --- a/nncf/common/pruning/clusterization.py +++ b/nncf/common/pruning/clusterization.py @@ -55,7 +55,8 @@ def get_cluster_by_id(self, cluster_id: int) -> Cluster[T]: :return: Cluster according to provided `cluster_id`. 
""" if cluster_id not in self.clusters: - raise IndexError("No cluster with id = {}".format(cluster_id)) + msg = f"No cluster with id = {cluster_id}" + raise IndexError(msg) return self.clusters[cluster_id] def get_cluster_containing_element(self, element_id: Hashable) -> Cluster[T]: @@ -66,7 +67,8 @@ def get_cluster_containing_element(self, element_id: Hashable) -> Cluster[T]: :return: Cluster containing element with provided `element_id`. """ if element_id not in self._element_to_cluster: - raise IndexError("No cluster for node with id = {}".format(element_id)) + msg = f"No cluster for node with id = {element_id}" + raise IndexError(msg) return self.get_cluster_by_id(self._element_to_cluster[element_id]) def is_node_in_clusterization(self, node_id: int) -> bool: @@ -86,7 +88,8 @@ def add_cluster(self, cluster: Cluster[T]) -> None: """ cluster_id = cluster.id if cluster_id in self.clusters: - raise IndexError("Cluster with index = {} already exist".format(cluster_id)) + msg = f"Cluster with index = {cluster_id} already exist" + raise IndexError(msg) self.clusters[cluster_id] = cluster for elt in cluster.elements: self._element_to_cluster[self._id_fn(elt)] = cluster_id # type: ignore[no-untyped-call] @@ -98,7 +101,8 @@ def delete_cluster(self, cluster_id: int) -> None: :param cluster_id: Id of a cluster to delete. """ if cluster_id not in self.clusters: - raise IndexError("No cluster with index = {} to delete".format(cluster_id)) + msg = f"No cluster with index = {cluster_id} to delete" + raise IndexError(msg) for elt in self.clusters[cluster_id].elements: node_id = self._id_fn(elt) # type: ignore[no-untyped-call] self._element_to_cluster.pop(node_id) diff --git a/nncf/common/pruning/schedulers.py b/nncf/common/pruning/schedulers.py index 9ce004f73e7..742a141bd05 100644 --- a/nncf/common/pruning/schedulers.py +++ b/nncf/common/pruning/schedulers.py @@ -63,7 +63,8 @@ def _calculate_pruning_level(self) -> float: :return: Pruning level that should be applied to the model. """ - raise NotImplementedError("PruningScheduler implementation must override _calculate_pruning_level method.") + msg = "PruningScheduler implementation must override _calculate_pruning_level method." 
+ raise NotImplementedError(msg) def epoch_step(self, next_epoch: Optional[int] = None) -> None: """ diff --git a/nncf/common/pruning/shape_pruning_processor.py b/nncf/common/pruning/shape_pruning_processor.py index 4ef41d98a1d..69bf4900e2c 100644 --- a/nncf/common/pruning/shape_pruning_processor.py +++ b/nncf/common/pruning/shape_pruning_processor.py @@ -170,7 +170,8 @@ def _get_next_node_sparse_multiplier( if mask_producer.id in cluster_nodes_idxs: return mask_producer.sparse_multiplier - raise nncf.ValidationError(f"Next node for cluster {cluster.elements} doesn't have closing mask") + msg = f"Next node for cluster {cluster.elements} doesn't have closing mask" + raise nncf.ValidationError(msg) def get_next_nodes( self, graph: NNCFGraph, pruning_groups: Clusterization[PrunedLayerInfoBase] diff --git a/nncf/common/pruning/symbolic_mask.py b/nncf/common/pruning/symbolic_mask.py index 9c7ad4a4004..9397c833177 100644 --- a/nncf/common/pruning/symbolic_mask.py +++ b/nncf/common/pruning/symbolic_mask.py @@ -111,7 +111,8 @@ def concatenate(cls, tensors: List[SymbolicMask], axis: int) -> SymbolicMask: # def ones(cls, shape: Union[int, List[int]], device) -> SymbolicMask: # type: ignore if isinstance(shape, list): if len(shape) != 1: - raise nncf.ValidationError(f"Unexpected shape = {shape} for 1D symbolic mask") + msg = f"Unexpected shape = {shape} for 1D symbolic mask" + raise nncf.ValidationError(msg) shape = shape[0] return SymbolicMask(shape) @@ -149,10 +150,11 @@ def elementwise_mask_propagation(cls, input_masks: List[SymbolicMask]) -> Symbol @classmethod def split(cls, tensor: SymbolicMask, output_shapes: List[int]) -> List[SymbolicMask]: # type: ignore if any(shape <= 0 for shape in output_shapes) or tensor.shape[0] != sum(output_shapes): - raise AssertionError( + msg = ( "Symbolic mask split was called with" f"invalid parammeters: input mask shape: {tensor.shape[0]}, output masks shapes: {output_shapes}" ) + raise AssertionError(msg) producers = tensor.mask_producers return [SymbolicMask(output_shape, producers) for output_shape in output_shapes] diff --git a/nncf/common/pruning/utils.py b/nncf/common/pruning/utils.py index b295bc9aca6..02cda468396 100644 --- a/nncf/common/pruning/utils.py +++ b/nncf/common/pruning/utils.py @@ -394,7 +394,8 @@ def get_input_channels(node: NNCFNode) -> int: return layer_attrs.in_channels if isinstance(layer_attrs, LinearLayerAttributes): return layer_attrs.in_features - raise nncf.InternalError(f"Can't get count of input channels from node {node}") + msg = f"Can't get count of input channels from node {node}" + raise nncf.InternalError(msg) def get_output_channels(node: NNCFNode) -> int: @@ -409,7 +410,8 @@ def get_output_channels(node: NNCFNode) -> int: return layer_attrs.out_channels if isinstance(layer_attrs, LinearLayerAttributes): return layer_attrs.out_features - raise nncf.InternalError(f"Can't get count of output channels from node {node}") + msg = f"Can't get count of output channels from node {node}" + raise nncf.InternalError(msg) def identity_mask_propagation(node: NNCFNode, graph: NNCFGraph) -> None: diff --git a/nncf/common/pruning/weights_flops_calculator.py b/nncf/common/pruning/weights_flops_calculator.py index 69df80ad92a..2bdb7a9d87d 100644 --- a/nncf/common/pruning/weights_flops_calculator.py +++ b/nncf/common/pruning/weights_flops_calculator.py @@ -113,7 +113,8 @@ def count_flops_and_weights_per_node( continue layer_attr = node.layer_attributes if not isinstance(layer_attr, ConvolutionLayerAttributes): - raise 
nncf.InternalError(f"Unexpected layer attributes type for convolution layer: {type(layer_attr)}") + msg = f"Unexpected layer attributes type for convolution layer: {type(layer_attr)}" + raise nncf.InternalError(msg) num_in_channels = input_channels.get(name, layer_attr.in_channels) num_out_channels = output_channels.get(name, layer_attr.out_channels) kernel_size = kernel_sizes.get(name, layer_attr.kernel_size) @@ -145,7 +146,8 @@ def count_flops_and_weights_per_node( layer_attr = node.layer_attributes if not isinstance(layer_attr, LinearLayerAttributes): - raise nncf.InternalError(f"Unexpected layer attributes type for linear layer: {type(layer_attr)}") + msg = f"Unexpected layer attributes type for linear layer: {type(layer_attr)}" + raise nncf.InternalError(msg) num_in_features = input_channels.get(name, layer_attr.in_features) num_out_features = output_channels.get(name, layer_attr.out_features) diff --git a/nncf/common/quantization/initialization/range.py b/nncf/common/quantization/initialization/range.py index c21ec82b5c5..ff53ccc9427 100644 --- a/nncf/common/quantization/initialization/range.py +++ b/nncf/common/quantization/initialization/range.py @@ -56,7 +56,8 @@ def __eq__(self, other: object) -> bool: def from_dict(cls, dct: Dict[str, Any]) -> RangeInitConfig: num_init_samples = dct.get("num_init_samples", NUM_INIT_SAMPLES) if num_init_samples < 0: - raise ValueError("Number of initialization samples must be >= 0") + msg = "Number of initialization samples must be >= 0" + raise ValueError(msg) return cls(dct.get("type", "mixed_min_max"), num_init_samples, dct.get("params")) @@ -91,10 +92,11 @@ def __init__( range_init_config.init_type, range_init_config.num_init_samples, range_init_config.init_type_specific_params ) if target_scopes is None and ignored_scopes is None: - raise ValueError( + msg = ( "At least one of the (target_scopes, ignored_scopes) should be specified" " for a per-layer range init config!" ) + raise ValueError(msg) self.target_scopes = target_scopes self.ignored_scopes = ignored_scopes self.target_group = target_quantizer_group diff --git a/nncf/common/quantization/quantizer_propagation/graph.py b/nncf/common/quantization/quantizer_propagation/graph.py index bb1b217582e..b0a95ce26fe 100644 --- a/nncf/common/quantization/quantizer_propagation/graph.py +++ b/nncf/common/quantization/quantizer_propagation/graph.py @@ -223,7 +223,8 @@ def ipg_node_type_to_qpsg_node_type( return QuantizerPropagationStateGraphNodeType.POST_HOOK if ipg_node_type == InsertionPointGraphNodeType.OPERATOR: return QuantizerPropagationStateGraphNodeType.OPERATOR - raise nncf.ValidationError("Invalid insertion point graph node type.") + msg = "Invalid insertion point graph node type." + raise nncf.ValidationError(msg) @staticmethod def get_barrier_node_key(node_key: str) -> str: @@ -255,10 +256,11 @@ def mark_act_quantizer_as_dependent_on_weights(self, pq: PropagatingQuantizer, o pq in self._pqs_after_weight_dependent_output_quantized_nodes and self._pqs_after_weight_dependent_output_quantized_nodes[pq] != operator_node_key ): - raise nncf.InternalError( + msg = ( f"Propagating quantizer {pq.id} is already marked as depending on node " f"{operator_node_key} weight quantization!" 
) + raise nncf.InternalError(msg) self._pqs_after_weight_dependent_output_quantized_nodes[pq] = operator_node_key @staticmethod @@ -337,10 +339,8 @@ def merge_quantizer_into_path(self, prop_quantizer: PropagatingQuantizer, path: affecting_quantizers.append(pq) self.remove_propagating_quantizer(prop_quantizer) else: - raise nncf.InternalError( - "Surviving_quantizers not found !" - " Nodes quantized with quantizer #{} will be lost".format(prop_quantizer.id) - ) + msg = f"Surviving_quantizers not found! Nodes quantized with quantizer #{prop_quantizer.id} will be lost" + raise nncf.InternalError(msg) @staticmethod def _get_major_unified_scale_type(type_list: List[Optional[UnifiedScaleType]]) -> Optional[UnifiedScaleType]: @@ -378,7 +378,8 @@ def merge_quantizers_for_branching_node( pre_hook_ip = edge_from_pre_hook_ip_to_op[0] target_ip_node_keys.append(pre_hook_ip) else: - raise nncf.InternalError("Unsupported branching QPSG node type: {}".format(branching_node_type)) + msg = f"Unsupported branching QPSG node type: {branching_node_type}" + raise nncf.InternalError(msg) if not target_ip_node_keys: return [] @@ -428,7 +429,8 @@ def merge_quantizers_for_branching_node( if merge_gid is not None: self._unified_scale_group_manager.add_to_group(merge_gid, merge_pq) else: - raise nncf.InternalError("Unsupported target type for merge PQ insertion: {}".format(target_type)) + msg = f"Unsupported target type for merge PQ insertion: {target_type}" + raise nncf.InternalError(msg) merge_pqs.append(merge_pq) @@ -438,7 +440,8 @@ def merge_quantizers_for_branching_node( if branch_qconf_list is None and pq.unified_scale_type is not None: gid = self._unified_scale_group_manager.get_group_id_by_propagating_quantizer_id(pq.id) if gid is None: - raise nncf.InternalError("gid is None") + msg = "gid is None" + raise nncf.InternalError(msg) unified_scale_gids_to_merge.add(gid) if unified_scale_gids_to_merge: @@ -575,7 +578,8 @@ def add_propagating_quantizer( if ip_type != QuantizerPropagationStateGraphNodeType.PRE_HOOK: # The insertion point key should immediately precede a quantizable op, # otherwise it is hard to determine affected node here (although possible) - raise nncf.InternalError("Can only add propagating quantizers into pre-hook spots!") + msg = "Can only add propagating quantizers into pre-hook spots!" 
+ raise nncf.InternalError(msg) prop_quantizer = PropagatingQuantizer( self._get_next_prop_quantizer_id(), qconf_list, ip_node_key, unified_scale_type @@ -615,15 +619,13 @@ def _verify_nodes_and_edges_for_pq(self, prop_quantizer: PropagatingQuantizer) - for node_key in node_keys_to_verify: if node_key not in self.nodes: - raise nncf.InternalError( - "Unknown node referenced by propagating quantizer to be registered: {}".format(node_key) - ) + msg = f"Unknown node referenced by propagating quantizer to be registered: {node_key}" + raise nncf.InternalError(msg) edge_keys_to_verify = list(prop_quantizer.affected_edges) + list(prop_quantizer.propagation_path) for edge_key in edge_keys_to_verify: if edge_key not in self.edges: - raise nncf.InternalError( - "Unknown edge referenced by propagating quantizer to be registered: {}".format(edge_key) - ) + msg = f"Unknown edge referenced by propagating quantizer to be registered: {edge_key}" + raise nncf.InternalError(msg) @staticmethod def _verify_qconfig_matching( @@ -631,37 +633,39 @@ def _verify_qconfig_matching( ) -> None: for existing_pq in existing_prop_quantizers: if existing_pq.potential_quant_configs != prop_quantizer.potential_quant_configs: - raise nncf.InternalError( + msg = ( "Configurations of the quantizer to be registered are conflicting with " - "existing quantizer {}".format(existing_pq.id) + f"existing quantizer {existing_pq.id}" ) + raise nncf.InternalError(msg) def register_propagating_quantizer(self, prop_quantizer: PropagatingQuantizer) -> None: """Will only succeed if the new quantizer information is consistent with the rest of the graph state.""" all_pqs = self.collect_all_propagating_quantizers() for existing_pq_id in all_pqs: if prop_quantizer.id == existing_pq_id: - raise nncf.InternalError( + msg = ( "The propagating quantizer to be registered has an ID that is already assigned to " "an existing propagating quantizer!" ) + raise nncf.InternalError(msg) target_node = self.nodes[prop_quantizer.current_location_node_key] pq_in_target_node = target_node[QuantizerPropagationStateGraph.PROPAGATING_QUANTIZER_NODE_ATTR] if pq_in_target_node is not None: - raise nncf.InternalError( + msg = ( "The propagating quantizer to be registered is occupying the same position " - "as an existing propagating quantizer {}!".format(pq_in_target_node.id) + f"as an existing propagating quantizer {pq_in_target_node.id}!" ) + raise nncf.InternalError(msg) target_node_affecting_quantizers = target_node[ QuantizerPropagationStateGraph.AFFECTING_PROPAGATING_QUANTIZERS_ATTR ] if target_node_affecting_quantizers: - raise nncf.InternalError( - "Cannot register a propagating quantizer into a node that is already " - "affected by existing propagating quantizers (ids: {})!".format( - [pq.id for pq in target_node_affecting_quantizers] - ) + msg = ( + "Cannot register a propagating quantizer into a node that is already affected by existing" + f" propagating quantizers (ids: {[pq.id for pq in target_node_affecting_quantizers]})!" 
) + raise nncf.InternalError(msg) self._verify_nodes_and_edges_for_pq(prop_quantizer) @@ -973,19 +977,19 @@ def get_visualized_graph(self) -> nx.DiGraph: insertion_point_data: TargetPoint = node[ QuantizerPropagationStateGraph.QUANT_INSERTION_POINT_DATA_NODE_ATTR ] - label = "TP: {}".format(str(insertion_point_data)) + label = f"TP: {str(insertion_point_data)}" out_graph.add_node(node_key, label=label, color="red") if node[QuantizerPropagationStateGraph.PROPAGATING_QUANTIZER_NODE_ATTR] is not None: prop_quantizer: PropagatingQuantizer = node[ QuantizerPropagationStateGraph.PROPAGATING_QUANTIZER_NODE_ATTR ] - quant_node_key = "Quantizer #{}".format(prop_quantizer.id) + quant_node_key = f"Quantizer #{prop_quantizer.id}" if prop_quantizer.potential_quant_configs: quant_configs_str_list = [str(conf) for conf in prop_quantizer.potential_quant_configs] else: quant_configs_str_list = ["!!! NONE !!!]"] sub_label = "[" + ",\n".join(quant_configs_str_list) + "]" - quant_node_label = quant_node_key + "\n" + "T: {}\n".format(sub_label) + quant_node_label = quant_node_key + "\n" + f"T: {sub_label}\n" quant_node_label += "Q-input sink ops: {}".format( "\n".join(prop_quantizer.quantized_input_sink_operator_nodes) ) @@ -1011,7 +1015,8 @@ def get_visualized_graph(self) -> nx.DiGraph: elif node_type == QuantizerPropagationStateGraphNodeType.AUXILIARY_BARRIER: out_graph.add_node(node_key, color="green", label=node["label"]) else: - raise nncf.InternalError("Invalid QuantizerPropagationStateGraph node!") + msg = "Invalid QuantizerPropagationStateGraph node!" + raise nncf.InternalError(msg) for u, v in self.edges: edge = self.edges[u, v] attrs = {} @@ -1043,7 +1048,7 @@ def get_visualized_graph(self) -> nx.DiGraph: next_pq_node_key, arrowhead="none", style="dotted", - label="Unified group {}".format(gid), + label=f"Unified group {gid}", ) return out_graph @@ -1362,7 +1367,7 @@ def create_quantizer_setup( # has information on which operation accepts recurrent inputs. nncf_logger.debug( "Could not find an associated input activation quantizer " - "for a weighted node with quantizable weights: {}\n".format(weighted_node_name) + f"for a weighted node with quantizable weights: {weighted_node_name}\n" ) else: associated_same_op_gid = qm_node_vs_same_op_gid[weighted_node_name] diff --git a/nncf/common/quantization/quantizer_propagation/grouping.py b/nncf/common/quantization/quantizer_propagation/grouping.py index 64f30fbb5d3..c0299532c89 100644 --- a/nncf/common/quantization/quantizer_propagation/grouping.py +++ b/nncf/common/quantization/quantizer_propagation/grouping.py @@ -39,9 +39,7 @@ def register_group(self, prop_quants: Set[PropagatingQuantizer]) -> int: """ for pq in prop_quants: for gid, group in self._group_vs_prop_quants_dict.items(): - assert pq not in group, "Propagating quantizer #{} is already registered in a group {}!".format( - pq.id, gid - ) + assert pq not in group, f"Propagating quantizer #{pq.id} is already registered in a group {gid}!" 
gid = self._get_next_gid() self._group_vs_prop_quants_dict[gid] = prop_quants return gid @@ -57,8 +55,8 @@ def add_to_group(self, target_gid: int, prop_quant: PropagatingQuantizer) -> Non for gid, group in self._group_vs_prop_quants_dict.items(): if target_gid != gid: assert prop_quant not in group, ( - "Tried to add propagating quantizer #{} to group #{}, " - "but it is already registered in a group {}!".format(prop_quant.id, target_gid, gid) + f"Tried to add propagating quantizer #{prop_quant.id} to group #{target_gid}, " + f"but it is already registered in a group {gid}!" ) self._group_vs_prop_quants_dict[target_gid].add(prop_quant) diff --git a/nncf/common/quantization/quantizer_propagation/solver.py b/nncf/common/quantization/quantizer_propagation/solver.py index 604e46fefcd..f7df16d8d9c 100644 --- a/nncf/common/quantization/quantizer_propagation/solver.py +++ b/nncf/common/quantization/quantizer_propagation/solver.py @@ -126,9 +126,8 @@ def constrain_quantizer_config_list_for_insertion( """ prior_list = self.quantizer_setup.quantization_points[quantization_point_id].possible_qconfigs if not all(qc in prior_list for qc in constrained_config_list): - raise nncf.InternalError( - "Constrained config list is incompatible with the result of the quantizer propagation!" - ) + msg = "Constrained config list is incompatible with the result of the quantizer propagation!" + raise nncf.InternalError(msg) # TODO (vshampor): only allow to constrain 'input-group'-wise? self.quantizer_setup.quantization_points[quantization_point_id].possible_qconfigs = constrained_config_list @@ -175,10 +174,11 @@ def is_final_qconfig_compatible_to_initial(initial_qconfig: QuantizerConfig) -> ) ) if not compatible_initial_qconfs: - raise nncf.InternalError( + msg = ( "The final quantizer setup has configurations that were not present in the " "initial proposal!" ) + raise nncf.InternalError(msg) if final_qconfig.signedness_to_force is None: initial_qconfs_signedness_values = {qc.signedness_to_force for qc in compatible_initial_qconfs} if None not in initial_qconfs_signedness_values and len(initial_qconfs_signedness_values) == 1: @@ -583,7 +583,8 @@ def get_final_quantizer_setup( if Counter(final_weight_quantizable_node_names_vs_qconfig_dict.keys()) != Counter( self._weight_quantizable_node_names_vs_qconfigs.keys() ): - raise nncf.InternalError("Final weight quantizer setup is inconsistent with initial solver assumptions!") + msg = "Final weight quantizer setup is inconsistent with initial solver assumptions!" 
+ raise nncf.InternalError(msg) multi_setup_with_one_config_per_point = quant_prop_graph.create_quantizer_setup( final_weight_quantizable_node_names_vs_qconfig_dict @@ -914,9 +915,9 @@ def debug_visualize(self, quant_prop_graph: QuantizerPropagationStateGraph, dump if self._active_propagating_quantizers_queue: next_id_str = str(self._active_propagating_quantizers_queue[-1].id) out_graph.graph["graph"] = { - "label": "Propagating quantizers: {}\n" - "Next quantizer to be propagated: {}\n" - "Finished quantizers: {}".format(active_ids_str, next_id_str, finished_ids_str), + "label": f"Propagating quantizers: {active_ids_str}\n" + f"Next quantizer to be propagated: {next_id_str}\n" + f"Finished quantizers: {finished_ids_str}", "labelloc": "t", } pth = deepcopy(dump_path) @@ -1008,18 +1009,16 @@ def coalesce_insertion_points( ) ) if len(matching_indices) == 0: - raise nncf.ValidationError( - "No match for linked quantizer entry {} among activation quantizers!".format( - group_member_node_name - ) - ) + msg = f"No match for linked quantizer entry {group_member_node_name} among activation quantizers!" + raise nncf.ValidationError(msg) for target_idx in matching_indices: if target_idx in insertion_point_indices_vs_group_id: - raise nncf.InternalError( - "Linked activation quantizer groups {} and {} " - "overlap!".format(group_idx, insertion_point_indices_vs_group_id[target_idx]) + msg = ( + f"Linked activation quantizer groups {group_idx} and " + f"{insertion_point_indices_vs_group_id[target_idx]} overlap!" ) + raise nncf.InternalError(msg) for target_idx in matching_indices: insertion_point_indices_vs_group_id[target_idx] = group_idx @@ -1117,11 +1116,12 @@ def _setup_initial_quantizers_for_operator_node( op_meta_name = metatype.__class__.__name__ if len(per_tensor_qconf_list) != len(qconf_list): if not per_tensor_qconf_list: - raise nncf.InternalError( + msg = ( "Unified scales currently do not support per-channel configuration - dropping" - "per-channel configuration options for {} resulted in no valid quantization " - "configs!".format(op_meta_name) + f" per-channel configuration options for {op_meta_name} resulted in no valid quantization " + "configs!"
) + raise nncf.InternalError(msg) nncf_logger.warning( f"Unified scales currently do not support per-channel configuration - dropping" f"per-channel configuration options for {op_meta_name}" @@ -1424,7 +1424,8 @@ def compatible_wo_requant(qconf: QuantizerConfig, other_qconf_list: List[Quantiz elif self._propagation_strategy == QuantizerPropagationRule.MERGE_ALL_IN_ONE: compatible_fn = compatible_wo_requant else: - raise nncf.ValidationError(f"Unknown propagation strategy: {self._propagation_strategy}") + msg = f"Unknown propagation strategy: {self._propagation_strategy}" + raise nncf.ValidationError(msg) for qconf in qconfigs_union: if all(compatible_fn(qconf, qconf_list) for qconf_list in potential_qconfigs_for_each_branch): diff --git a/nncf/common/quantization/quantizer_propagation/visualizer.py b/nncf/common/quantization/quantizer_propagation/visualizer.py index b39b392b5af..d0888cc3cc8 100644 --- a/nncf/common/quantization/quantizer_propagation/visualizer.py +++ b/nncf/common/quantization/quantizer_propagation/visualizer.py @@ -29,5 +29,5 @@ def visualize_quantizer_propagation( self, prop_solver: QuantizerPropagationSolver, prop_graph: QuantizerPropagationStateGraph, iteration: str ) -> None: self.dump_dir.mkdir(parents=True, exist_ok=True) - fname = "quant_prop_iter_{}.dot".format(iteration) + fname = f"quant_prop_iter_{iteration}.dot" prop_solver.debug_visualize(prop_graph, str(self.dump_dir / Path(fname))) diff --git a/nncf/common/quantization/quantizer_setup.py b/nncf/common/quantization/quantizer_setup.py index 3545366c6ee..377b0d4c5c0 100644 --- a/nncf/common/quantization/quantizer_setup.py +++ b/nncf/common/quantization/quantizer_setup.py @@ -220,10 +220,10 @@ def select_qconfig(self, qconfig: QuantizerConfig) -> SingleConfigQuantizationPo qconfig_any = deepcopy(qconfig) qconfig_any.signedness_to_force = None if qconfig_any not in self.possible_qconfigs: - raise ValueError( - "Invalid selection for a quantizer config - " - "tried to select {} among [{}]".format(qconfig, ",".join([str(q) for q in self.possible_qconfigs])) + msg = "Invalid selection for a quantizer config - " "tried to select {} among [{}]".format( + qconfig, ",".join([str(q) for q in self.possible_qconfigs]) ) + raise ValueError(msg) qconfig = qconfig_any return SingleConfigQuantizationPoint(self.insertion_point, qconfig, self.directly_quantized_operator_node_names) @@ -253,7 +253,8 @@ def register_unified_scale_group(self, qp_group: List[QuantizationPointId]) -> i for qp_id in qp_group: usg_id = self.get_unified_scale_group_id(qp_id) if usg_id is not None: - raise nncf.InternalError(f"QP id {qp_id} is already in unified scale group {usg_id}") + msg = f"QP id {qp_id} is already in unified scale group {usg_id}" + raise nncf.InternalError(msg) gid = self._next_unified_scale_gid self.unified_scale_groups[self._next_unified_scale_gid] = set(qp_group) self._next_unified_scale_gid += 1 @@ -263,7 +264,8 @@ def register_shared_inputs_group(self, qp_group: List[QuantizationPointId]) -> i for qp_id in qp_group: usg_id = self.get_shared_inputs_group_id(qp_id) if usg_id is not None: - raise nncf.InternalError(f"QP id {qp_id} is already in unified scale group {usg_id}") + msg = f"QP id {qp_id} is already in shared inputs group {usg_id}" + raise nncf.InternalError(msg) gid = self._next_shared_inputs_gid self.shared_input_operation_set_groups[self._next_shared_inputs_gid] = set(qp_group) self._next_shared_inputs_gid += 1 @@ -311,13 +313,15 @@ def register_existing_qp_id_in_unified_scale_group( ) -> None: gid = 
self.get_unified_scale_group_id(qp_id) if gid is not None: - raise nncf.InternalError("QP id {} is already in unified scale group {}".format(qp_id, gid)) + msg = f"QP id {qp_id} is already in unified scale group {gid}" + raise nncf.InternalError(msg) self.unified_scale_groups[unified_scale_gid].add(qp_id) def register_existing_qp_id_in_shared_input_group(self, qp_id: QuantizationPointId, shared_inputs_gid: int) -> None: gid = self.get_shared_inputs_group_id(qp_id) if gid is not None: - raise nncf.InternalError("QP id {} is already in shared inputs group {}".format(qp_id, gid)) + msg = f"QP id {qp_id} is already in shared inputs group {gid}" + raise nncf.InternalError(msg) self.shared_input_operation_set_groups[shared_inputs_gid].add(qp_id) def remove_unified_scale_from_point(self, qp_id: QuantizationPointId) -> None: @@ -468,10 +472,11 @@ def select_qconfigs( retval.shared_input_operation_set_groups = deepcopy(self.shared_input_operation_set_groups) if Counter(qp_id_vs_selected_qconfig_dict.keys()) != Counter(self.quantization_points.keys()): - raise ValueError( + msg = ( "The set of quantization points for a selection is inconsistent with quantization" "points in the quantizer setup!" ) + raise ValueError(msg) for qp_id, qp in self.quantization_points.items(): if strict: retval.quantization_points[qp_id] = qp.select_qconfig(qp_id_vs_selected_qconfig_dict[qp_id]) diff --git a/nncf/common/quantization/structs.py b/nncf/common/quantization/structs.py index 3a7cdf04728..954f6fa3b15 100644 --- a/nncf/common/quantization/structs.py +++ b/nncf/common/quantization/structs.py @@ -204,9 +204,8 @@ def __init__(self, **kwargs: Any) -> None: """ for attr_name in kwargs: if not hasattr(QuantizationConstraints.REF_QCONF_OBJ, attr_name): - raise nncf.ValidationError( - "Invalid constraint - QuantizerConfig has no attribute '{}'".format(attr_name) - ) + msg = f"Invalid constraint - QuantizerConfig has no attribute '{attr_name}'" + raise nncf.ValidationError(msg) self.qconf_attr_vs_constraint_dict = kwargs def apply_constraints_to(self, qconfig: QuantizerConfig) -> QuantizerConfig: @@ -321,7 +320,7 @@ def get_base(self) -> str: return self.target_node_name def get_suffix(self) -> str: - return "|OUTPUT" if self.input_port_id is None else "|INPUT{}".format(self.input_port_id) + return "|OUTPUT" if self.input_port_id is None else f"|INPUT{self.input_port_id}" class UnifiedScaleType(Enum): diff --git a/nncf/common/schedulers.py b/nncf/common/schedulers.py index 44847662aad..a7c83fd55cf 100644 --- a/nncf/common/schedulers.py +++ b/nncf/common/schedulers.py @@ -88,7 +88,8 @@ def __init__(self, boundaries: List[int], values: List[float]): equal to the number of elements in the `boundaries` list plus one. """ if len(boundaries) + 1 != len(values): - raise ValueError("The length of `values` should be 1 more than the length of `boundaries`") + msg = "The length of `values` should be 1 more than the length of `boundaries`" + raise ValueError(msg) self.boundaries = boundaries self.values = values diff --git a/nncf/common/sparsity/schedulers.py b/nncf/common/sparsity/schedulers.py index b890a5de731..2ae6f8b0d11 100644 --- a/nncf/common/sparsity/schedulers.py +++ b/nncf/common/sparsity/schedulers.py @@ -73,7 +73,8 @@ def _calculate_sparsity_level(self) -> float: :return: Sparsity level that should be applied to the weights for the `current_epoch` or for step in the `current_epoch`. 
""" - raise NotImplementedError("SparsityScheduler implementation must override _calculate_sparsity_level method.") + msg = "SparsityScheduler implementation must override _calculate_sparsity_level method." + raise NotImplementedError(msg) def _update_sparsity_level(self) -> None: """ @@ -187,10 +188,11 @@ def _maybe_should_skip(self) -> None: and self._steps_in_current_epoch > 0 and self._steps_per_epoch != self._steps_in_current_epoch ): - raise Exception( + msg = ( "Actual steps per epoch and steps per epoch from the scheduler " "parameters are different. Scheduling may be incorrect." ) + raise Exception(msg) if self._steps_per_epoch is None: self._should_skip = True diff --git a/nncf/common/stateful_classes_registry.py b/nncf/common/stateful_classes_registry.py index 7a03a63c27b..b106044565b 100644 --- a/nncf/common/stateful_classes_registry.py +++ b/nncf/common/stateful_classes_registry.py @@ -38,21 +38,19 @@ def decorator(cls: TObj) -> TObj: registered_name = name if name is not None else cls.__name__ if registered_name in self._name_vs_class_map: - raise ValueError( - "{} has already been registered to {}".format( - registered_name, self._name_vs_class_map[registered_name] - ) - ) + msg = f"{registered_name} has already been registered to {self._name_vs_class_map[registered_name]}" + raise ValueError(msg) if cls in self._class_vs_name_map: - raise ValueError("{} has already been registered to {}".format(cls, self._class_vs_name_map[cls])) + msg = f"{cls} has already been registered to {self._class_vs_name_map[cls]}" + raise ValueError(msg) if inspect.isclass(cls) and not hasattr(cls, self.REQUIRED_METHOD_NAME): - raise ValueError( - "Cannot register a class ({}) that does not have {}() method.".format( - registered_name, self.REQUIRED_METHOD_NAME - ) + msg = ( + f"Cannot register a class ({registered_name}) that does not have" + f" {self.REQUIRED_METHOD_NAME}() method." ) + raise ValueError(msg) self._class_vs_name_map[cls] = registered_name self._name_vs_class_map[registered_name] = cls @@ -70,7 +68,8 @@ def get_registered_class(self, registered_name: str) -> type: """ if registered_name in self._name_vs_class_map: return self._name_vs_class_map[registered_name] - raise KeyError("No registered stateful classes with {} name".format(registered_name)) + msg = f"No registered stateful classes with {registered_name} name" + raise KeyError(msg) def get_registered_name(self, stateful_cls: type) -> str: """ @@ -81,7 +80,8 @@ def get_registered_name(self, stateful_cls: type) -> str: """ if stateful_cls in self._class_vs_name_map: return self._class_vs_name_map[stateful_cls] - raise KeyError("The class {} was not registered.".format(stateful_cls.__name__)) + msg = f"The class {stateful_cls.__name__} was not registered." + raise KeyError(msg) class CommonStatefulClassesRegistry: diff --git a/nncf/common/statistics.py b/nncf/common/statistics.py index 4f6ceb71e27..b0c163e029d 100644 --- a/nncf/common/statistics.py +++ b/nncf/common/statistics.py @@ -54,9 +54,8 @@ def register(self, algorithm_name: str, stats: Statistics) -> None: """ available_algorithms = [f.name for f in fields(self)] if algorithm_name not in available_algorithms: - raise ValueError( - f"Can not register statistics for the algorithm. Unknown name of the algorithm: {algorithm_name}." - ) + msg = f"Can not register statistics for the algorithm. Unknown name of the algorithm: {algorithm_name}." 
+ raise ValueError(msg) setattr(self, algorithm_name, stats) diff --git a/nncf/common/strip.py b/nncf/common/strip.py index c306a3f4ca9..3d5bee0168d 100644 --- a/nncf/common/strip.py +++ b/nncf/common/strip.py @@ -45,4 +45,5 @@ def strip(model: TModel, do_copy: bool = True) -> TModel: return strip_tf(model, do_copy) # type: ignore - raise nncf.UnsupportedBackendError(f"Method `strip` does not support for {model_backend.value} backend.") + msg = f"Method `strip` is not supported for the {model_backend.value} backend." + raise nncf.UnsupportedBackendError(msg) diff --git a/nncf/common/tensor.py b/nncf/common/tensor.py index 0e15c5b3f96..a4984e7e42c 100644 --- a/nncf/common/tensor.py +++ b/nncf/common/tensor.py @@ -28,7 +28,8 @@ def __init__(self, tensor: TensorType): def __eq__(self, other: object) -> bool: if not isinstance(other, NNCFTensor): - raise nncf.InternalError("Attempt to compare NNCFTensor with a non-NNCFTensor object") + msg = "Attempt to compare NNCFTensor with a non-NNCFTensor object" + raise nncf.InternalError(msg) return self._tensor == other.tensor @property @@ -38,7 +39,8 @@ def tensor(self) -> TensorType: # type: ignore @property def shape(self) -> List[int]: if self._tensor is None: - raise nncf.InternalError("Attempt to get shape of empty NNCFTensor") + msg = "Attempt to get shape of empty NNCFTensor" + raise nncf.InternalError(msg) return self._tensor.shape # type: ignore @property diff --git a/nncf/common/tensor_statistics/aggregator.py b/nncf/common/tensor_statistics/aggregator.py index 8f27f35847f..b71d441f2ec 100644 --- a/nncf/common/tensor_statistics/aggregator.py +++ b/nncf/common/tensor_statistics/aggregator.py @@ -115,7 +115,8 @@ def _load_statistics(self, data: Dict[str, Any]) -> None: statistics = tensor_collector.get_statistics() statistics_key = self._get_statistics_key(statistics, statistic_point.target_point) if statistics_key not in data: - raise nncf.ValidationError(f"Not found statistics for {statistics_key}") + msg = f"Statistics not found for {statistics_key}" + raise nncf.ValidationError(msg) statistics.load_data(data[statistics_key]) tensor_collector.set_cache(statistics) diff --git a/nncf/common/tensor_statistics/statistic_point.py b/nncf/common/tensor_statistics/statistic_point.py index 26a42d84c68..393dec2df88 100644 --- a/nncf/common/tensor_statistics/statistic_point.py +++ b/nncf/common/tensor_statistics/statistic_point.py @@ -117,5 +117,4 @@ def get_algo_statistics_for_node( :return: Iterable through all statistic collectors in node with target_node_name.
""" for _statistic_point in self.iter_through_statistic_points_in_target_node(target_node_name, filter_fn): - for _tensor_collector in _statistic_point.algorithm_to_tensor_collectors[algorithm]: - yield _tensor_collector + yield from _statistic_point.algorithm_to_tensor_collectors[algorithm] diff --git a/nncf/common/tensor_statistics/statistics_serializer.py b/nncf/common/tensor_statistics/statistics_serializer.py index 0e635bbdb05..7e885ba2004 100644 --- a/nncf/common/tensor_statistics/statistics_serializer.py +++ b/nncf/common/tensor_statistics/statistics_serializer.py @@ -62,7 +62,8 @@ def load_metadata(dir_path: Path) -> Dict[str, Any]: if metadata_file.exists(): with safe_open(metadata_file, "r") as f: return cast(Dict[str, Any], json.load(f)) - raise nncf.StatisticsCacheError(f"Metadata file does not exist in the following path: {dir_path}") + msg = f"Metadata file does not exist in the following path: {dir_path}" + raise nncf.StatisticsCacheError(msg) def save_metadata(metadata: Dict[str, Any], dir_path: Path) -> None: diff --git a/nncf/common/tensor_statistics/statistics_validator.py b/nncf/common/tensor_statistics/statistics_validator.py index c521359af69..539d4a9f3ce 100644 --- a/nncf/common/tensor_statistics/statistics_validator.py +++ b/nncf/common/tensor_statistics/statistics_validator.py @@ -22,12 +22,12 @@ def validate_backend(metadata: Dict[str, Any], backend: BackendType) -> None: :param backend: Provided backend. """ if "backend" not in metadata: - raise ValueError("The provided metadata has no information about backend.") + msg = "The provided metadata has no information about backend." + raise ValueError(msg) data_backend = metadata["backend"] if data_backend != backend.value: - raise ValueError( - f"Backend in loaded statistics {data_backend} does not match the expected backend {backend.value}." - ) + msg = f"Backend in loaded statistics {data_backend} does not match the expected backend {backend.value}." + raise ValueError(msg) def validate_statistics_files_exist(metadata: Dict[str, Any], dir_path: Path) -> None: @@ -40,7 +40,8 @@ def validate_statistics_files_exist(metadata: Dict[str, Any], dir_path: Path) -> for file_name in metadata["mapping"]: file_path = dir_path / file_name if not file_path.exists(): - raise FileNotFoundError(f"One of the statistics file: {file_path} does not exist.") + msg = f"One of the statistics file: {file_path} does not exist." + raise FileNotFoundError(msg) def validate_cache(metadata: Dict[str, Any], dir_path: Path, backend: BackendType) -> None: diff --git a/nncf/common/utils/backend.py b/nncf/common/utils/backend.py index c2e459765e2..9c8c6cdb057 100644 --- a/nncf/common/utils/backend.py +++ b/nncf/common/utils/backend.py @@ -147,10 +147,11 @@ def get_backend(model: Any) -> BackendType: if backend_call(model): return backend - raise nncf.UnsupportedBackendError( + msg = ( "Could not infer the backend framework from the model type because " "the framework is not available or corrupted, or the model type is unsupported. " ) + raise nncf.UnsupportedBackendError(msg) def copy_model(model: TModel) -> TModel: diff --git a/nncf/common/utils/helpers.py b/nncf/common/utils/helpers.py index f465f02653a..0adab41dd46 100644 --- a/nncf/common/utils/helpers.py +++ b/nncf/common/utils/helpers.py @@ -55,8 +55,8 @@ def configure_accuracy_aware_paths(log_dir: Union[str, Path]) -> Union[str, Path :return: Path to the accuracy-aware training subdirectory. 
""" d = datetime.datetime.now() - run_id = "{:%Y-%m-%d__%H-%M-%S}".format(d) - acc_aware_log_dir = osp.join(log_dir, "accuracy_aware_training/{run_id}".format(run_id=run_id)) + run_id = f"{d:%Y-%m-%d__%H-%M-%S}" + acc_aware_log_dir = osp.join(log_dir, f"accuracy_aware_training/{run_id}") os.makedirs(acc_aware_log_dir, exist_ok=True) return acc_aware_log_dir diff --git a/nncf/common/utils/os.py b/nncf/common/utils/os.py index 613dfb9a547..69ac5a413b4 100644 --- a/nncf/common/utils/os.py +++ b/nncf/common/utils/os.py @@ -20,7 +20,8 @@ def fail_if_symlink(file: Path) -> None: if file.is_symlink(): - raise nncf.ValidationError("File {} is a symbolic link, aborting.".format(str(file))) + msg = f"File {str(file)} is a symbolic link, aborting." + raise nncf.ValidationError(msg) @contextmanager diff --git a/nncf/common/utils/registry.py b/nncf/common/utils/registry.py index ffcb7ad53ac..01f7dcd3954 100644 --- a/nncf/common/utils/registry.py +++ b/nncf/common/utils/registry.py @@ -29,7 +29,8 @@ def values(self) -> Any: def _register(self, obj: Any, name: str) -> None: if name in self._registry_dict: - raise KeyError("{} is already registered in {}".format(name, self._name)) + msg = f"{name} is already registered in {self._name}" + raise KeyError(msg) self._registry_dict[name] = obj def register(self, name: str = None) -> Callable[[Any], Any]: @@ -50,7 +51,8 @@ def get(self, name: str) -> Any: return self._registry_dict[name] def _key_not_found(self, name: str) -> None: - raise KeyError("{} is unknown type of {} ".format(name, self._name)) + msg = f"{name} is unknown type of {self._name} " + raise KeyError(msg) def __contains__(self, item: Any) -> bool: return item in self._registry_dict.values() diff --git a/nncf/config/config.py b/nncf/config/config.py index a351d52526b..b068362b398 100644 --- a/nncf/config/config.py +++ b/nncf/config/config.py @@ -72,7 +72,8 @@ def register_extra_structs(self, struct_list: List[NNCFExtraConfigStruct]) -> No for struct in struct_list: struct_id = struct.get_id() if struct_id in self.__nncf_extra_structs: - raise nncf.InternalError(f"{struct_id} is already registered as extra struct in NNCFConfig!") + msg = f"{struct_id} is already registered as extra struct in NNCFConfig!" + raise nncf.InternalError(msg) self.__nncf_extra_structs[struct_id] = struct def get_extra_struct(self, struct_cls: Type[NNCFExtraConfigStruct]) -> NNCFExtraConfigStruct: diff --git a/nncf/config/extractors.py b/nncf/config/extractors.py index 4232c96d286..a4aa53a7b95 100644 --- a/nncf/config/extractors.py +++ b/nncf/config/extractors.py @@ -53,10 +53,11 @@ def extract_algo_specific_config(config: NNCFConfig, algo_name_to_match: str) -> if algo_name_to_match == NO_COMPRESSION_ALGORITHM_NAME: if len(algo_list) > 0: - raise nncf.ValidationError( + msg = ( f"No algorithm configuration should be specified " f"when you try to extract {algo_name_to_match} from the NNCF config!" ) + raise nncf.ValidationError(msg) return {} matches = [] @@ -66,14 +67,14 @@ def extract_algo_specific_config(config: NNCFConfig, algo_name_to_match: str) -> matches.append(compression_algo_dict) if len(matches) > 1: - raise nncf.ValidationError( + msg = ( f"Multiple algorithm configurations specified for the same " f"algo {algo_name_to_match} in the NNCF config!" ) + raise nncf.ValidationError(msg) if not matches: - raise nncf.InternalError( - f"Did not find an algorithm configuration for algo {algo_name_to_match} in the NNCF config!" 
- ) + msg = f"Did not find an algorithm configuration for algo {algo_name_to_match} in the NNCF config!" + raise nncf.InternalError(msg) return next(iter(matches)) @@ -120,11 +121,12 @@ def extract_range_init_params(config: NNCFConfig, algorithm_name: str = "quantiz if max_num_init_samples == 0: return None if not isinstance(range_init_args, QuantizationRangeInitArgs): - raise ValueError( + msg = ( "Should run range initialization as specified via config," "but the initializing data loader is not provided as an extra struct. " "Refer to `NNCFConfig.register_extra_structs` and the `QuantizationRangeInitArgs` class" ) + raise ValueError(msg) params = { "init_range_data_loader": range_init_args.data_loader, @@ -169,16 +171,16 @@ def get_bn_adapt_algo_kwargs(nncf_config: NNCFConfig, params: Dict[str, Any]) -> try: args = nncf_config.get_extra_struct(BNAdaptationInitArgs) except KeyError: - raise BNAdaptDataLoaderNotFoundError( + msg = ( "Unable to create the batch-norm statistics adaptation algorithm " "because the data loader is not provided as an extra struct. Refer to the " "`NNCFConfig.register_extra_structs` method and the `BNAdaptationInitArgs` class." - ) from None + ) + raise BNAdaptDataLoaderNotFoundError(msg) from None if not isinstance(args, BNAdaptationInitArgs): - raise BNAdaptDataLoaderNotFoundError( - "The extra struct for batch-norm adaptation must be an instance of the BNAdaptationInitArgs class." - ) + msg = "The extra struct for batch-norm adaptation must be an instance of the BNAdaptationInitArgs class." + raise BNAdaptDataLoaderNotFoundError(msg) params = { "num_bn_adaptation_samples": num_bn_adaptation_samples, "data_loader": args.data_loader, @@ -210,13 +212,15 @@ def validate_accuracy_aware_schema(config: NNCFConfig, params: Dict[str, Any]) - if NNCFAlgorithmNames.FILTER_PRUNING in algorithms and any( algo in NNCFAlgorithmNames.SPARSITY for algo in algorithms ): - raise nncf.ValidationError( + msg = ( "adaptive_compression_level mode supports filter_pruning or sparsity algorithms" "separately. Please, choose only one algorithm with adaptive compression level. " "Take a note that you still can use it combined with quantization." 
) + raise nncf.ValidationError(msg) if len(algorithms) == 1 and algorithms[0] == NNCFAlgorithmNames.QUANTIZATION: - raise nncf.ValidationError("adaptive_compression_level mode doesn't support quantization") + msg = "adaptive_compression_level mode doesn't support quantization" + raise nncf.ValidationError(msg) accuracy_aware_training_config = config.get("accuracy_aware_training", None) diff --git a/nncf/config/schema.py b/nncf/config/schema.py index 196d35f470f..2c2e0da5d38 100644 --- a/nncf/config/schema.py +++ b/nncf/config/schema.py @@ -155,9 +155,8 @@ def validate_single_compression_algo_schema( algo-specific properties""" algo_name = single_compression_algo_dict["algorithm"] if algo_name not in ref_vs_algo_schema: - raise jsonschema.ValidationError( - f"Incorrect algorithm name - must be one of {str(list(ref_vs_algo_schema.keys()))}" - ) + msg = f"Incorrect algorithm name - must be one of {list(ref_vs_algo_schema.keys())}" + raise jsonschema.ValidationError(msg) try: jsonschema.validate(single_compression_algo_dict, schema=ref_vs_algo_schema[algo_name]) except jsonschema.ValidationError as e: @@ -181,11 +180,10 @@ def validate_accuracy_aware_training_schema(single_compression_algo_dict: Dict[s jsonschema.validate(single_compression_algo_dict, schema=ACCURACY_AWARE_TRAINING_SCHEMA) accuracy_aware_mode = single_compression_algo_dict.get("mode") if accuracy_aware_mode not in ACCURACY_AWARE_MODES_VS_SCHEMA: - raise jsonschema.ValidationError( - "Incorrect Accuracy Aware mode - must be one of ({})".format( - ", ".join(ACCURACY_AWARE_MODES_VS_SCHEMA.keys()) - ) + msg = "Incorrect Accuracy Aware mode - must be one of ({})".format( + ", ".join(ACCURACY_AWARE_MODES_VS_SCHEMA.keys()) ) + raise jsonschema.ValidationError(msg) try: jsonschema.validate(single_compression_algo_dict, schema=ACCURACY_AWARE_MODES_VS_SCHEMA[accuracy_aware_mode]) except Exception as e: diff --git a/nncf/data/generators.py b/nncf/data/generators.py index cd05d7a2e64..0ea10facd17 100644 --- a/nncf/data/generators.py +++ b/nncf/data/generators.py @@ -51,7 +51,8 @@ def generate_text_data( try: import torch except ImportError: - raise nncf.ModuleNotFoundError("torch is required in order to generate text data: `pip install torch`.") + msg = "torch is required in order to generate text data: `pip install torch`." + raise nncf.ModuleNotFoundError(msg) try: from transformers import PreTrainedModel # type: ignore @@ -60,15 +61,16 @@ def generate_text_data( logging.set_verbosity_error() except ImportError: - raise nncf.ModuleNotFoundError( - "transformers is required in order to generate text data: `pip install transformers`." - ) + msg = "transformers is required in order to generate text data: `pip install transformers`." + raise nncf.ModuleNotFoundError(msg) if not isinstance(model, PreTrainedModel.__bases__): - raise nncf.ValidationError("Model should be instance of the `transformers.PreTrainedModel`.") + msg = "Model should be an instance of `transformers.PreTrainedModel`." + raise nncf.ValidationError(msg) if not isinstance(tokenizer, PreTrainedTokenizerBase.__bases__): - raise nncf.ValidationError("tokenizer should be instance of the `transformers.PreTrainedTokenizerBase`.") + msg = "tokenizer should be an instance of `transformers.PreTrainedTokenizerBase`." 
+ raise nncf.ValidationError(msg) generated_data: List[str] = [] diff --git a/nncf/experimental/common/pruning/operations.py b/nncf/experimental/common/pruning/operations.py index d7474d6abab..7d04e6307df 100644 --- a/nncf/experimental/common/pruning/operations.py +++ b/nncf/experimental/common/pruning/operations.py @@ -506,7 +506,8 @@ def mask_propagation( elif mode == ReshapeMode.EXTEND: for dim, groups in input_mask.dim_groups_map.items(): if len(groups) > 1: - raise NotImplementedError("Extend reshape for several groups is not supported yet") + msg = "Extend reshape for several groups is not supported yet" + raise NotImplementedError(msg) if len(in_map[dim]) == 1: # pruning dimension is not extended, just assign a new location in the output shifted_dim = in_map[dim][0] @@ -517,7 +518,8 @@ def mask_propagation( list_output_channels = [output_shape[x] for x in in_map[dim]] group = groups[0] if group.has_children(): - raise NotImplementedError("Splitting BlockGroup with children is not implemented yet") + msg = "Splitting BlockGroup with children is not implemented yet" + raise NotImplementedError(msg) child_groups = cls._split_group(group, input_channels, list_output_channels) for child_group, in_dim in zip(child_groups, in_map[dim]): output_mask.dim_groups_map[in_dim] = [child_group] diff --git a/nncf/experimental/common/pruning/propagation_data.py b/nncf/experimental/common/pruning/propagation_data.py index 640fc91d505..c9d3340da3d 100644 --- a/nncf/experimental/common/pruning/propagation_data.py +++ b/nncf/experimental/common/pruning/propagation_data.py @@ -149,7 +149,7 @@ def __init__( consumers: Optional[Set[ConsumerInfo]] = None, ) -> None: self.block = block - self._children: List["PropagationGroup"] = [] + self._children: List[PropagationGroup] = [] self._is_invalid = False self._producers = set() if producers is None else producers self._consumers = set() if consumers is None else consumers diff --git a/nncf/experimental/common/tensor_statistics/collectors.py b/nncf/experimental/common/tensor_statistics/collectors.py index 670ef65e4a8..3c7108c63c4 100644 --- a/nncf/experimental/common/tensor_statistics/collectors.py +++ b/nncf/experimental/common/tensor_statistics/collectors.py @@ -246,11 +246,11 @@ def register_statistic_branch( :reducer_output_port_id: Reducer target output port id. 
""" if container_key in self._stat_container_kwargs_map: - raise nncf.InternalError( - f"Two different statistic branches for one container key {container_key} are encountered" - ) + msg = f"Two different statistic branches for one container key {container_key} are encountered" + raise nncf.InternalError(msg) if any(aggr is aggregator for aggr in self._aggregators.values()): - raise nncf.InternalError(f"One aggregator instance {aggregator} for different branches is encountered") + msg = f"One aggregator instance {aggregator} for different branches is encountered" + raise nncf.InternalError(msg) self._reducers.add(reducer) key = (hash(reducer), reducer_output_port_id, hash(aggregator)) @@ -733,9 +733,8 @@ def __init__( window_size=window_size, ) if 0 not in self._aggregation_axes: - raise NotImplementedError( - "Aggregation without 0 dim is not supported yet for MedianAbsoluteDeviationAggregator" - ) + msg = "Aggregation without 0 dim is not supported yet for MedianAbsoluteDeviationAggregator" + raise NotImplementedError(msg) def _register_reduced_input_impl(self, x: TensorType) -> None: return self._container.append(x) @@ -774,7 +773,8 @@ def __init__( ): super().__init__(aggregation_axes=aggregation_axes, num_samples=num_samples) if 0 not in self._aggregation_axes: - raise NotImplementedError("Aggregation without 0 dim is not supported yet for PercentileAggregator") + msg = "Aggregation without 0 dim is not supported yet for PercentileAggregator" + raise NotImplementedError(msg) self._percentiles_to_collect = percentiles_to_collect self._window_size = window_size self._container = deque(maxlen=window_size) diff --git a/nncf/experimental/common/tensor_statistics/statistical_functions.py b/nncf/experimental/common/tensor_statistics/statistical_functions.py index e13fc44fdc9..d75e60ab188 100644 --- a/nncf/experimental/common/tensor_statistics/statistical_functions.py +++ b/nncf/experimental/common/tensor_statistics/statistical_functions.py @@ -30,6 +30,7 @@ def mean_per_channel(x: Tensor, axis: int, dtype: Optional[TensorDataType] = Non pos_axis = axis + x.ndim if axis < 0 else axis if pos_axis < 0 or pos_axis >= x.ndim: - raise ValueError(f"axis {axis} is out of bounds for array of dimension {x.ndim}") + msg = f"axis {axis} is out of bounds for array of dimension {x.ndim}" + raise ValueError(msg) axis = tuple(i for i in range(x.ndim) if i != pos_axis) return fns.mean(x, axis=axis, dtype=dtype) diff --git a/nncf/experimental/common/tensor_statistics/statistics.py b/nncf/experimental/common/tensor_statistics/statistics.py index ed608c17cd1..b36e70ece2d 100644 --- a/nncf/experimental/common/tensor_statistics/statistics.py +++ b/nncf/experimental/common/tensor_statistics/statistics.py @@ -54,7 +54,8 @@ def _get_serialized_data(self) -> Dict[str, Tensor]: if isinstance(value, Tensor): serialized_data[key] = value else: - raise nncf.InternalError(f"Unsupported type of value: {type(value)}") + msg = f"Unsupported type of value: {type(value)}" + raise nncf.InternalError(msg) return serialized_data def load_data(self, data: Dict[str, Tensor]) -> None: diff --git a/nncf/experimental/quantization/algorithms/post_training/algorithm.py b/nncf/experimental/quantization/algorithms/post_training/algorithm.py index 2215926e245..7d2b2fdb091 100644 --- a/nncf/experimental/quantization/algorithms/post_training/algorithm.py +++ b/nncf/experimental/quantization/algorithms/post_training/algorithm.py @@ -94,10 +94,11 @@ def apply( dataset: Optional[Dataset] = None, ) -> TModel: if dataset is None and 
len(self._pipeline.pipeline_steps) > 1: - raise ValueError( + msg = ( "A dataset is required for the post-training quantization " "algorithm to collect statistics for intermediate models." ) + raise ValueError(msg) step_index_to_statistics = None if statistic_points: diff --git a/nncf/experimental/quantization/algorithms/range_estimator/algorithm.py b/nncf/experimental/quantization/algorithms/range_estimator/algorithm.py index 24017991699..b422cedb4c7 100644 --- a/nncf/experimental/quantization/algorithms/range_estimator/algorithm.py +++ b/nncf/experimental/quantization/algorithms/range_estimator/algorithm.py @@ -69,10 +69,11 @@ def apply( dataset: Optional[Dataset] = None, ) -> TModel: if self._min_max_algo._quantization_target_points_to_qconfig is None: - raise RuntimeError( + msg = ( "Statistic points are not available." " Please call `get_statistic_points` before calling the `apply` method." ) + raise RuntimeError(msg) return self._min_max_algo.apply(model=model, graph=graph, statistic_points=statistic_points) def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPointsContainer: diff --git a/nncf/experimental/quantization/quantizers/openvino_quantizer.py b/nncf/experimental/quantization/quantizers/openvino_quantizer.py index 9d941593fe2..20415da33dd 100644 --- a/nncf/experimental/quantization/quantizers/openvino_quantizer.py +++ b/nncf/experimental/quantization/quantizers/openvino_quantizer.py @@ -131,10 +131,11 @@ def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: if any(root_qp.qconfig != quantization_setup.quantization_points[q_id].qconfig for q_id in quantizer_ids): qps = [quantization_setup.quantization_points[q_id] for q_id in quantizer_ids] - raise nncf.InternalError( + msg = ( "Different quantization configs are set to one unified scale group:" f"{[(qp.insertion_point.__dict__, str(qp.qconfig)) for qp in qps]}" ) + raise nncf.InternalError(msg) root_target_node = get_graph_node_by_name(graph, root_qp.insertion_point.target_node_name) root_edge_or_node = self._get_edge_or_node(root_target_node, root_qp, nncf_graph) diff --git a/nncf/experimental/quantization/quantizers/torch_ao_adapter.py b/nncf/experimental/quantization/quantizers/torch_ao_adapter.py index 695e4b8a4c1..a1628b273ee 100644 --- a/nncf/experimental/quantization/quantizers/torch_ao_adapter.py +++ b/nncf/experimental/quantization/quantizers/torch_ao_adapter.py @@ -123,7 +123,8 @@ def get_quantizer_config_from_annotated_model(anotated_model: torch.fx.GraphModu elif qspec.qscheme in [torch.per_tensor_affine, torch.per_tensor_symmetric]: per_channel = False else: - raise nncf.InternalError(f"Unknown qscheme: {qspec.qscheme}") + msg = f"Unknown qscheme: {qspec.qscheme}" + raise nncf.InternalError(msg) signed = qspec.dtype is torch.int8 mode = ( QuantizationMode.SYMMETRIC @@ -139,10 +140,11 @@ def get_quantizer_config_from_annotated_model(anotated_model: torch.fx.GraphModu elif isinstance(qspec, SharedQuantizationSpec): # TODO(dlyakhov): Support SharedQuantizationSpec nncf_logger.warning( - "SharedQuantizationSpec is not supported yet;" f" edges {from_n} -> {to_nodes} won't be quantized." + f"SharedQuantizationSpec is not supported yet; edges {from_n} -> {to_nodes} won't be quantized." 
) else: - raise nncf.InternalError(f"Unknown torch.ao quantization spec: {qspec}") + msg = f"Unknown torch.ao quantization spec: {qspec}" + raise nncf.InternalError(msg) return q_setup diff --git a/nncf/experimental/tensorflow/context.py b/nncf/experimental/tensorflow/context.py index 0357e3ba6ef..26bfe66fd6c 100644 --- a/nncf/experimental/tensorflow/context.py +++ b/nncf/experimental/tensorflow/context.py @@ -50,11 +50,12 @@ def __init__(self, in_call: bool = False, wrap_ops: bool = False, model: Optiona self._wrap_ops = wrap_ops if model is None and in_call: - raise ValueError( + msg = ( f"Inconsisten values `{in_call}` and `{model}` for `in_call` and `model` parameters. " "The `None` value is specified that model is undefined at this moment. This is only " "possible when `in_call` is equal to `False`." ) + raise ValueError(msg) self._model = model diff --git a/nncf/experimental/tensorflow/graph/argprovider.py b/nncf/experimental/tensorflow/graph/argprovider.py index 7ad8e4edbcb..b7ffdcfa961 100644 --- a/nncf/experimental/tensorflow/graph/argprovider.py +++ b/nncf/experimental/tensorflow/graph/argprovider.py @@ -31,7 +31,8 @@ def replace_value_by_index(xs: Tuple[Any, ...], pos: int, value: Any) -> Tuple[A def check_port_id(port_id: int, min_port_id: int, max_port_id: int): if min_port_id <= port_id <= max_port_id: return - raise ValueError(f"Unexpected `port_id`: {port_id}") + msg = f"Unexpected `port_id`: {port_id}" + raise ValueError(msg) TF_ARG_PROVIDERS = Registry("TF_ARG_PROVIDERS") @@ -97,7 +98,8 @@ def get_output(self, output_port_id: int, args, kwargs) -> tf.Tensor: check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return args[output_port_id] @@ -105,7 +107,8 @@ def set_output(self, output_port_id: int, value: tf.Tensor, args, kwargs): check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return replace_value_by_index(args, output_port_id, value), kwargs @@ -131,7 +134,8 @@ def get_output(self, output_port_id: int, args, kwargs) -> tf.Tensor: check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return args[output_port_id] @@ -139,7 +143,8 @@ def set_output(self, output_port_id: int, value: tf.Tensor, args, kwargs): check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return replace_value_by_index(args, output_port_id, value), kwargs @@ -184,7 +189,8 @@ def get_output(self, output_port_id: int, args, kwargs) -> tf.Tensor: check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return args[output_port_id] @@ -192,7 +198,8 @@ def set_output(self, output_port_id: int, value: tf.Tensor, args, kwargs): check_port_id(output_port_id, min_port_id=0, max_port_id=0) if len(args) > 1: - raise ValueError(f"Unexpected `args`: {args}") + msg = f"Unexpected `args`: {args}" + raise ValueError(msg) return replace_value_by_index(args, output_port_id, value), kwargs diff --git a/nncf/experimental/tensorflow/graph/converter.py 
b/nncf/experimental/tensorflow/graph/converter.py index c8985bd03f7..87b333cfeb5 100644 --- a/nncf/experimental/tensorflow/graph/converter.py +++ b/nncf/experimental/tensorflow/graph/converter.py @@ -378,7 +378,8 @@ def _convert_dtype_to_nncf_format(dtype: tf.dtypes.DType) -> Dtype: elif dtype.is_integer: tensor_dtype = Dtype.INTEGER else: - raise nncf.InternalError(f"Unexpected dtype of tensor: {dtype}") + msg = f"Unexpected dtype of tensor: {dtype}" + raise nncf.InternalError(msg) return tensor_dtype diff --git a/nncf/experimental/tensorflow/graph/model_transformer.py b/nncf/experimental/tensorflow/graph/model_transformer.py index 1e71367d37c..7f45e95273c 100644 --- a/nncf/experimental/tensorflow/graph/model_transformer.py +++ b/nncf/experimental/tensorflow/graph/model_transformer.py @@ -48,6 +48,7 @@ def transform(self, transformation_layout: TFTransformationLayoutV2) -> NNCFNetw # TODO(andrey-churkin): Add support pass else: - raise ValueError(f"Transformation type {command.type} does not support.") + msg = f"Transformation type {command.type} is not supported." + raise ValueError(msg) return self._model diff --git a/nncf/experimental/tensorflow/graph/transformations/layout.py b/nncf/experimental/tensorflow/graph/transformations/layout.py index c0a9d374a41..33b542c3028 100644 --- a/nncf/experimental/tensorflow/graph/transformations/layout.py +++ b/nncf/experimental/tensorflow/graph/transformations/layout.py @@ -32,7 +32,8 @@ def register(self, transformation: TransformationCommand) -> None: elif transformation.type == TransformationType.INSERT: self._register_insertion_transformation(transformation) else: - raise ValueError(f"Unknown type of transformation command: {transformation.type}") + msg = f"Unknown type of transformation command: {transformation.type}" + raise ValueError(msg) def _register_insertion_transformation(self, transformation: TransformationCommand) -> None: idx = None diff --git a/nncf/experimental/tensorflow/patch_tf.py b/nncf/experimental/tensorflow/patch_tf.py index 379ea235deb..63ce2954afe 100644 --- a/nncf/experimental/tensorflow/patch_tf.py +++ b/nncf/experimental/tensorflow/patch_tf.py @@ -49,11 +49,12 @@ def __init__(self, operations: List[NNCFOperation], target_point: TFTargetPoint, arg_provider_cls = TF_ARG_PROVIDERS.registry_dict.get(self._target_point.op_type_name) if arg_provider_cls is None: - raise ValueError( + msg = ( f"Unexpected type of the TensorFlow operation: {self._target_point.op_type_name}. " "Register an `ArgProvider` instance for this type in the " "`TF_ARG_PROVIDERS` registry, please."
) + raise ValueError(msg) self._arg_provider = arg_provider_cls() diff --git a/nncf/experimental/tensorflow/quantization/algorithm.py b/nncf/experimental/tensorflow/quantization/algorithm.py index 876a6ccdc0d..9e45ad084d7 100644 --- a/nncf/experimental/tensorflow/quantization/algorithm.py +++ b/nncf/experimental/tensorflow/quantization/algorithm.py @@ -191,7 +191,8 @@ def _build_insertion_commands_for_quantizer_setup( for qp_id in unified_scales_group: if was_processed[qp_id]: - raise nncf.InternalError("Unexpected behavior") + msg = "Unexpected behavior" + raise nncf.InternalError(msg) was_processed[qp_id] = True curr_qp = quantization_points[qp_id] @@ -246,9 +247,7 @@ def _get_quantizer_setup(self, model: NNCFNetwork) -> TFQuantizationSetupV2: # Find out which metatypes unsupported by the quantization algorithm for node in nncf_graph.get_all_nodes(): if node.metatype in UNSUPPORTED_TF_OP_METATYPES: - nncf_logger.warning( - "The operation {} is unsupported by the quantization algorithm.".format(node.node_name) - ) + nncf_logger.warning(f"The operation {node.node_name} is unsupported by the quantization algorithm.") # Possible configurations of quantizer for nodes with weights. possible_qconfigs_for_nodes_with_weight = self._get_quantizable_weighted_layer_nodes(nncf_graph) @@ -272,12 +271,13 @@ def _get_quantizer_setup(self, model: NNCFNetwork) -> TFQuantizationSetupV2: if target_node.node_name in node_name_to_qconfig_map: assigned_qconfig = node_name_to_qconfig_map[target_node.node_name] if qp.qconfig != assigned_qconfig: - raise nncf.InternalError( + msg = ( "Inconsistent quantizer configurations selected by solver for one " f"and the same quantizable op! Tried to assign {qp.qconfig} to " f"{target_node.node_name} as specified by QP {qp_id}, but the op " f"already has quantizer config {assigned_qconfig} assigned to it!" ) + raise nncf.InternalError(msg) continue # The operation has already been quantized node_name_to_qconfig_map[target_node.node_name] = qp.qconfig @@ -286,7 +286,8 @@ def _get_quantizer_setup(self, model: NNCFNetwork) -> TFQuantizationSetupV2: narrow_range = not half_range target_type = TargetType.OPERATOR_PRE_HOOK if not issubclass(target_node.metatype, TFOpWithWeightsMetatype): - raise nncf.InternalError(f"Unexpected type of metatype: {type(target_node.metatype)}") + msg = f"Unexpected type of metatype: {type(target_node.metatype)}" + raise nncf.InternalError(msg) port_ids = [weight_def.port_id for weight_def in target_node.metatype.weight_definitions] else: @@ -294,7 +295,8 @@ def _get_quantizer_setup(self, model: NNCFNetwork) -> TFQuantizationSetupV2: # Check correctness if not isinstance(qp.insertion_point, ActivationQuantizationInsertionPoint): - raise nncf.InternalError(f"Unexpected type of insertion point: {type(qp.insertion_point)}") + msg = f"Unexpected type of insertion point: {type(qp.insertion_point)}" + raise nncf.InternalError(msg) # Parameters half_range = False diff --git a/nncf/experimental/tensorflow/quantization/quantizers.py b/nncf/experimental/tensorflow/quantization/quantizers.py index d469f0d4b68..6cf6ccda8af 100644 --- a/nncf/experimental/tensorflow/quantization/quantizers.py +++ b/nncf/experimental/tensorflow/quantization/quantizers.py @@ -51,9 +51,8 @@ def create_variables(self, layer: tf.keras.layers.Layer) -> Dict[str, tf.Variabl :return: Quantizer variables. 
""" if self.per_channel and (self.input_shape is None or self.channel_axes is None): - raise ValueError( - "The `input_shape` and `channel_axes` arguments are required when using per-channel quantization." - ) + msg = "The `input_shape` and `channel_axes` arguments are required when using per-channel quantization." + raise ValueError(msg) prefix = self.name return self._create_variables(layer, self.input_shape, self.channel_axes, prefix) @@ -86,9 +85,8 @@ def create_variables(self, layer: tf.keras.layers.Layer) -> Dict[str, tf.Variabl :return: Quantizer variables. """ if self.per_channel and (self.input_shape is None or self.channel_axes is None): - raise ValueError( - "The `input_shape` and `channel_axes` arguments are required when using per-channel quantization." - ) + msg = "The `input_shape` and `channel_axes` arguments are required when using per-channel quantization." + raise ValueError(msg) prefix = self.name return self._create_variables(layer, self.input_shape, self.channel_axes, prefix) diff --git a/nncf/experimental/torch/fx/node_utils.py b/nncf/experimental/torch/fx/node_utils.py index 050dc84cfb1..16b5127e5f9 100644 --- a/nncf/experimental/torch/fx/node_utils.py +++ b/nncf/experimental/torch/fx/node_utils.py @@ -33,7 +33,8 @@ def get_graph_node_by_name(graph: torch.fx.Graph, name: str) -> torch.fx.Node: for node in graph.nodes: if node.name == name: return node - raise RuntimeError(f"Node with name {name} is not found") + msg = f"Node with name {name} is not found" + raise RuntimeError(msg) def get_tensor_constant_from_node(constant_node: torch.fx.Node, model: torch.fx.GraphModule) -> torch.nn.Parameter: @@ -47,12 +48,14 @@ def get_tensor_constant_from_node(constant_node: torch.fx.Node, model: torch.fx. if constant_node is None: return None if constant_node.op != "get_attr": - raise RuntimeError(f"Given node op == {constant_node.op}, but get_attr is expected.") + msg = f"Given node op == {constant_node.op}, but get_attr is expected." + raise RuntimeError(msg) target_atoms = constant_node.target.split(".") attr_itr = model for i, atom in enumerate(target_atoms): if not hasattr(attr_itr, atom): - raise RuntimeError(f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}") + msg = f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}" + raise RuntimeError(msg) attr_itr = getattr(attr_itr, atom) return attr_itr diff --git a/nncf/experimental/torch/fx/quantization/quantize_model.py b/nncf/experimental/torch/fx/quantization/quantize_model.py index 83f8efb5fe7..5b97bc42b73 100644 --- a/nncf/experimental/torch/fx/quantization/quantize_model.py +++ b/nncf/experimental/torch/fx/quantization/quantize_model.py @@ -66,9 +66,11 @@ def quantize_impl( " in case of errors or a poor model performance." 
) if target_device == TargetDevice.CPU_SPR: - raise nncf.InternalError("target_device == CPU_SPR is not supported") + msg = "target_device == CPU_SPR is not supported" + raise nncf.InternalError(msg) if mode is not None: - raise ValueError(f"mode={mode} is not supported") + msg = f"mode={mode} is not supported" + raise ValueError(msg) original_graph_meta = model.meta diff --git a/nncf/experimental/torch/fx/quantization/quantize_pt2e.py b/nncf/experimental/torch/fx/quantization/quantize_pt2e.py index 0d9adc60ab9..683130786a7 100644 --- a/nncf/experimental/torch/fx/quantization/quantize_pt2e.py +++ b/nncf/experimental/torch/fx/quantization/quantize_pt2e.py @@ -81,7 +81,8 @@ def quantize_pt2e( nncf_logger.warning("This is an experimental feature and may change in the future without notice.") if subset_size < 1: - raise nncf.ValidationError("Subset size must be positive.") + msg = "Subset size must be positive." + raise nncf.ValidationError(msg) batch_size = calibration_dataset.get_batch_size() if batchwise_statistics is None: diff --git a/nncf/experimental/torch/fx/transformations.py b/nncf/experimental/torch/fx/transformations.py index 0121c535187..8bc4df052d8 100644 --- a/nncf/experimental/torch/fx/transformations.py +++ b/nncf/experimental/torch/fx/transformations.py @@ -200,9 +200,8 @@ def constant_update_fn( old_const = _get_node_by_input_port_id(node, input_port_id) if old_const.op != "get_attr": - raise nncf.InternalError( - f"Constant on input port {input_port_id} for {node} is expected," f" but node {old_const} is present." - ) + msg = f"Constant on input port {input_port_id} for {node} is expected, but node {old_const} is present." + raise nncf.InternalError(msg) node_name = updated_node_name if updated_node_name else old_const.name + "_updated_constant" # Update metadata of the new constant node. @@ -234,10 +233,11 @@ def qdq_insertion_transformation_builder( def qdq_insertion_transformation(model: torch.fx.GraphModule): if any(tp.target_type != TargetType.OPERATION_WITH_WEIGHTS for tp in target_points) and len(target_points) > 1: - raise nncf.InternalError( + msg = ( "Insertion of shared qdq pair for the weights is not supported." " Please use non shared qdq pairs for the weights quantization." 
) + raise nncf.InternalError(msg) for target_point in target_points: insert_one_qdq(model, target_point, quantizer) @@ -403,7 +403,8 @@ def insert_one_qdq(model: torch.fx.GraphModule, target_point: PTTargetPoint, qua target_node.replace_input_with(input_node, dq_node) else: - raise nncf.InternalError(f"Unexpected target type: {target_point.target_type}") + msg = f"Unexpected target type: {target_point.target_type}" + raise nncf.InternalError(msg) def _insert_call_module( @@ -439,7 +440,8 @@ def get_input_node(target_point: PTTargetPoint, target_node: torch.fx.Node) -> t TargetType.OPERATOR_POST_HOOK, TargetType.OPERATION_WITH_WEIGHTS, ]: - raise nncf.InternalError(f"Unexpected target type: {target_type}") + msg = f"Unexpected target type: {target_type}" + raise nncf.InternalError(msg) if target_type == TargetType.OPERATOR_POST_HOOK: return target_node @@ -474,7 +476,8 @@ def get_ctx_manager(graph: torch.fx.Graph, target_point: PTTargetPoint) -> Calla TargetType.OPERATOR_POST_HOOK, TargetType.OPERATION_WITH_WEIGHTS, ]: - raise nncf.InternalError(f"Unexpected target type: {target_point.target_type}") + msg = f"Unexpected target type: {target_point.target_type}" + raise nncf.InternalError(msg) if target_point.target_type == TargetType.OPERATOR_POST_HOOK: return graph.inserting_after diff --git a/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_kernel.py b/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_kernel.py index 5e357fbe4d4..98028fd3768 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_kernel.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_kernel.py @@ -129,11 +129,11 @@ def set_active_kernel_size(self, kernel_size: KernelSizeType) -> None: :param kernel_size: kernel size value """ if kernel_size is None or kernel_size > self.max_kernel_size or kernel_size < 1: - raise AttributeError( - "Invalid kernel size={} in scope={}.\nIt should be within the range: [1, {}]".format( - kernel_size, self.node_name, self.max_kernel_size - ) + msg = ( + f"Invalid kernel size={kernel_size} in scope={self.node_name}.\n" + f"It should be within the range: [1, {self.max_kernel_size}]" ) + raise AttributeError(msg) self._active_kernel_size = kernel_size @@ -195,9 +195,9 @@ def __init__( for i in range(len(self._ks_set) - 1): ks_small = self._ks_set[i] ks_larger = self._ks_set[i + 1] - param_name = "%dto%d" % (ks_larger, ks_small) + param_name = f"{ks_larger}to{ks_small}" # noinspection PyArgumentList - scale_params["%s_matrix" % param_name] = Parameter(torch.eye(ks_small**2)) + scale_params[f"{param_name}_matrix"] = Parameter(torch.eye(ks_small**2)) for name, param in scale_params.items(): self.register_parameter(name, param) @@ -255,7 +255,8 @@ def set_active_kernel_size(self, kernel_size: KernelSizeType) -> None: nncf_logger.debug(f"set active elastic_kernel={kernel_size} in scope={self.node_name}") assert kernel_size % 2 > 0, "kernel size should be odd number" if kernel_size not in self.kernel_size_list and kernel_size != self.max_kernel_size: - raise ValueError("invalid kernel size to set. Should be a number in {}".format(self.kernel_size_list)) + msg = f"invalid kernel size to set. 
Should be a number in {self.kernel_size_list}" + raise ValueError(msg) super().set_active_kernel_size(kernel_size) @staticmethod @@ -284,7 +285,7 @@ def _get_active_filter(self, kernel_size, weight): _input_filter = _input_filter.view(-1, _input_filter.size(2)) _input_filter = F.linear( _input_filter, - self.__getattr__("%dto%d_matrix" % (src_ks, target_ks)), + self.__getattr__(f"{src_ks}to{target_ks}_matrix"), ) _input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks**2) _input_filter = _input_filter.view(filters.size(0), filters.size(1), target_ks, target_ks) diff --git a/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_width.py b/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_width.py index 0a0e6376cc8..de3a28ddc54 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_width.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_width.py @@ -128,11 +128,11 @@ def set_active_width(self, width: WidthType) -> None: :param width: number of channels """ if width is None or width > self._max_width or width < 1: - raise AttributeError( - "Invalid width={} in scope={}.\nIt should be within the range: [1, {}]".format( - width, self._node_name, self._max_width - ) + msg = ( + f"Invalid width={width} in scope={self._node_name}.\n" + f"It should be within the range: [1, {self._max_width}]" ) + raise AttributeError(msg) self._active_width = width @@ -282,11 +282,11 @@ def __init__( if fixed_width_list: fixed_width_list.sort(reverse=True) if fixed_width_list[0] > max_width: - raise nncf.InternalError( - f"Width list for {node_name} contains invalid values: {fixed_width_list}, {max_width}" - ) + msg = f"Width list for {node_name} contains invalid values: {fixed_width_list}, {max_width}" + raise nncf.InternalError(msg) if fixed_width_list[0] != max_width: - raise nncf.ValidationError(f"Max width for {node_name} is not aligned with pre-trained model") + msg = f"Max width for {node_name} is not aligned with pre-trained model" + raise nncf.ValidationError(msg) self._width_list = fixed_width_list else: self._width_list = self._generate_width_list(self._max_width, params) @@ -315,10 +315,11 @@ def set_active_width(self, width: int) -> None: :param width: number of output channels """ if width not in self.width_list and width != self.max_width: - raise ValueError( + msg = ( f"Invalid number of output channels to set: {width} in scope={self._node_name}. 
" f"Should be a number in {self.width_list}" ) + raise ValueError(msg) super().set_active_width(width) @staticmethod @@ -360,7 +361,8 @@ def _generate_width_list(max_width: int, params: ElasticWidthParams) -> List[int if p.max_num_widths == len(width_list): break if 0 >= multiplier > 1: - raise nncf.InternalError(f"Wrong value for multiplier: {multiplier}") + msg = f"Wrong value for multiplier: {multiplier}" + raise nncf.InternalError(msg) w = int(max_width * multiplier) w = w - (w % ALIGNMENT_CONSTANT_FOR_MULTIPLIERS) w = max(w, p.min_width) @@ -579,7 +581,8 @@ def width_num_params_indicator(self): @width_num_params_indicator.setter def width_num_params_indicator(self, width_num_params_indicator): if width_num_params_indicator == 0 or width_num_params_indicator < -1: - raise nncf.InternalError(f"Invalid width indicator: {width_num_params_indicator}") + msg = f"Invalid width indicator: {width_num_params_indicator}" + raise nncf.InternalError(msg) self._width_num_params_indicator = width_num_params_indicator @property @@ -909,7 +912,8 @@ def _collect_ops_data_by_selection_rule(self, selection_rule: Callable) -> Dict[ for cluster in self._pruned_module_groups_info.get_all_clusters(): all_max_out_channels = {el.elastic_op.max_width for el in cluster.elements} if len(all_max_out_channels) != 1: - raise nncf.InternalError("Invalid grouping of layers with different number of output channels") + msg = "Invalid grouping of layers with different number of output channels" + raise nncf.InternalError(msg) first_elastic_width_info = next(iter(cluster.elements)) op = first_elastic_width_info.elastic_op @@ -1040,7 +1044,8 @@ def build(self, target_model: NNCFNetwork) -> ElasticWidthHandler: list_of_node_ids.append(node.node_id) layer_attrs = node.layer_attributes if metatype not in metatype_vs_elastic_op_creator: - raise nncf.InternalError(f"Elastic width is not supported for {metatype}") + msg = f"Elastic width is not supported for {metatype}" + raise nncf.InternalError(msg) elastic_op_creator = metatype_vs_elastic_op_creator[metatype] elastic_width_operation = elastic_op_creator( @@ -1130,7 +1135,8 @@ def load_state(self, state: Dict[str, Any]) -> None: self._overwrite_groups_widths = params_from_state[self._state_names.OVERWRITE_GROUP_WIDTHS] self._overwriting_pruning_groups = True if len(self._grouped_node_names_to_prune) != len(self._overwrite_groups_widths): - raise nncf.InternalError("Mismatch between number of groups for pruning and their corresponding widths") + msg = "Mismatch between number of groups for pruning and their corresponding widths" + raise nncf.InternalError(msg) if params_from_state.get(self._state_names.ADD_DYNAMIC_INPUTS, None) is not None: self._add_dynamic_inputs = params_from_state[self._state_names.ADD_DYNAMIC_INPUTS] diff --git a/nncf/experimental/torch/nas/bootstrapNAS/search/search.py b/nncf/experimental/torch/nas/bootstrapNAS/search/search.py index 0c78d9a0349..69837029b2a 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/search/search.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/search/search.py @@ -102,7 +102,8 @@ def __init__( self.num_constraints = num_constraints self.population = population if population > num_evals: - raise ValueError("Population size must not be greater than number of evaluations.") + msg = "Population size must not be greater than number of evaluations." 
+ raise ValueError(msg) self.num_evals = num_evals // population * population self.seed = seed self.crossover_prob = crossover_prob @@ -249,14 +250,16 @@ def from_config(cls, model, elasticity_ctrl, nncf_config): algo_name = search_config.get("algorithm") algo_cls = SEARCH_ALGORITHMS.get(algo_name) if not algo_name: - raise NotImplementedError(f"Evolutionary Search Algorithm {algo_name} not implemented") + msg = f"Evolutionary Search Algorithm {algo_name} not implemented" + raise NotImplementedError(msg) return algo_cls(model, elasticity_ctrl, nncf_config) @classmethod def from_checkpoint( cls, model: NNCFNetwork, elasticity_ctrl: ElasticityController, bn_adapt_args, resuming_checkpoint_path: str ) -> "BaseSearchAlgorithm": - raise NotImplementedError("Evolutionary Search Algorithm from checkpoint not implemented") + msg = "Evolutionary Search Algorithm from checkpoint not implemented" + raise NotImplementedError(msg) @property def search_records(self): @@ -334,7 +337,8 @@ def __init__( self._num_vars, self._vars_upper = self._elasticity_ctrl.multi_elasticity_handler.get_design_vars_info() if self._num_vars == 0 or self._vars_lower is None: - raise nncf.InternalError("Search space is empty") + msg = "Search space is empty" + raise nncf.InternalError(msg) self._result = None bn_adapt_params = search_config.get("batchnorm_adaptation", {}) @@ -354,7 +358,8 @@ def evaluator_handlers(self) -> List[BaseEvaluatorHandler]: """ if self._evaluator_handlers: return self._evaluator_handlers - raise nncf.ValidationError("Evaluator handlers haven't been defined") + msg = "Evaluator handlers haven't been defined" + raise nncf.ValidationError(msg) @property def acc_delta(self) -> float: diff --git a/nncf/experimental/torch/nas/bootstrapNAS/training/lr_scheduler.py b/nncf/experimental/torch/nas/bootstrapNAS/training/lr_scheduler.py index 79923d73ac0..e9c70251f3f 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/training/lr_scheduler.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/training/lr_scheduler.py @@ -65,7 +65,8 @@ def calc_learning_rate( elif lr_schedule_type is None: lr = init_lr else: - raise ValueError("do not support: %s" % lr_schedule_type) + msg = f"do not support: {lr_schedule_type}" + raise ValueError(msg) return lr diff --git a/nncf/experimental/torch/nas/bootstrapNAS/training/progressive_shrinking_builder.py b/nncf/experimental/torch/nas/bootstrapNAS/training/progressive_shrinking_builder.py index 115f5628779..3cc72c9b291 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/training/progressive_shrinking_builder.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/training/progressive_shrinking_builder.py @@ -71,12 +71,13 @@ def check_elasticity_dims_consistency( """ for dim in available_elasticity_dims: if dim not in progressivity_of_elasticity: - raise ValueError( + msg = ( f"Invalid elasticity dimension {dim} specified as available in `elasticity` section." f" This dimension is not part of the progressivity_of_elasticity=" f"{progressivity_of_elasticity} which defines order of adding elasticity dimension" f" by going from one training stage to another." 
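The context line `self.num_evals = num_evals // population * population` kept above floors the evaluation budget to a whole number of generations. A quick worked check with invented numbers:

```
# Floor num_evals to the nearest multiple of the population size, so the
# evolutionary search runs an integer number of generations.
population = 40
num_evals = 1010
num_evals = num_evals // population * population
assert num_evals == 1000  # 25 full generations; the 10 leftover evaluations are dropped
```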
) + raise ValueError(msg) def initialize(self, model: NNCFNetwork) -> None: """ diff --git a/nncf/experimental/torch/nas/bootstrapNAS/training/scheduler.py b/nncf/experimental/torch/nas/bootstrapNAS/training/scheduler.py index 233f261cc1f..843df7e2d37 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/training/scheduler.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/training/scheduler.py @@ -241,13 +241,14 @@ def _validate_elasticity_dims( stages_covered = [] for train_dim in desc.train_dims: if train_dim not in available_elasticity_dims: - raise ValueError( + msg = ( f"Invalid training elasticity dimension {train_dim} in the scheduler.\n" f"The elasticity for this dimension is not enabled.\n" f"It can be enabled by specifying `available_elasticity_dims` param in the `elasticity` " f"section of config.\n" f"List of currently available dimensions: {[dim.value for dim in available_elasticity_dims]}" ) + raise ValueError(msg) dim_idx = progressivity_of_elasticity.index(train_dim) if dim_idx not in stages_covered: stages_covered.append(dim_idx) @@ -256,15 +257,15 @@ def _validate_elasticity_dims( if dim_idx < low_priority_dim_idx: low_priority_dim_idx = dim_idx if high_priority_dim_idx < last_stage or low_priority_dim_idx > first_stage: - raise ValueError( - f"stage {progressivity_of_elasticity[high_priority_dim_idx]} violates progressivity of elasticity" - ) + msg = f"stage {progressivity_of_elasticity[high_priority_dim_idx]} violates progressivity of elasticity" + raise ValueError(msg) for i in range(low_priority_dim_idx, high_priority_dim_idx): if i not in stages_covered and progressivity_of_elasticity[i] in available_elasticity_dims: - raise ValueError( + msg = ( f"Missed to call {progressivity_of_elasticity[i]} in {desc.train_dims} which violates " f"progressivity of elasticity {progressivity_of_elasticity}" ) + raise ValueError(msg) last_stage = high_priority_dim_idx first_stage = low_priority_dim_idx @@ -272,9 +273,8 @@ def _validate_lr(self): for desc in self._list_stage_descriptors: # Check if global learning rate has been set if desc.init_lr is not None and bool(self._training_ctrl.lr_schedule_config): - raise ValueError( - f"Global learning rate scheduler is in use. Cannot set stage learning rate: {desc.init_lr}" - ) + msg = f"Global learning rate scheduler is in use. 
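A context line in the `_generate_width_list` hunk above reads `if 0 >= multiplier > 1:`. Python chains comparisons, so this condition can never be true; a short sketch of the semantics, with the intended check written out as an assumption (presumably a plain range test):

```
# `0 >= m > 1` reads as `(0 >= m) and (m > 1)`, which no value satisfies,
# so the guard above is dead code. The intended check was presumably:
def is_invalid_multiplier(multiplier: float) -> bool:
    return multiplier <= 0 or multiplier > 1


for m in (-1.0, 0.0, 0.5, 1.0, 2.0):
    assert not (0 >= m > 1)  # the chained form never fires
print([m for m in (-1.0, 0.5, 2.0) if is_invalid_multiplier(m)])  # [-1.0, 2.0]
```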
Cannot set stage learning rate: {desc.init_lr}" + raise ValueError(msg) # Check if stage learning rate has been set if desc.init_lr is None and not bool(self._training_ctrl.lr_schedule_config): nncf_logger.warning( diff --git a/nncf/experimental/torch/nas/bootstrapNAS/training/training_algorithm.py b/nncf/experimental/torch/nas/bootstrapNAS/training/training_algorithm.py index 977f60c6cce..ff72bc194d6 100644 --- a/nncf/experimental/torch/nas/bootstrapNAS/training/training_algorithm.py +++ b/nncf/experimental/torch/nas/bootstrapNAS/training/training_algorithm.py @@ -253,7 +253,8 @@ def from_checkpoint( :return: the training algorithm """ if not Path(resuming_checkpoint_path).is_file(): - raise FileNotFoundError("no checkpoint found at '{}'".format(resuming_checkpoint_path)) + msg = f"no checkpoint found at '{resuming_checkpoint_path}'" + raise FileNotFoundError(msg) nncf_logger.info(f"=> loading checkpoint '{resuming_checkpoint_path}'") checkpoint = torch.load(resuming_checkpoint_path, map_location="cpu") diff --git a/nncf/experimental/torch/search_building_blocks/search_blocks.py b/nncf/experimental/torch/search_building_blocks/search_blocks.py index edb91aaecbc..e21c8496498 100644 --- a/nncf/experimental/torch/search_building_blocks/search_blocks.py +++ b/nncf/experimental/torch/search_building_blocks/search_blocks.py @@ -78,7 +78,7 @@ def __repr__(self) -> str: return str(self) def __str__(self) -> str: - return "[START NODE: {}, END_NODE: {}]".format(self.start_node_name, self.end_node_name) + return f"[START NODE: {self.start_node_name}, END_NODE: {self.end_node_name}]" def get_state(self) -> Dict[str, Any]: """ @@ -372,10 +372,11 @@ def get_building_blocks( does not lead to duplicate activation layers """ if min_block_size > max_block_size: - raise AttributeError( + msg = ( f"Minimal value for block size {min_block_size} can not be more than maximum one " f"{max_block_size}. Change max_block_size or min_block_size." ) + raise AttributeError(msg) orig_graph = compressed_model.nncf.get_original_graph() # PTNNCFGraph blocks = get_potential_building_blocks(orig_graph, hw_fused_ops, min_block_size, max_block_size) sorted_blocks = sorted(blocks, key=cmp_to_key(compare_for_building_block)) diff --git a/nncf/experimental/torch/sparsify_activations/sparsify_activations_impl.py b/nncf/experimental/torch/sparsify_activations/sparsify_activations_impl.py index 01050652a08..56c81a041e8 100644 --- a/nncf/experimental/torch/sparsify_activations/sparsify_activations_impl.py +++ b/nncf/experimental/torch/sparsify_activations/sparsify_activations_impl.py @@ -170,9 +170,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = PTSparsifyActivationsAlgoBackend() else: - raise nncf.UnsupportedBackendError( - f"{model_backend.value} backend is not supported for `sparsify_activations`." - ) + msg = f"{model_backend.value} backend is not supported for `sparsify_activations`." + raise nncf.UnsupportedBackendError(msg) def _get_target_sparsity_by_node(self, graph: NNCFGraph) -> Dict[NNCFNode, float]: """ @@ -195,12 +194,12 @@ def _get_target_sparsity_by_node(self, graph: NNCFGraph) -> Dict[NNCFNode, float ): continue if node in target_sparsity_by_node: - raise nncf.ValidationError( - f'"{node.node_name}" is matched by multiple items in `target_sparsity_by_scope`.' - ) + msg = f'"{node.node_name}" is matched by multiple items in `target_sparsity_by_scope`.' 
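The `_get_target_sparsity_by_node` hunks here combine a range check, a duplicate-match check, and an empty-result check. A hypothetical condensed version, with invented scope names, showing the same fail-fast shape:

```
# Hypothetical condensed form of the sparsity checks above; names invented.
from typing import Dict, List, Tuple


def assign_sparsity(matches: List[Tuple[str, float]]) -> Dict[str, float]:
    target_sparsity_by_node: Dict[str, float] = {}
    for node_name, target_sparsity in matches:
        if target_sparsity < 0.0 or target_sparsity > 1.0:
            msg = f'Target sparsity for scope "{node_name}" should be in range [0, 1].'
            raise ValueError(msg)
        if node_name in target_sparsity_by_node:
            msg = f'"{node_name}" is matched by multiple items in `target_sparsity_by_scope`.'
            raise ValueError(msg)
        target_sparsity_by_node[node_name] = target_sparsity
    if not target_sparsity_by_node:
        msg = "No layers to conduct activation sparsification."
        raise ValueError(msg)
    return target_sparsity_by_node


print(assign_sparsity([("linear1", 0.3), ("linear2", 0.5)]))
```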
+ raise nncf.ValidationError(msg) target_sparsity_by_node[node] = target_sparsity if not target_sparsity_by_node: - raise nncf.ValidationError("No layers to conduct activation sparsification.") + msg = "No layers to conduct activation sparsification." + raise nncf.ValidationError(msg) return target_sparsity_by_node @@ -243,7 +242,8 @@ def sparsify_activations( for scope, target_sparsity in target_sparsity_by_scope.items(): if target_sparsity < 0.0 or target_sparsity > 1.0: - raise ValueError(f'Target sparsity for scope "{scope}" should be in range [0, 1].') + msg = f'Target sparsity for scope "{scope}" should be in range [0, 1].' + raise ValueError(msg) if ignored_scope is None: ignored_scope = IgnoredScope() diff --git a/nncf/experimental/torch/sparsify_activations/torch_backend.py b/nncf/experimental/torch/sparsify_activations/torch_backend.py index 93f73fd3eb4..5ab8e86f708 100644 --- a/nncf/experimental/torch/sparsify_activations/torch_backend.py +++ b/nncf/experimental/torch/sparsify_activations/torch_backend.py @@ -49,7 +49,8 @@ def __init__(self, target_sparsity: float, alpha: float = 0.2): super().__init__() self.target_sparsity = target_sparsity if alpha <= 0.0 or alpha >= 1.0: - raise ValueError("The decay factor `alpha` should be in range (0, 1).") + msg = "The decay factor `alpha` should be in range (0, 1)." + raise ValueError(msg) self.alpha = alpha self.register_buffer("running_threshold", torch.tensor(float("-inf"))) self.register_buffer("num_batches_tracked", torch.tensor(0)) @@ -195,5 +196,6 @@ def _get_activation_port_id(node: NNCFNode, graph: NNCFGraph) -> int: continue activation_ports.append(edge.input_port_id) if len(activation_ports) != 1: - raise nncf.InternalError(f'Cannot find activation port for node "{node}".') + msg = f'Cannot find activation port for node "{node}".' + raise nncf.InternalError(msg) return activation_ports[0] diff --git a/nncf/experimental/torch/sparsity/movement/algo.py b/nncf/experimental/torch/sparsity/movement/algo.py index 8c59362021b..94a1d29944e 100644 --- a/nncf/experimental/torch/sparsity/movement/algo.py +++ b/nncf/experimental/torch/sparsity/movement/algo.py @@ -70,7 +70,8 @@ def create_weight_sparsifying_operation( sparse_cfg = configs_per_scopes.sparse_config matched_scopes.append(target_scopes) if len(matched_scopes) >= 2: - raise nncf.InternalError(f'"{node_name}" is matched by multiple items in `sparse_structure_by_scopes`.') + msg = f'"{node_name}" is matched by multiple items in `sparse_structure_by_scopes`.' + raise nncf.InternalError(msg) return MovementSparsifier( target_module_node, @@ -110,7 +111,8 @@ def _sparsify_weights(self, target_model: NNCFNetwork) -> List[PTInsertionComman self._sparsified_module_info.append(SparseModuleInfo(node_name, sparsified_module, sparsifying_operation)) if not insertion_commands: - raise nncf.InternalError("No sparsifiable layer found for movement sparsity algorithm.") + msg = "No sparsifiable layer found for movement sparsity algorithm." + raise nncf.InternalError(msg) return insertion_commands def _build_controller(self, model: NNCFNetwork) -> PTCompressionAlgorithmController: @@ -150,10 +152,11 @@ def __init__(self, target_model: NNCFNetwork, sparsified_module_info: List[Spars if self._scheduler.enable_structured_masking: if not is_supported_model_family(self.model): - raise nncf.UnsupportedModelError( + msg = ( "You set `enable_structured_masking=True`, but no supported model is detected. " f"Supported model families: {MODEL_FAMILIES}." 
) + raise nncf.UnsupportedModelError(msg) self._structured_mask_handler = StructuredMaskHandler(self.model, self.sparsified_module_info) @property @@ -191,10 +194,11 @@ def compression_stage(self) -> CompressionStage: def distributed(self): if not dist.is_initialized(): - raise KeyError( + msg = ( "Could not set distributed mode for the compression algorithm " "because the default process group has not been initialized." ) + raise KeyError(msg) if next(self._model.parameters()).is_cuda: state = torch.cuda.get_rng_state() diff --git a/nncf/experimental/torch/sparsity/movement/layers.py b/nncf/experimental/torch/sparsity/movement/layers.py index 8d63e9d03a2..23702382058 100644 --- a/nncf/experimental/torch/sparsity/movement/layers.py +++ b/nncf/experimental/torch/sparsity/movement/layers.py @@ -56,36 +56,35 @@ def __init__( (isinstance(sparse_factors, (tuple, list)) and tuple(sparse_factors) == (1, 1)) or sparse_factors is None ): - raise ValueError( - f"{error_prefix} Fine sparse structure expects `sparse_factors` to be [1, 1] or unspecified." - ) + msg = f"{error_prefix} Fine sparse structure expects `sparse_factors` to be [1, 1] or unspecified." + raise ValueError(msg) if sparse_axis is not None: - raise ValueError(f"{error_prefix} Fine sparse structure does not expect specified `axis`.") + msg = f"{error_prefix} Fine sparse structure does not expect specified `axis`." + raise ValueError(msg) self.sparse_factors = (1, 1) if self.mode == SparseStructure.BLOCK: if sparse_factors is None: - raise ValueError( - f"{error_prefix} Missing `sparse_factors`. Block sparsity structure expects it specified." - ) + msg = f"{error_prefix} Missing `sparse_factors`. Block sparsity structure expects it specified." + raise ValueError(msg) if not (isinstance(sparse_factors, (tuple, list)) and len(sparse_factors) == 2): - raise ValueError( + msg = ( f"{error_prefix} Invalid format of `sparse_factors. " "Block sparsity structure expects tuple of two numbers." ) + raise ValueError(msg) if sparse_axis is not None: - raise ValueError(f"{error_prefix} Block sparse structure does not expect specified `axis`.") + msg = f"{error_prefix} Block sparse structure does not expect specified `axis`." + raise ValueError(msg) self.sparse_factors = tuple(sparse_factors) if self.mode == SparseStructure.PER_DIM: if sparse_axis is None: - raise ValueError( - f"{error_prefix} Missing `axis`. Per-dim sparsity structure expects it to be specified." - ) + msg = f"{error_prefix} Missing `axis`. Per-dim sparsity structure expects it to be specified." + raise ValueError(msg) if sparse_factors is not None: - raise ValueError( - f"{error_prefix} Per-dim sparsity structure does not expect specified `sparse_factors`." - ) + msg = f"{error_prefix} Per-dim sparsity structure does not expect specified `sparse_factors`." + raise ValueError(msg) self.sparse_axis = int(sparse_axis) @classmethod @@ -131,7 +130,8 @@ def from_config(cls, config: Dict[str, Any]) -> "SparseConfigByScope": error_prefix = f"Invalid sparse structure by scopes {config}." target_scopes = config.get("target_scopes") if not target_scopes: - raise ValueError(f"{error_prefix} Missing `target_scopes`.") + msg = f"{error_prefix} Missing `target_scopes`." + raise ValueError(msg) sparse_config = SparseConfig.from_config(config) return cls(sparse_config, target_scopes) @@ -240,7 +240,8 @@ def get_importance(self, is_bias: bool = False, expanded: bool = True) -> torch. :param expanded: Whether should expand the importance to the same shape as module weight or bias. 
""" if is_bias and (not self.prune_bias): - raise ValueError("The layer to sparsify does not contain bias.") + msg = "The layer to sparsify does not contain bias." + raise ValueError(msg) importance = self.bias_importance if is_bias else self.weight_importance if (not expanded) or self.sparse_factors == [1, 1]: return importance @@ -305,7 +306,8 @@ def _get_weight_importance_shape( score_shape.append(dim // factor) return tuple(score_shape) - raise nncf.InternalError("Unknown sparse structure.") + msg = "Unknown sparse structure." + raise nncf.InternalError(msg) @staticmethod def _get_sparse_factors(weight_shape: List[int], sparse_config: SparseConfig) -> Tuple[int, int]: @@ -317,9 +319,8 @@ def _get_sparse_factors(weight_shape: List[int], sparse_config: SparseConfig) -> if sparse_config.mode == SparseStructure.PER_DIM: if sparse_config.sparse_axis < 0 or sparse_config.sparse_axis >= len(weight_shape): - raise ValueError( - "Invalid axis id {}, axes range is [0, {}]".format(sparse_config.sparse_axis, len(weight_shape)) - ) + msg = f"Invalid axis id {sparse_config.sparse_axis}, axes range is [0, {len(weight_shape)}]" + raise ValueError(msg) sparse_factors = deepcopy(weight_shape) sparse_factors[sparse_config.sparse_axis] = 1 sparse_factors = tuple(sparse_factors) diff --git a/nncf/experimental/torch/sparsity/movement/scheduler.py b/nncf/experimental/torch/sparsity/movement/scheduler.py index 8c3c1133525..8480ea629ba 100644 --- a/nncf/experimental/torch/sparsity/movement/scheduler.py +++ b/nncf/experimental/torch/sparsity/movement/scheduler.py @@ -62,17 +62,20 @@ def __init__( """ if steps_per_epoch is None and warmup_start_epoch < 1: - raise ValueError( + msg = ( "`warmup_start_epoch` must be >= 1 to enable the auto calculation of " "`steps_per_epoch`. Please either change `warmup_start_epoch` to a larger " "number or specify `steps_per_epoch` in the config." ) + raise ValueError(msg) if warmup_start_epoch < 0 or warmup_end_epoch <= warmup_start_epoch: - raise ValueError("Movement sparsity requires 0 <= warmup_start_epoch < warmup_end_epoch.") + msg = "Movement sparsity requires 0 <= warmup_start_epoch < warmup_end_epoch." + raise ValueError(msg) if importance_regularization_factor < 0: - raise ValueError("`importance_regularization_factor` should not be a negative number.") + msg = "`importance_regularization_factor` should not be a negative number." + raise ValueError(msg) if init_importance_threshold is not None and init_importance_threshold >= final_importance_threshold: nncf_logger.warning( @@ -109,10 +112,11 @@ def from_dict(cls, params: Dict[str, Any]) -> "MovementSchedulerParams": steps_per_epoch: Optional[int] = params.get("steps_per_epoch") if None in [warmup_start_epoch, warmup_end_epoch, importance_regularization_factor]: - raise ValueError( + msg = ( "`warmup_start_epoch`, `warmup_start_epoch` and `importance_regularization_factor` " "are required in config for Movement Sparsity." ) + raise ValueError(msg) return cls( warmup_start_epoch=warmup_start_epoch, @@ -320,10 +324,11 @@ def _maybe_should_skip(self) -> None: and self._steps_in_current_epoch > 0 and self._steps_per_epoch != self._steps_in_current_epoch ): - raise Exception( + msg = ( "Actual steps per epoch and steps per epoch from the scheduler " "parameters are different. Scheduling may be incorrect." 
) + raise Exception(msg) if self._steps_per_epoch is None: self._should_skip = True diff --git a/nncf/experimental/torch/sparsity/movement/structured_mask_handler.py b/nncf/experimental/torch/sparsity/movement/structured_mask_handler.py index 65705c53f3e..a9b7fc7e50e 100644 --- a/nncf/experimental/torch/sparsity/movement/structured_mask_handler.py +++ b/nncf/experimental/torch/sparsity/movement/structured_mask_handler.py @@ -119,7 +119,8 @@ def independent_structured_mask(self) -> Optional[torch.Tensor]: @torch.no_grad() def independent_structured_mask(self, tensor: torch.Tensor): if self.structured_mask_shape != tensor.shape: - raise ValueError("Wrong shape about independent structured mask.") + msg = "Wrong shape about independent structured mask." + raise ValueError(msg) if self._independent_structured_mask is None: self._independent_structured_mask = tensor.clone() else: @@ -138,7 +139,8 @@ def dependent_structured_mask(self) -> Optional[torch.Tensor]: @torch.no_grad() def dependent_structured_mask(self, tensor: torch.Tensor): if self.structured_mask_shape != tensor.shape: - raise ValueError("Wrong shape about dependent structured mask.") + msg = "Wrong shape about dependent structured mask." + raise ValueError(msg) if self._dependent_structured_mask is None: self._dependent_structured_mask = tensor.clone() else: diff --git a/nncf/experimental/torch2/function_hook/graph/graph_visualization.py b/nncf/experimental/torch2/function_hook/graph/graph_visualization.py index 44f47a9bcd5..96ada0022d4 100644 --- a/nncf/experimental/torch2/function_hook/graph/graph_visualization.py +++ b/nncf/experimental/torch2/function_hook/graph/graph_visualization.py @@ -120,7 +120,8 @@ def get_label_from_node_data(node_data: Dict[str, Any], style: PydotStyleTemplat return f"{meta.name_in_model}" if isinstance(meta, FunctionMeta): return f"{meta.op_name}" - raise ValueError(f"Unknown meta node {type(meta)}") + msg = f"Unknown meta node {type(meta)}" + raise ValueError(msg) def get_label_from_edge_data(node_data: Dict[str, Any], style: PydotStyleTemplate) -> str: @@ -201,7 +202,8 @@ def get_style(node: Dict[str, Any], style: PydotStyleTemplate) -> Dict[str, str] "style": '"filled,rounded"', } - raise ValueError(f"Unknown meta node {type(meta)}") + msg = f"Unknown meta node {type(meta)}" + raise ValueError(msg) def to_pydot(nx_graph: nx.MultiDiGraph, style_template: PydotStyleTemplate = PydotStyleTemplate.full) -> pydot.Graph: diff --git a/nncf/experimental/torch2/function_hook/nncf_graph/nncf_graph_builder.py b/nncf/experimental/torch2/function_hook/nncf_graph/nncf_graph_builder.py index edbb71d32c7..b298ad5a7b0 100644 --- a/nncf/experimental/torch2/function_hook/nncf_graph/nncf_graph_builder.py +++ b/nncf/experimental/torch2/function_hook/nncf_graph/nncf_graph_builder.py @@ -48,7 +48,8 @@ def get_node_type(type: NodeType, meta: Union[ConstMeta, FunctionMeta, InOutMeta return "nncf_model_const" if isinstance(meta, FunctionMeta): return meta.func_name - raise nncf.InternalError("Unexpected metadata type") + msg = "Unexpected metadata type" + raise nncf.InternalError(msg) def get_name_of_node(meta: Union[ConstMeta, FunctionMeta, InOutMeta]) -> str: @@ -64,7 +65,8 @@ def get_name_of_node(meta: Union[ConstMeta, FunctionMeta, InOutMeta]) -> str: return meta.op_name if isinstance(meta, InOutMeta): return meta.name - raise nncf.InternalError("Unexpected metadata type") + msg = "Unexpected metadata type" + raise nncf.InternalError(msg) def get_dtype(dtype: torch.dtype) -> Dtype: @@ -172,7 +174,8 @@ def 
convert_to_nncf_graph(nx_graph: nx.MultiDiGraph) -> PTNNCFGraph: for node, data in nx_graph.nodes(data=True): meta = data["meta"] if not isinstance(meta, (ConstMeta, FunctionMeta, InOutMeta)): - raise nncf.InternalError(f"Unknown metadata type: {type(meta)}") + msg = f"Unknown metadata type: {type(meta)}" + raise nncf.InternalError(msg) node_name = get_name_of_node(meta) node_type = get_node_type(data["type"], meta) meta_type = get_meta_type(node_type, meta) diff --git a/nncf/experimental/torch2/function_hook/wrapper.py b/nncf/experimental/torch2/function_hook/wrapper.py index 971a33a9a4a..c671b606d49 100644 --- a/nncf/experimental/torch2/function_hook/wrapper.py +++ b/nncf/experimental/torch2/function_hook/wrapper.py @@ -34,12 +34,14 @@ class ForwardWithHooks: def __new__(cls, orig_forward: Callable[..., Any]) -> ForwardWithHooks: if not callable(orig_forward): - raise TypeError("the first argument must be callable") + msg = "the first argument must be callable" + raise TypeError(msg) if isinstance(orig_forward, ForwardWithHooks): - raise TypeError("Func already wrapped") + msg = "Func already wrapped" + raise TypeError(msg) - self = super(ForwardWithHooks, cls).__new__(cls) + self = super().__new__(cls) self._func = orig_forward return self @@ -60,12 +62,15 @@ def __reduce__(self) -> Tuple[Callable[..., Any], Tuple[Any, ...], Tuple[Any, .. def __setstate__(self, state: Tuple[Any, Any]) -> None: if not isinstance(state, tuple): - raise TypeError("argument to __setstate__ must be a tuple") + msg = "argument to __setstate__ must be a tuple" + raise TypeError(msg) if len(state) != 2: - raise TypeError(f"expected 2 items in state, got {len(state)}") + msg = f"expected 2 items in state, got {len(state)}" + raise TypeError(msg) func, namespace = state if not callable(func) or (namespace is not None and not isinstance(namespace, dict)): - raise TypeError("invalid partial state") + msg = "invalid partial state" + raise TypeError(msg) if namespace is None: namespace = {} @@ -101,12 +106,14 @@ class ReplicateForDataParallel: def __new__(cls, func: Callable[..., Any]) -> ReplicateForDataParallel: if not callable(func): - raise TypeError("the first argument must be callable") + msg = "the first argument must be callable" + raise TypeError(msg) if isinstance(func, ReplicateForDataParallel): - raise TypeError("Func already wrapped") + msg = "Func already wrapped" + raise TypeError(msg) - self = super(ReplicateForDataParallel, cls).__new__(cls) + self = super().__new__(cls) self._func = func return self @@ -131,12 +138,15 @@ def __reduce__(self) -> Tuple[Callable[..., Any], Tuple[Any, ...], Tuple[Any, .. 
    def __setstate__(self, state: Dict[str, Any]) -> None:
        if not isinstance(state, tuple):
-            raise TypeError("argument to __setstate__ must be a tuple")
+            msg = "argument to __setstate__ must be a tuple"
+            raise TypeError(msg)
        if len(state) != 2:
-            raise TypeError(f"expected 2 items in state, got {len(state)}")
+            msg = f"expected 2 items in state, got {len(state)}"
+            raise TypeError(msg)
        func, namespace = state
        if not callable(func) or (namespace is not None and not isinstance(namespace, dict)):
-            raise TypeError("invalid partial state")
+            msg = "invalid partial state"
+            raise TypeError(msg)

        if namespace is None:
            namespace = {}
@@ -166,7 +176,8 @@ def wrap_model(model: nn.Module) -> nn.Module:
     """
     if "forward" in model.__dict__:
-        raise nncf.InternalError("Wrapper does not supported models with overrided forward function")
+        msg = "Wrapper does not support models with an overridden forward function"
+        raise nncf.InternalError(msg)
     model.forward = ForwardWithHooks(model.forward)
     model._replicate_for_data_parallel = ReplicateForDataParallel(model._replicate_for_data_parallel)  # type: ignore
     model.add_module(ATR_HOOK_STORAGE, HookStorage())
@@ -197,7 +208,8 @@ def get_hook_storage(model: nn.Module) -> HookStorage:
     """
     storage = getattr(model, ATR_HOOK_STORAGE)
     if storage is None:
-        raise nncf.InstallationError("Hook storage is not exist in the model")
+        msg = "Hook storage does not exist in the model"
+        raise nncf.InstallationError(msg)
     return cast(HookStorage, getattr(model, ATR_HOOK_STORAGE))
diff --git a/nncf/experimental/torch2/model_transformer.py b/nncf/experimental/torch2/model_transformer.py
index d0cca131b7c..6654ae6550e 100644
--- a/nncf/experimental/torch2/model_transformer.py
+++ b/nncf/experimental/torch2/model_transformer.py
@@ -54,7 +54,8 @@ def transform(self, transformation_layout: TransformationLayout) -> GraphModelWrapper:
         for transformation in transformations:
             transformation_cls = transformation.__class__
             if transformation_cls not in [x[0] for x in self._command_transformation_ordered_pairs]:
-                raise ValueError(f"Unsupported transformation: {transformation_cls}")
+                msg = f"Unsupported transformation: {transformation_cls}"
+                raise ValueError(msg)
             aggregated_transformations[transformation.__class__].append(transformation)

         model = self._model.model
diff --git a/nncf/experimental/torch2/quantization/quantize_model.py b/nncf/experimental/torch2/quantization/quantize_model.py
index 6c5990ec853..efa17fc11c9 100644
--- a/nncf/experimental/torch2/quantization/quantize_model.py
+++ b/nncf/experimental/torch2/quantization/quantize_model.py
@@ -45,11 +45,14 @@ def quantize_impl(
     Implementation of the `quantize()` method for the PyTorch backend.
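The `ForwardWithHooks` and `ReplicateForDataParallel` hunks above also apply pyupgrade's UP008: the two-argument `super(Class, cls)` form becomes the zero-argument Python 3 form. A small runnable sketch with an invented class:

```
# Sketch of the UP008 rewrite: zero-argument super() resolves the class and
# instance from the enclosing scope, so the class name is not repeated.
class Wrapper:
    def __new__(cls, func):
        # before pyupgrade: self = super(Wrapper, cls).__new__(cls)
        self = super().__new__(cls)
        self._func = func
        return self


w = Wrapper(print)
print(w._func is print)  # True
```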
""" if fast_bias_correction is False: - raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported") + msg = f"fast_bias_correction={fast_bias_correction} is not supported" + raise ValueError(msg) if target_device == TargetDevice.CPU_SPR: - raise nncf.InternalError("target_device == CPU_SPR is not supported") + msg = "target_device == CPU_SPR is not supported" + raise nncf.InternalError(msg) if mode is not None: - raise ValueError(f"mode={mode} is not supported") + msg = f"mode={mode} is not supported" + raise ValueError(msg) copied_model = deepcopy(model) example_input = next(iter(calibration_dataset.get_inference_data())) diff --git a/nncf/experimental/torch2/statistics/aggregator.py b/nncf/experimental/torch2/statistics/aggregator.py index b85a513a9f2..9c1ff4c7d23 100644 --- a/nncf/experimental/torch2/statistics/aggregator.py +++ b/nncf/experimental/torch2/statistics/aggregator.py @@ -100,6 +100,7 @@ def _get_statistics_key(self, statistics: TensorStatistic, target_point: TargetP :return: Statistics key. """ if not isinstance(target_point, PTTargetPoint): - raise nncf.InternalError(f"Unexpected target point type: {type(target_point)}") + msg = f"Unexpected target point type: {type(target_point)}" + raise nncf.InternalError(msg) target_point_id = f"{target_point.target_node_name}_{target_point.type}_{target_point.input_port_id}" return f"{statistics.__class__.__name__}_{target_point_id}" diff --git a/nncf/onnx/graph/model_transformer.py b/nncf/onnx/graph/model_transformer.py index bcdd6c610b7..0243d911dfd 100644 --- a/nncf/onnx/graph/model_transformer.py +++ b/nncf/onnx/graph/model_transformer.py @@ -273,7 +273,8 @@ def _get_scale_zero_point_tensors( elif tensor_type == np.int8: onnx_tensor_type = onnx.TensorProto.INT8 else: - raise nncf.ValidationError(f"Incorrect tensor type - {tensor_type}.") + msg = f"Incorrect tensor type - {tensor_type}." + raise nncf.ValidationError(msg) assert quantizer.input[1] == dequantizer.input[1] and quantizer.input[2] == dequantizer.input[2] scale_tensor_name = quantizer.input[1] zero_point_tensor_name = quantizer.input[2] @@ -327,9 +328,8 @@ def _insert_quantizer_dequantizer( input_nodes = [] input_nodes.extend(children_node_mapping[target_edge_name]) if not input_nodes: - raise nncf.InternalError( - f"Can not add the quantizer to the {target_edge_name} edge. This edge does not have end node." - ) + msg = f"Can not add the quantizer to the {target_edge_name} edge. This edge does not have end node." + raise nncf.InternalError(msg) if transformation.target_point.type == TargetType.PRE_LAYER_OPERATION: # If we need to change only target nodes input @@ -431,7 +431,8 @@ def _apply_qdq_node_removing_transformations( was_processed[dequantize_node_proto.name] = True if not all(was_processed.values()): - raise RuntimeError("Invalid transformation commands.") + msg = "Invalid transformation commands." 
+ raise RuntimeError(msg) initializers = {i.name: i for i in model.graph.initializer} value_infos = {i.name: i for i in model.graph.value_info} diff --git a/nncf/onnx/graph/nncf_graph_builder.py b/nncf/onnx/graph/nncf_graph_builder.py index 8a8719949e1..b23cdfd90b2 100644 --- a/nncf/onnx/graph/nncf_graph_builder.py +++ b/nncf/onnx/graph/nncf_graph_builder.py @@ -225,11 +225,12 @@ def _replace_empty_node_name(model: onnx.ModelProto) -> onnx.ModelProto: name_counter = Counter([node.name for node in model.graph.node]) if max(name_counter.values()) > 1: - raise nncf.ValidationError( + msg = ( f"Nodes {[(name, cnt) for name, cnt in name_counter.items() if cnt > 1]} " "(name, counts) occurred more than once. " "NNCF expects every node to have a unique name." ) + raise nncf.ValidationError(msg) return model diff --git a/nncf/onnx/graph/node_utils.py b/nncf/onnx/graph/node_utils.py index 53cd5e3f7d5..9aaab20b6ea 100644 --- a/nncf/onnx/graph/node_utils.py +++ b/nncf/onnx/graph/node_utils.py @@ -180,7 +180,8 @@ def _get_activation_tensor_shape( edges = nncf_graph.get_output_edges_by_port_id(node, target_point.port_id) shape = edges[0].tensor_shape else: - raise NotImplementedError(f"Unsupported target point type {target_point.type}.") + msg = f"Unsupported target point type {target_point.type}." + raise NotImplementedError(msg) if not shape: # ONNX model can not have a shape of a edge, even after shape inference. if target_point.type == TargetType.PRE_LAYER_OPERATION: nncf_logger.info( diff --git a/nncf/onnx/graph/onnx_helper.py b/nncf/onnx/graph/onnx_helper.py index be8499f13dd..27e94ff1fbc 100644 --- a/nncf/onnx/graph/onnx_helper.py +++ b/nncf/onnx/graph/onnx_helper.py @@ -97,7 +97,8 @@ def get_input_port_id_for_node_after_input(input_name: str, to_node: onnx.NodePr for input_port_id, port in enumerate(to_node.input): if port == input_name: return input_port_id - raise nncf.ValidationError(f"The node {to_node} does not have input edge with the name {input_name}") + msg = f"The node {to_node} does not have input edge with the name {input_name}" + raise nncf.ValidationError(msg) def get_output_port_id_for_node_before_output(output_name: str, from_node: onnx.NodeProto) -> int: @@ -111,7 +112,8 @@ def get_output_port_id_for_node_before_output(output_name: str, from_node: onnx. for output_port_id, port in enumerate(from_node.output): if port == output_name: return output_port_id - raise nncf.ValidationError(f"The node {from_node} does not have output edge with the name {output_name}") + msg = f"The node {from_node} does not have output edge with the name {output_name}" + raise nncf.ValidationError(msg) def get_node_index(model: onnx.ModelProto, node_name: str) -> Optional[int]: @@ -135,8 +137,7 @@ def _get_all_tensors(model: onnx.ModelProto) -> Iterator[onnx.TensorProto]: :param model: ONNX model. :yield: tensors of ONNX model. 
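The `_get_all_tensors` hunk continuing just below applies pyupgrade's UP028: a bare for/yield loop over an iterable collapses to `yield from`. A runnable sketch with faked data instead of ONNX protos:

```
# Sketch of the UP028 rewrite; initializer and attribute tensors are faked
# as plain strings rather than onnx.TensorProto objects.
from typing import Iterator, List


def get_all_tensors(initializers: List[str], attr_tensors: List[str]) -> Iterator[str]:
    yield from initializers  # was: for initializer in initializers: yield initializer
    yield from attr_tensors


print(list(get_all_tensors(["w1", "w2"], ["bias_t"])))  # ['w1', 'w2', 'bias_t']
```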
""" - for initializer in model.graph.initializer: - yield initializer + yield from model.graph.initializer for node in model.graph.node: for attribute in node.attribute: if attribute.HasField("t"): @@ -169,7 +170,8 @@ def get_tensor(model: onnx.ModelProto, tensor_name: str) -> onnx.TensorProto: for tensor in _get_all_tensors(model): if tensor.name == tensor_name: return tensor - raise nncf.ValidationError("There is no tensor with the name {}".format(tensor_name)) + msg = f"There is no tensor with the name {tensor_name}" + raise nncf.ValidationError(msg) def get_tensor_value(model: onnx.ModelProto, tensor_name: str) -> np.ndarray: diff --git a/nncf/onnx/quantization/quantize_model.py b/nncf/onnx/quantization/quantize_model.py index a1748a45f8d..6fe92206444 100644 --- a/nncf/onnx/quantization/quantize_model.py +++ b/nncf/onnx/quantization/quantize_model.py @@ -53,11 +53,14 @@ def quantize_impl( Implementation of the `quantize()` method for the ONNX backend. """ if target_device == TargetDevice.CPU_SPR: - raise nncf.ValidationError("target_device == CPU_SPR is not supported.") + msg = "target_device == CPU_SPR is not supported." + raise nncf.ValidationError(msg) if mode is not None: - raise ValueError(f"mode={mode} is not supported") + msg = f"mode={mode} is not supported" + raise ValueError(msg) if model.opset_import[0].version < 10: - raise nncf.ValidationError("ONNX models with opset version < 10 do not support quantization.") + msg = "ONNX models with opset version < 10 do not support quantization." + raise nncf.ValidationError(msg) if model.opset_import[0].version < 13: nncf_logger.warning( "ONNX models with 10 < opset version < 13 do not support per-channel quantization." diff --git a/nncf/onnx/quantization/quantizer_parameters.py b/nncf/onnx/quantization/quantizer_parameters.py index 39aad11064c..f7471c1b029 100644 --- a/nncf/onnx/quantization/quantizer_parameters.py +++ b/nncf/onnx/quantization/quantizer_parameters.py @@ -49,18 +49,19 @@ def convert_fq_params_to_onnx_params( :return: Quantizer layer attributes. """ if num_bits != 8: - raise ValueError("Can only export to INT8/UIN8 8 bits ONNX Quantize/Dequantize pairs.") + msg = "Can only export to INT8/UIN8 8 bits ONNX Quantize/Dequantize pairs." + raise ValueError(msg) levels = parameters.levels if levels not in [255, 256]: - raise ValueError("Can only export to INT8/UIN8 256-level ONNX Quantize/Dequantize pairs.") + msg = "Can only export to INT8/UIN8 256-level ONNX Quantize/Dequantize pairs." + raise ValueError(msg) input_low, input_high = parameters.input_low, parameters.input_high output_low, output_high = parameters.output_low, parameters.output_high if not fns.allclose(input_high, output_high) or not fns.allclose(input_low, output_low): - raise ValueError( - "ONNX Quantize/Dequantize pairs only support input_high == output_high and input_low == output_low." - ) + msg = "ONNX Quantize/Dequantize pairs only support input_high == output_high and input_low == output_low." 
+ raise ValueError(msg) level_low, level_high = get_level_low_level_high(tensor_type) narrow_range = levels == 2**num_bits - 1 diff --git a/nncf/openvino/graph/metatypes/openvino_metatypes.py b/nncf/openvino/graph/metatypes/openvino_metatypes.py index 6c800db70a8..9c6909c7b30 100644 --- a/nncf/openvino/graph/metatypes/openvino_metatypes.py +++ b/nncf/openvino/graph/metatypes/openvino_metatypes.py @@ -48,7 +48,8 @@ def determine_subtype(cls, node: ov.Node) -> Optional[Type[OperatorMetatype]]: if subtype.matches(node): matches.append(subtype) if len(matches) > 1: - raise nncf.InternalError("Multiple subtypes match operator call - can not determine single subtype.") + msg = "Multiple subtypes match operator call - can not determine single subtype." + raise nncf.InternalError(msg) if not matches: return None return matches[0] diff --git a/nncf/openvino/graph/model_transformer.py b/nncf/openvino/graph/model_transformer.py index 8a7c30cace1..389c6529263 100644 --- a/nncf/openvino/graph/model_transformer.py +++ b/nncf/openvino/graph/model_transformer.py @@ -183,7 +183,8 @@ def _get_extra_model_outputs( output = node.input_value(port_id) extra_model_outputs.append((output, output.get_index(), output_dtype)) else: - raise NotImplementedError(f"Unsupported target point type {transformation.target_point.type}") + msg = f"Unsupported target point type {transformation.target_point.type}" + raise NotImplementedError(msg) return extra_model_outputs @@ -414,7 +415,8 @@ def _insert_fake_quantize_op( for inp_node in target_inputs: inp_node.replace_source_output(fq.output(0)) else: - raise nncf.InternalError(f"Incorrect target point type {transform_type}") + msg = f"Incorrect target point type {transform_type}" + raise nncf.InternalError(msg) @staticmethod def _insert_fake_convert_op( @@ -468,7 +470,8 @@ def _insert_fake_convert_op( for inp_node in target_inputs: inp_node.replace_source_output(fc.output(0)) else: - raise nncf.InternalError(f"Incorrect target point type {transform_type}") + msg = f"Incorrect target point type {transform_type}" + raise nncf.InternalError(msg) @staticmethod def _apply_bias_correction_transformations(model, transformations: List[OVBiasCorrectionCommand]) -> ov.Model: @@ -513,7 +516,8 @@ def _set_const_value(node_with_const: ov.Node, const_port_id: int, const_value: queue.append((curr_node.input(0), curr_node.input_value(0).get_node())) if const_node is None: - raise nncf.InternalError("Constant node was expected but could not find it.") + msg = "Constant node was expected but could not find it." + raise nncf.InternalError(msg) const_value = np.reshape(const_value, const_node.data.shape) @@ -668,7 +672,8 @@ def _apply_stateless_model_extraction_transformation( for target_in in op_output.get_target_inputs(): target_in.replace_source_output(op_input_values[0]) else: - raise RuntimeError("ReadValue has no initial value.") + msg = "ReadValue has no initial value." 
+            raise RuntimeError(msg)

         return extracted_model

@@ -713,7 +718,8 @@ def _insert_inplace_operation(
                 output.get_node(), output.get_index(), transformation.last_inplace_node_name
             )
             return (new_node.output(fn_output_port_id), fn_output_port_id, output_dtype)
-        raise nncf.InternalError(f"Transform type {transform_type} is not supported")
+        msg = f"Transform type {transform_type} is not supported"
+        raise nncf.InternalError(msg)

     @staticmethod
     def _apply_bias_insertion_transformations(
diff --git a/nncf/openvino/graph/nncf_graph_builder.py b/nncf/openvino/graph/nncf_graph_builder.py
index e9b3b3ee4a7..a4df03ccd7e 100644
--- a/nncf/openvino/graph/nncf_graph_builder.py
+++ b/nncf/openvino/graph/nncf_graph_builder.py
@@ -64,7 +64,8 @@ def convert_to_nncf_dtype(ov_type: ov.Type) -> Dtype:
             "string": "int",
         }
         if type_name not in conversion_map:
-            raise NotImplementedError(f"NNCF is not yet supported OpenVINO data type: {type_name}.")
+            msg = f"OpenVINO data type {type_name} is not yet supported by NNCF."
+            raise NotImplementedError(msg)
         return Dtype(conversion_map[type_name])

     @staticmethod
diff --git a/nncf/openvino/graph/node_utils.py b/nncf/openvino/graph/node_utils.py
index bfceacaa3fa..0eeaf5c956f 100644
--- a/nncf/openvino/graph/node_utils.py
+++ b/nncf/openvino/graph/node_utils.py
@@ -486,9 +486,8 @@ def get_reduce_op(node: ov.Node, output_port_id: int, output_node_name: str) ->
 def get_partial_shape_safe(node, port_id) -> Tuple[int, ...]:
     partial_shape = node.get_output_partial_shape(port_id)
     if partial_shape.rank.is_dynamic or not partial_shape.all_non_negative:
-        raise nncf.ValidationError(
-            f"Could not collect statistics for the node {node} because its output shape rank is dynamic or negative"
-        )
+        msg = f"Could not collect statistics for the node {node} because its output shape rank is dynamic or negative"
+        raise nncf.ValidationError(msg)
     return partial_shape

@@ -521,7 +520,8 @@ def get_weight_channel_axes(node: NNCFNode) -> List[int]:
     :return: Axes numbers of the weight tensor which correspond to its channels.
     """
     if node.metatype not in OPERATIONS_WITH_WEIGHTS:
-        raise ValueError("Channel axis cannot be defined for operation without weights.")
+        msg = "Channel axis cannot be defined for operation without weights."
+        raise ValueError(msg)

     if node.metatype in CONV_OPERATIONS:
         weights_layout = get_conv_weights_layout_from_node(node)
diff --git a/nncf/openvino/optimized_functions/models.py b/nncf/openvino/optimized_functions/models.py
index d221c003744..795549476ba 100644
--- a/nncf/openvino/optimized_functions/models.py
+++ b/nncf/openvino/optimized_functions/models.py
@@ -131,7 +131,8 @@ def _infer_ov_model(
             actual_dtype = inputs[i].dtype
             expected_dtype = ov_model_params.input_dtypes[input_name]
             if actual_dtype != expected_dtype:
-                raise ValueError(f"Expected input '{input_name}' to be {expected_dtype}. But found: {actual_dtype}.")
+                msg = f"Expected input '{input_name}' to be {expected_dtype}. But found: {actual_dtype}."
+                raise ValueError(msg)

     # Infer the model
     if compiled_model._infer_request is None:
@@ -165,9 +166,11 @@ def _prepare_compression_model_inputs(
     Do some input checks and convert static shapes to dynamic shapes if needed.
     """
     if scale_shape is None and zero_point_shape is not None:
-        raise Exception("Zero point shape can only be provided if scale shape is provided.")
+        msg = "Zero point shape can only be provided if scale shape is provided."
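The `_prepare_compression_model_inputs` hunk here enforces two dependencies between optional inputs. A hypothetical distilled version, with invented shapes, to make the contract explicit:

```
# Hypothetical distilled version of the two input guards above; shapes invented.
from typing import Optional, Tuple


def check_inputs(
    scale_shape: Optional[Tuple[int, ...]],
    zero_point_shape: Optional[Tuple[int, ...]],
    reduction_axes: Optional[Tuple[int, ...]],
) -> None:
    if scale_shape is None and zero_point_shape is not None:
        msg = "Zero point shape can only be provided if scale shape is provided."
        raise ValueError(msg)
    if scale_shape is None and reduction_axes is None:
        msg = "Reduction axes must be provided if scale shape is not provided."
        raise ValueError(msg)


check_inputs(scale_shape=(128, 1), zero_point_shape=(128, 1), reduction_axes=None)  # ok
```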
+ raise Exception(msg) if scale_shape is None and reduction_axes is None: - raise ValueError("Reduction axes must be provided if scale shape is not provided.") + msg = "Reduction axes must be provided if scale shape is not provided." + raise ValueError(msg) # Set dynamic shapes if needed if ov_model_params.dynamic_shapes: @@ -292,7 +295,8 @@ def _build_compress_model( ov_model_params.output_dtypes = {**default_output_dtypes, **ov_model_params.output_dtypes} if "weight" not in ov_model_params.input_dtypes: - raise ValueError("Input weight dtype is required!") + msg = "Input weight dtype is required!" + raise ValueError(msg) weight_dtype = ov_model_params.input_dtypes["weight"] input_scale_dtype = ov_model_params.input_dtypes["scale"] @@ -304,13 +308,14 @@ def _build_compress_model( # Validate input dtypes valid_weight_dtypes = [TensorDataType.float32, TensorDataType.float16, TensorDataType.bfloat16] if weight_dtype not in valid_weight_dtypes: - raise ValueError( - f"Weight must be one of the following data types: {valid_weight_dtypes}. But found: {weight_dtype}." - ) + msg = f"Weight must be one of the following data types: {valid_weight_dtypes}. But found: {weight_dtype}." + raise ValueError(msg) if scale_shape is not None and input_scale_dtype != TensorDataType.float32: - raise ValueError(f"Input scale must be of float32 data type. But found: {input_scale_dtype}.") + msg = f"Input scale must be of float32 data type. But found: {input_scale_dtype}." + raise ValueError(msg) if zero_point_shape is not None and input_zero_point_dtype not in [TensorDataType.int32, TensorDataType.float32]: - raise ValueError(f"Input zero point must be of int32/float32 data type. But found: {input_zero_point_dtype}.") + msg = f"Input zero point must be of int32/float32 data type. But found: {input_zero_point_dtype}." + raise ValueError(msg) # Validate output dtypes valid_compressed_weight_dtypes = [ @@ -322,17 +327,20 @@ def _build_compress_model( TensorDataType.uint4, ] if compressed_weight_dtype not in valid_compressed_weight_dtypes: - raise ValueError( + msg = ( f"Compressed weight must be one of the following data types: {valid_compressed_weight_dtypes}. " f"But found: {compressed_weight_dtype}." ) + raise ValueError(msg) if scale_shape is None and output_scale_dtype != TensorDataType.float32: - raise ValueError(f"Output scale must be of float32 data type. But found: {output_scale_dtype}.") + msg = f"Output scale must be of float32 data type. But found: {output_scale_dtype}." + raise ValueError(msg) if is_asym_mode and zero_point_shape is None and output_zero_point_dtype not in valid_compressed_weight_dtypes: - raise ValueError( + msg = ( f"Output zero point must be of one of the following data types: {valid_compressed_weight_dtypes}. " f"But found: {output_zero_point_dtype}." ) + raise ValueError(msg) # Build OV model weight = opset.parameter(weight_shape, name="weight", dtype=DTYPE_MAP_OV[weight_dtype]) @@ -428,7 +436,8 @@ def _build_compress_decompress_model( decompressed_weight_dtype = ov_model_params.output_dtypes["decompressed_weight"] if decompressed_weight_dtype != TensorDataType.float32: - raise ValueError(f"Decompressed weight must be of float32 data type. But found: {decompressed_weight_dtype}.") + msg = f"Decompressed weight must be of float32 data type. But found: {decompressed_weight_dtype}." 
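The dtype validation in `_build_compress_model` above repeatedly checks a value against an allow-list and names the offender in the message. An invented miniature of that shape:

```
# Invented miniature of the dtype allow-list checks above, using plain strings
# instead of TensorDataType members.
VALID_WEIGHT_DTYPES = ["float32", "float16", "bfloat16"]


def check_weight_dtype(weight_dtype: str) -> None:
    if weight_dtype not in VALID_WEIGHT_DTYPES:
        msg = f"Weight must be one of the following data types: {VALID_WEIGHT_DTYPES}. But found: {weight_dtype}."
        raise ValueError(msg)


check_weight_dtype("bfloat16")  # passes silently
```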
+ raise ValueError(msg) # Get compression model as input/result nodes and potentially modified ov model parameters ov_parameters, ov_results, ov_model_params = get_compress_weight_model( @@ -484,14 +493,18 @@ def get_astype_model(ov_model_params: OVModelParameters, input_shape: Tuple) -> def _build_astype_model(ov_model_params: OVModelParameters, arg_shape: Tuple) -> ModelCallable: input_dtypes = ov_model_params.input_dtypes if input_dtypes is None: - raise ValueError("Input dtypes must be provided.") + msg = "Input dtypes must be provided." + raise ValueError(msg) output_dtypes = ov_model_params.output_dtypes if output_dtypes is None: - raise ValueError("Output dtypes must be provided.") + msg = "Output dtypes must be provided." + raise ValueError(msg) if "input" not in input_dtypes: - raise ValueError("Input dtype is required.") + msg = "Input dtype is required." + raise ValueError(msg) if "output" not in output_dtypes: - raise ValueError("Output dtype is required.") + msg = "Output dtype is required." + raise ValueError(msg) arg = opset.parameter(arg_shape, dtype=DTYPE_MAP_OV[input_dtypes["input"]], name="input") res = opset.convert(arg, DTYPE_MAP_OV[output_dtypes["output"]]) diff --git a/nncf/openvino/quantization/quantize_model.py b/nncf/openvino/quantization/quantize_model.py index bb61e3c04b0..423cc026a5e 100644 --- a/nncf/openvino/quantization/quantize_model.py +++ b/nncf/openvino/quantization/quantize_model.py @@ -75,9 +75,8 @@ def native_quantize_if_op_impl( Implementation of the `quantize()` method for the OpenVINO backend via the OpenVINO Runtime API. """ if not fast_bias_correction: - raise NotImplementedError( - "The BiasCorrection algorithm is not supported for OpenVINO models with If operation." - ) + msg = "The BiasCorrection algorithm is not supported for OpenVINO models with If operation." 
+ raise NotImplementedError(msg) graphs = {} def _extract_all_subgraphs(model: ov.Model, current_id: str) -> None: diff --git a/nncf/quantization/advanced_parameters.py b/nncf/quantization/advanced_parameters.py index 2a6511aa0c6..afa7b6fe61f 100644 --- a/nncf/quantization/advanced_parameters.py +++ b/nncf/quantization/advanced_parameters.py @@ -478,7 +478,8 @@ def convert_quantization_parameters_to_dict(params: Optional[QuantizationParamet if params.per_channel is not None: result["per_channel"] = params.per_channel if params.narrow_range is not None: - raise nncf.ParameterNotSupportedError("narrow_range parameter is not supported in the legacy format") + msg = "narrow_range parameter is not supported in the legacy format" + raise nncf.ParameterNotSupportedError(msg) return result @@ -490,7 +491,8 @@ def convert_range_estimator_parameters_to_dict(params: RangeEstimatorParameters) :return: range estimator parameters as dict in the legacy format """ if params.min.clipping_value is not None or params.max.clipping_value is not None: - raise nncf.ParameterNotSupportedError("clipping_value parameter is not supported in the legacy format") + msg = "clipping_value parameter is not supported in the legacy format" + raise nncf.ParameterNotSupportedError(msg) result: Dict[str, Any] = {} if ( @@ -526,9 +528,8 @@ def convert_range_estimator_parameters_to_dict(params: RangeEstimatorParameters) ): return {} else: - raise nncf.ParameterNotSupportedError( - f"The following range estimator parameters are not supported: {str(params)}" - ) + msg = f"The following range estimator parameters are not supported: {str(params)}" + raise nncf.ParameterNotSupportedError(msg) return result @@ -590,13 +591,11 @@ def apply_advanced_parameters_to_config( config["initializer"] = initializer if params.bias_correction_params.apply_for_all_nodes: - raise nncf.ParameterNotSupportedError( - "apply_for_all_nodes parameter of the BiasCorrection algorithm is not supported in the legacy format" - ) + msg = "apply_for_all_nodes parameter of the BiasCorrection algorithm is not supported in the legacy format" + raise nncf.ParameterNotSupportedError(msg) if params.bias_correction_params.threshold is not None: - raise nncf.ParameterNotSupportedError( - "threshold parameter of the BiasCorrection algorithm is not supported in the legacy format" - ) + msg = "threshold parameter of the BiasCorrection algorithm is not supported in the legacy format" + raise nncf.ParameterNotSupportedError(msg) return config diff --git a/nncf/quantization/algorithms/accuracy_control/algorithm.py b/nncf/quantization/algorithms/accuracy_control/algorithm.py index 114eeaa3cd0..f508652d87f 100644 --- a/nncf/quantization/algorithms/accuracy_control/algorithm.py +++ b/nncf/quantization/algorithms/accuracy_control/algorithm.py @@ -55,9 +55,8 @@ def get_algo_backend(backend: BackendType) -> AccuracyControlAlgoBackend: return ONNXAccuracyControlAlgoBackend() - raise nncf.UnsupportedBackendError( - f"Cannot create the backend for the accuracy control algorithm because {backend} is not supported." - ) + msg = f"Cannot create the backend for the accuracy control algorithm because {backend} is not supported." 
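Besides the errmsg changes, the surrounding hunks also carry pyupgrade's UP031/UP032 rewrites of printf-style and `str.format` calls into f-strings. A quick equivalence check with an invented value:

```
# Equivalence sketch for the UP031/UP032 rewrites in this patch: both legacy
# formatting styles produce byte-identical strings to the f-string form.
backend = "tensorflow"
assert "do not support: %s" % backend == f"do not support: {backend}"
assert "{} is not supported!".format(backend) == f"{backend} is not supported!"
```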
+ raise nncf.UnsupportedBackendError(msg) def _create_message(nodes: Iterable[NNCFNode]) -> str: diff --git a/nncf/quantization/algorithms/accuracy_control/evaluator.py b/nncf/quantization/algorithms/accuracy_control/evaluator.py index 71be5710ad8..348318fdd3e 100644 --- a/nncf/quantization/algorithms/accuracy_control/evaluator.py +++ b/nncf/quantization/algorithms/accuracy_control/evaluator.py @@ -131,9 +131,8 @@ def prepare_model(self, model: TModel) -> PreparedModel: return ONNXPreparedModel(model) - raise NotImplementedError( - f"The `prepare_model_for_inference()` method is not implemented for the {backend} backend." - ) + msg = f"The `prepare_model_for_inference()` method is not implemented for the {backend} backend." + raise NotImplementedError(msg) def validate_prepared_model( self, prepared_model: PreparedModel, dataset: Dataset, indices: Optional[List[int]] = None @@ -156,7 +155,8 @@ def validate_prepared_model( self._metric_mode = Evaluator.determine_mode(prepared_model, dataset, self._validation_fn) if not self.is_metric_mode() and indices is not None: - raise ValueError("The `indices` parameter can be used only if Evaluator.is_metric_mode() = True") + msg = "The `indices` parameter can be used only if Evaluator.is_metric_mode() = True" + raise ValueError(msg) validation_dataset = dataset.get_data(indices) if self._enable_iteration_count: @@ -230,10 +230,11 @@ def determine_mode( try: metric_value = metric_value if metric_value is None else float(metric_value) except Exception as ex: - raise nncf.InternalError( + msg = ( f"Metric value of {type(metric_value)} type was returned from the `validation_fn` " "but the float value is expected." - ) from ex + ) + raise nncf.InternalError(msg) from ex convert_to_float_possible = True if values_for_each_item is not None: @@ -263,7 +264,8 @@ def determine_mode( if isinstance(metric_value, float) and (values_for_each_item is None or convert_to_float_possible): metric_mode = True elif values_for_each_item is not None and not isinstance(values_for_each_item[0], list): - raise nncf.InternalError("Unexpected return value from provided validation function.") + msg = "Unexpected return value from provided validation function." + raise nncf.InternalError(msg) return metric_mode diff --git a/nncf/quantization/algorithms/accuracy_control/rank_functions.py b/nncf/quantization/algorithms/accuracy_control/rank_functions.py index 0045d9317b8..3ec2adb4621 100644 --- a/nncf/quantization/algorithms/accuracy_control/rank_functions.py +++ b/nncf/quantization/algorithms/accuracy_control/rank_functions.py @@ -29,9 +29,8 @@ def create_normalized_mse_func(backend: BackendType) -> Callable[[List[TTensor], if backend == BackendType.OPENVINO: return normalized_mse - raise nncf.UnsupportedBackendError( - f"Could not create backend-specific implementation! {backend} backend is not supported!" - ) + msg = f"Could not create backend-specific implementation! {backend} backend is not supported!" 
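The `determine_mode` hunk in the evaluator keeps the explicit `raise ... from ex` chaining while moving the text into `msg`. A minimal runnable sketch, with `RuntimeError` standing in for `nncf.InternalError`:

```
# Sketch of the msg-plus-chaining pattern above; RuntimeError is a stand-in
# for nncf.InternalError so the snippet runs without NNCF installed.
def to_float(metric_value) -> float:
    try:
        return float(metric_value)
    except Exception as ex:
        msg = (
            f"Metric value of {type(metric_value)} type was returned from the `validation_fn` "
            "but the float value is expected."
        )
        raise RuntimeError(msg) from ex


try:
    to_float({"acc": 0.7})
except RuntimeError as err:
    print(err, "| caused by:", repr(err.__cause__))
```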
+    raise nncf.UnsupportedBackendError(msg)


 def normalized_mse(ref_outputs: List[np.ndarray], approx_outputs: List[np.ndarray]) -> float:
diff --git a/nncf/quantization/algorithms/bias_correction/algorithm.py b/nncf/quantization/algorithms/bias_correction/algorithm.py
index 6bc34c08fe9..3634fc6b1ac 100644
--- a/nncf/quantization/algorithms/bias_correction/algorithm.py
+++ b/nncf/quantization/algorithms/bias_correction/algorithm.py
@@ -101,7 +101,8 @@ def __init__(
         self._algorithm_key = f"BC_{hash(self)}"

         if self.apply_for_all_nodes:
-            raise nncf.InternalError("BiasCorrection algorithm does not support apply_for_all_nodes=True yet")
+            msg = "BiasCorrection algorithm does not support apply_for_all_nodes=True yet"
+            raise nncf.InternalError(msg)

     @property
     def available_backends(self) -> List[BackendType]:
@@ -127,9 +128,8 @@ def _set_backend_entity(self, model: TModel) -> None:
             self._backend_entity = FXBiasCorrectionAlgoBackend()
         else:
-            raise nncf.UnsupportedBackendError(
-                "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value)
-            )
+            msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!"
+            raise nncf.UnsupportedBackendError(msg)

     def apply(
         self,
diff --git a/nncf/quantization/algorithms/bias_correction/torch_fx_backend.py b/nncf/quantization/algorithms/bias_correction/torch_fx_backend.py
index 74e1d61675d..2c59ab700b2 100644
--- a/nncf/quantization/algorithms/bias_correction/torch_fx_backend.py
+++ b/nncf/quantization/algorithms/bias_correction/torch_fx_backend.py
@@ -105,7 +105,8 @@ def get_output_name(model: torch.fx.GraphModule, node_name: str, output_port_id:
         elif node.op == "output":
             return node.all_input_nodes.index(graph_node)

-    raise nncf.InternalError(f"Node with name {node_name} expected to have an output," " no outputs were found.")
+    msg = f"Node with name {node_name} is expected to have an output, but no outputs were found."
+ raise nncf.InternalError(msg) @staticmethod def is_quantized_weights(node: NNCFNode, nncf_graph: NNCFGraph) -> bool: diff --git a/nncf/quantization/algorithms/channel_alignment/openvino_backend.py b/nncf/quantization/algorithms/channel_alignment/openvino_backend.py index 88d46ed4254..ad1977cb671 100644 --- a/nncf/quantization/algorithms/channel_alignment/openvino_backend.py +++ b/nncf/quantization/algorithms/channel_alignment/openvino_backend.py @@ -99,9 +99,11 @@ def get_dims_descriptor(node: NNCFNode) -> LayoutDescriptor: elif node.metatype == OVMatMulMetatype: weights_layout = get_linear_weights_layout_from_node(node=node) else: - raise nncf.InternalError( - f"Metatype {node.metatype} of node {node.node_name} dimensions description retrieving is not supported" + msg = ( + f"Metatype {node.metatype} of node {node.node_name} dimensions" + " description retrieving is not supported" ) + raise nncf.InternalError(msg) if OVLayoutElem.GROUPS in weights_layout: # Using groups dim as output channels dim for ChannelAlignment algorithm diff --git a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py index 89ef99e431d..38270bf5a2f 100644 --- a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py +++ b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py @@ -88,7 +88,8 @@ def __init__( self._algorithm_key = f"FBC_{hash(self)}" if self.apply_for_all_nodes: - raise nncf.InternalError("FastBiasCorrection algorithm does not support apply_for_all_nodes=True yet") + msg = "FastBiasCorrection algorithm does not support apply_for_all_nodes=True yet" + raise nncf.InternalError(msg) @property def available_backends(self) -> List[BackendType]: @@ -122,9 +123,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = FXFastBiasCorrectionAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def apply( self, diff --git a/nncf/quantization/algorithms/layerwise/engine.py b/nncf/quantization/algorithms/layerwise/engine.py index 6b754f2ba0e..20a35bd1f9d 100644 --- a/nncf/quantization/algorithms/layerwise/engine.py +++ b/nncf/quantization/algorithms/layerwise/engine.py @@ -66,9 +66,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = OVLayerwiseEngineBackend() else: - raise UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise UnsupportedBackendError(msg) def _get_statistics(self, statistic_points: StatisticPointsContainer, node_name: str, port_id: int) -> List[Tensor]: """ @@ -112,7 +111,8 @@ def _create_cache( for out in outputs: x = self._get_statistics(statistic_points, out.node_name, out.output_port) if not x: - raise RuntimeError(f"Statistics for {out.node_name} node on {out.output_port} port is not provided.") + msg = f"Statistics for {out.node_name} node on {out.output_port} port is not provided." 
+ raise RuntimeError(msg) cache[out] = x return cache diff --git a/nncf/quantization/algorithms/layerwise/openvino_iterator.py b/nncf/quantization/algorithms/layerwise/openvino_iterator.py index ed86c784ac6..41310bdab5e 100644 --- a/nncf/quantization/algorithms/layerwise/openvino_iterator.py +++ b/nncf/quantization/algorithms/layerwise/openvino_iterator.py @@ -186,9 +186,8 @@ def collect_output_tensors(self, step: LayerwiseStep) -> Dict[NodeOutputPort, Li if input_id in self._model_input_ids: subgraph_model_input_ids.append(input_id) else: - raise RuntimeError( - f"{input.node_name}:{input.output_port} is not found in the input cache and is not a model input" - ) + msg = f"{input.node_name}:{input.output_port} is not found in the input cache and is not a model input" + raise RuntimeError(msg) if subgraph_model_input_ids: subgraph_inputs = self._model_input_ids diff --git a/nncf/quantization/algorithms/min_max/algorithm.py b/nncf/quantization/algorithms/min_max/algorithm.py index e334972dca3..4c923477fef 100644 --- a/nncf/quantization/algorithms/min_max/algorithm.py +++ b/nncf/quantization/algorithms/min_max/algorithm.py @@ -272,50 +272,52 @@ def _review_mode_based_defaults(self): nncf_logger.warning(f"You're using experimental option mode with {self._mode} value.") if self._preset != QuantizationPreset.PERFORMANCE: - raise nncf.ParameterNotSupportedError( - f"preset option with {self._preset} value is not supported with the mode option!" - ) + msg = f"preset option with {self._preset} value is not supported with the mode option!" + raise nncf.ParameterNotSupportedError(msg) if self._target_device not in [TargetDevice.CPU, TargetDevice.ANY]: - raise nncf.ParameterNotSupportedError( - f"target_device option with {self._target_device} value is not supported with the mode option!" - ) + msg = f"target_device option with {self._target_device} value is not supported with the mode option!" + raise nncf.ParameterNotSupportedError(msg) if self._overflow_fix != OverflowFix.DISABLE: - raise nncf.ParameterNotSupportedError( - f"overflow_fix option with {self._overflow_fix} value is not supported with the mode option!" - ) + msg = f"overflow_fix option with {self._overflow_fix} value is not supported with the mode option!" + raise nncf.ParameterNotSupportedError(msg) if self._quantize_outputs: - raise nncf.ParameterNotSupportedError("quantize_outputs option is not supported with the mode option!") + msg = "quantize_outputs option is not supported with the mode option!" + raise nncf.ParameterNotSupportedError(msg) if isinstance(self._weights_quantization_params, QuantizationParameters): - raise nncf.ParameterNotSupportedError( + msg = ( "quantization_params option for weights with " f"{self._weights_quantization_params} " "value is not supported with the mode option!" ) + raise nncf.ParameterNotSupportedError(msg) if isinstance(self._activations_quantization_params, QuantizationParameters): - raise nncf.ParameterNotSupportedError( + msg = ( "quantization_params option for activations with " f"{self._activations_quantization_params} " "value is not supported with the mode option!" ) + raise nncf.ParameterNotSupportedError(msg) elif self._mode is None: if isinstance(self._weights_quantization_params, FP8QuantizationParameters): - raise nncf.ParameterNotSupportedError( + msg = ( "quantization_params option for weights with " f"{self._weights_quantization_params} " "value is not supported with the mode: None option!" 
) + raise nncf.ParameterNotSupportedError(msg) if isinstance(self._activations_quantization_params, FP8QuantizationParameters): - raise nncf.ParameterNotSupportedError( + msg = ( "quantization_params option for activations with " f"{self._activations_quantization_params} " "value is not supported with the mode: None option!" ) + raise nncf.ParameterNotSupportedError(msg) def _reset_cache(self) -> None: """ @@ -357,9 +359,8 @@ def _get_quantizer_constraints( if isinstance(quantization_params, FP8QuantizationParameters): if self._mode is None: - raise nncf.InternalError( - f"FP8QuantizationParameters for {group.value} can not be used without QuantizationMode option!" - ) + msg = f"FP8QuantizationParameters for {group.value} can not be used without QuantizationMode option!" + raise nncf.InternalError(msg) return QuantizationConstraints(**constraints) if quantization_params.mode is not None: @@ -397,9 +398,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = PTMinMaxAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def _get_range_estimator_parameters( self, target_point: TargetPoint, quantizer_config: QuantizerConfig @@ -515,10 +515,12 @@ def _get_statistic_collector( [MinMaxTensorStatistic.MIN_STAT, MinMaxTensorStatistic.MAX_STAT], ): if params.statistics_type not in self._backend_entity.reducer_map: - raise nncf.InternalError(f"Statistic type: {params.statistics_type} is not yet supported.") + msg = f"Statistic type: {params.statistics_type} is not yet supported." + raise nncf.InternalError(msg) if params.aggregator_type not in AGGREGATORS_MAP: - raise nncf.InternalError(f"Aggregator type: {params.aggregator_type} is not yet supported.") + msg = f"Aggregator type: {params.aggregator_type} is not yet supported." 
+ raise nncf.InternalError(msg) statistic_type = params.statistics_type if statistic_type in [StatisticsType.QUANTILE, StatisticsType.ABS_QUANTILE]: @@ -838,7 +840,8 @@ def fill_quantization_target_points( elif quantization_point.is_activation_quantization_point(): self._add_activation_quantization_target_point(quantization_point, nncf_graph) else: - raise nncf.InternalError("Incorrect quantization point") + msg = "Incorrect quantization point" + raise nncf.InternalError(msg) return self._quantization_target_points_to_qconfig, self._unified_scale_groups def _get_quantization_target_points( @@ -995,13 +998,15 @@ def filter_func(point: StatisticPoint) -> bool: ): statistics = tensor_collector.get_statistics() if statistics.min_values is None or statistics.max_values is None: - raise nncf.InternalError(f"Statistics were not collected for the node {target_node_name}") + msg = f"Statistics were not collected for the node {target_node_name}" + raise nncf.InternalError(msg) group_statistics.append(statistics) unified_values = self._unify_statistics(group_statistics) qconfigs = [quantization_target_points[qtp] for qtp in unified_scale_group] if any(qconfigs[0] != qconfig for qconfig in qconfigs[1:]): - raise nncf.InternalError(f"QConfigs for unified scale group {unified_scale_group} are not equal") + msg = f"QConfigs for unified scale group {unified_scale_group} are not equal" + raise nncf.InternalError(msg) qconfig = qconfigs[0] q_group = QuantizerGroup.ACTIVATIONS narrow_range = get_quantizer_narrow_range(qconfig, q_group) @@ -1044,7 +1049,8 @@ def filter_func(point: StatisticPoint) -> bool: narrow_range = get_quantizer_narrow_range(qconfig, quant_group) statistics = tensor_collector.get_statistics() if statistics.min_values is None or statistics.max_values is None: - raise nncf.InternalError(f"Statistics were not collected for the node {target_node_name}") + msg = f"Statistics were not collected for the node {target_node_name}" + raise nncf.InternalError(msg) if self._mode is not None: destination_type = self._quantization_params[quant_group].destination_type parameters = calculate_convert_parameters( @@ -1075,7 +1081,8 @@ def get_cached_statistic_points(self, model: TModel, graph: NNCFGraph) -> Statis :return: Filled statistic point container. """ if self._quantization_target_points_to_qconfig is None: - raise RuntimeError("get_cached_statistic_points is called before statistic caching.") + msg = "get_cached_statistic_points is called before statistic caching." + raise RuntimeError(msg) self._set_backend_entity(model) return self._get_statistic_point_container(self._quantization_target_points_to_qconfig, graph) diff --git a/nncf/quantization/algorithms/min_max/onnx_backend.py b/nncf/quantization/algorithms/min_max/onnx_backend.py index c6cd199ee7f..77d5ed665f0 100644 --- a/nncf/quantization/algorithms/min_max/onnx_backend.py +++ b/nncf/quantization/algorithms/min_max/onnx_backend.py @@ -163,7 +163,8 @@ def create_convert_insertion_command( target_point: ONNXTargetPoint, parameters: FakeConvertParameters, ) -> TransformationCommand: - raise nncf.InternalError("FakeConvert insertion not implemented in ONNX backend!") + msg = "FakeConvert insertion not implemented in ONNX backend!" 
+ raise nncf.InternalError(msg) @staticmethod def _get_input_edges_mapping(nncf_graph: NNCFGraph): diff --git a/nncf/quantization/algorithms/min_max/openvino_backend.py b/nncf/quantization/algorithms/min_max/openvino_backend.py index ba7d2122e52..7c4ca43bebc 100644 --- a/nncf/quantization/algorithms/min_max/openvino_backend.py +++ b/nncf/quantization/algorithms/min_max/openvino_backend.py @@ -160,7 +160,8 @@ def get_target_point_shape(nncf_graph: NNCFGraph, node: NNCFNode, target_point: edges = nncf_graph.get_output_edges_by_port_id(node, target_point.port_id) return edges[0].tensor_shape - raise NotImplementedError(f"Unsupported target point type {target_point.type}.") + msg = f"Unsupported target point type {target_point.type}." + raise NotImplementedError(msg) @staticmethod def get_weight_quantization_axes(node: NNCFNode, target_point: OVTargetPoint, ndims: int) -> Tuple[int]: diff --git a/nncf/quantization/algorithms/min_max/torch_backend.py b/nncf/quantization/algorithms/min_max/torch_backend.py index d171a252a32..82b03560422 100644 --- a/nncf/quantization/algorithms/min_max/torch_backend.py +++ b/nncf/quantization/algorithms/min_max/torch_backend.py @@ -155,7 +155,8 @@ def create_convert_insertion_command( target_point: PTTargetPoint, parameters: FakeConvertParameters, ) -> TransformationCommand: - raise nncf.InternalError("FakeConvert insertion not implemented in PyTorch backend!") + msg = "FakeConvert insertion not implemented in PyTorch backend!" + raise nncf.InternalError(msg) @staticmethod def get_target_point_shape(nncf_graph: PTNNCFGraph, node: NNCFNode, target_point: PTTargetPoint) -> Tuple[int, ...]: diff --git a/nncf/quantization/algorithms/min_max/torch_fx_backend.py b/nncf/quantization/algorithms/min_max/torch_fx_backend.py index 9336d872f34..667a7a93b4c 100644 --- a/nncf/quantization/algorithms/min_max/torch_fx_backend.py +++ b/nncf/quantization/algorithms/min_max/torch_fx_backend.py @@ -141,7 +141,8 @@ def create_convert_insertion_command( target_point: PTTargetPoint, parameters: FakeConvertParameters, ) -> TransformationCommand: - raise nncf.InternalError("FakeConvert insertion not implemented in PyTorch backend!") + msg = "FakeConvert insertion not implemented in TorchFX backend!" + raise nncf.InternalError(msg) @staticmethod def get_target_point_shape(nncf_graph: PTNNCFGraph, node: NNCFNode, target_point: PTTargetPoint) -> Tuple[int, ...]: diff --git a/nncf/quantization/algorithms/post_training/algorithm.py b/nncf/quantization/algorithms/post_training/algorithm.py index a41ebf79841..850527381f4 100644 --- a/nncf/quantization/algorithms/post_training/algorithm.py +++ b/nncf/quantization/algorithms/post_training/algorithm.py @@ -100,10 +100,11 @@ def apply( dataset: Optional[Dataset] = None, ) -> TModel: if dataset is None and len(self._pipeline.pipeline_steps) > 1: - raise ValueError( + msg = ( "A dataset is required for the post-training quantization " "algorithm to collect statistics for intermediate models."
) + raise ValueError(msg) step_index_to_statistics = None if statistic_points: diff --git a/nncf/quantization/algorithms/smooth_quant/algorithm.py b/nncf/quantization/algorithms/smooth_quant/algorithm.py index 04e22a2168c..bad81ddd37d 100644 --- a/nncf/quantization/algorithms/smooth_quant/algorithm.py +++ b/nncf/quantization/algorithms/smooth_quant/algorithm.py @@ -92,9 +92,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = FXSmoothQuantAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def apply( self, @@ -125,12 +124,11 @@ def apply( empty_statistic = True break if len(activations_value) != 1: - raise RuntimeError( - ( - "More than one statistic is collected for one node during" - f"Smooth Quanti algorithm: {node_to_smooth.node_name}" - ) + msg = ( + "More than one statistic is collected for one node during " + f"SmoothQuant algorithm: {node_to_smooth.node_name}" ) + raise RuntimeError(msg) activations_value = self._clip_statistics(activations_value) @@ -331,7 +329,8 @@ def _calculate_activation_scale( channel_axis = channel_axes[0] if not all(axis == channel_axis for axis in channel_axes): - raise nncf.InternalError(f"Channel axes for nodes {[n.node_name for n in nodes]} are not identical") + msg = f"Channel axes for nodes {[n.node_name for n in nodes]} are not identical" + raise nncf.InternalError(msg) activations_size = len(activations_shape) activation_scale = scale_value ** (-1) diff --git a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py index 2ab1aa59fa5..a75f3b63794 100644 --- a/nncf/quantization/algorithms/smooth_quant/openvino_backend.py +++ b/nncf/quantization/algorithms/smooth_quant/openvino_backend.py @@ -72,7 +72,8 @@ def get_activations_port_id(node: NNCFNode, nncf_graph: NNCFGraph) -> int: ] if len(activation_ports) != 1: - raise nncf.InternalError(f"Too many weight or activation ports for {node.node_name} node") + msg = f"Too many weight or activation ports for {node.node_name} node" + raise nncf.InternalError(msg) return activation_ports[0] @staticmethod @@ -94,7 +95,8 @@ def get_weight_value(node_with_weight: NNCFNode, model: ov.Model, nncf_graph: NN def get_weight_tensor_port_id(node: NNCFNode) -> int: const_ids = node.layer_attributes.get_const_port_ids() if len(const_ids) != 1: - raise nncf.InternalError(f"Found more than 1 port for {node.node_name} node") + msg = f"Found more than 1 port for {node.node_name} node" + raise nncf.InternalError(msg) return const_ids[0] @staticmethod @@ -119,7 +121,8 @@ def get_activation_channel_axis(node: NNCFNode, port_id: int) -> int: channel_axis = 1 if port_id > 1: - raise nncf.InternalError(f"{node.metatype.name} can not take more than 2 input tensors.") + msg = f"{node.metatype.name} cannot take more than 2 input tensors."
+ raise nncf.InternalError(msg) if ( node.metatype == OVMatMulMetatype diff --git a/nncf/quantization/algorithms/smooth_quant/torch_backend.py b/nncf/quantization/algorithms/smooth_quant/torch_backend.py index 6f9ee3738ad..638d6358649 100644 --- a/nncf/quantization/algorithms/smooth_quant/torch_backend.py +++ b/nncf/quantization/algorithms/smooth_quant/torch_backend.py @@ -122,7 +122,8 @@ def get_abs_max_channel_collector( def get_weight_value(node_with_weight: NNCFNode, model: NNCFNetwork, nncf_graph: NNCFGraph) -> Tensor: weight_node = get_const_node(node_with_weight, node_with_weight.metatype.weight_port_ids[0], nncf_graph) if weight_node is None: - raise RuntimeError(f"{node_with_weight} node has no weight node.") + msg = f"{node_with_weight} node has no weight node." + raise RuntimeError(msg) weight_data = get_const_data(weight_node, model) return Tensor(weight_data) diff --git a/nncf/quantization/algorithms/smooth_quant/torch_fx_backend.py b/nncf/quantization/algorithms/smooth_quant/torch_fx_backend.py index 61fe2b98496..fb9b8ebaa98 100644 --- a/nncf/quantization/algorithms/smooth_quant/torch_fx_backend.py +++ b/nncf/quantization/algorithms/smooth_quant/torch_fx_backend.py @@ -98,7 +98,8 @@ def get_abs_max_channel_collector( def get_weight_value(node_with_weight: NNCFNode, model: torch.fx.GraphModule, nncf_graph: NNCFGraph) -> Tensor: weight_node = get_const_node(node_with_weight, node_with_weight.metatype.weight_port_ids[0], nncf_graph) if weight_node is None: - raise RuntimeError(f"{node_with_weight} node has no weight node.") + msg = f"{node_with_weight} node has no weight node." + raise RuntimeError(msg) graph_node = get_graph_node_by_name(model.graph, weight_node.node_name) weight_data = get_tensor_constant_from_node(graph_node, model) return Tensor(weight_data.data) diff --git a/nncf/quantization/algorithms/weight_compression/algorithm.py b/nncf/quantization/algorithms/weight_compression/algorithm.py index 7973712da9a..fa07273fa69 100644 --- a/nncf/quantization/algorithms/weight_compression/algorithm.py +++ b/nncf/quantization/algorithms/weight_compression/algorithm.py @@ -127,15 +127,15 @@ def check_user_compression_configuration( """ if mode in INT8_MODES: if (ratio and ratio != 1) or (group_size and group_size != -1): - raise nncf.ParameterNotSupportedError( + msg = ( "INT8 modes require per-channel quantization of all layers in 8 bit. " "Default values of `ratio` (1) and `group_size` (-1) cannot be overridden." ) + raise nncf.ParameterNotSupportedError(msg) if advanced_parameters and advanced_parameters.statistics_path: - raise nncf.ParameterNotSupportedError( - "INT8 modes do not support the `statistics_path` option in `AdvancedCompressionParameters`." - ) + msg = "INT8 modes do not support the `statistics_path` option in `AdvancedCompressionParameters`." + raise nncf.ParameterNotSupportedError(msg) unsupported_options = { "all_layers": all_layers, @@ -149,15 +149,16 @@ def check_user_compression_configuration( } unsupported_for_int8 = [name for name, value in unsupported_options.items() if value is not None] if unsupported_for_int8: - raise nncf.ParameterNotSupportedError( - f"INT8 modes do not support {', '.join(unsupported_for_int8)} option(s). Set them to None." - ) + msg = f"INT8 modes do not support {', '.join(unsupported_for_int8)} option(s). Set them to None." 
+ raise nncf.ParameterNotSupportedError(msg) if ratio is not None and not (0 <= ratio <= 1): - raise nncf.ValidationError(f"The ratio should be between 0 and 1, but ratio={ratio} is specified.") + msg = f"The ratio should be between 0 and 1, but ratio={ratio} is specified." + raise nncf.ValidationError(msg) if subset_size <= 0: - raise nncf.ValidationError(f"The subset_size value should be positive, but subset_size={subset_size} is given.") + msg = f"The subset_size value should be positive, but subset_size={subset_size} is given." + raise nncf.ValidationError(msg) if ( ratio @@ -165,10 +166,9 @@ def check_user_compression_configuration( and sensitivity_metric is not None and sensitivity_metric != SensitivityMetric.WEIGHT_QUANTIZATION_ERROR ): - raise nncf.ValidationError( - f"Mixed precision selection with sensitivity metric={sensitivity_metric.value} \ + msg = f"Mixed precision selection with sensitivity metric={sensitivity_metric.value} \ requires a dataset, but it's not provided." - ) + raise nncf.ValidationError(msg) class WeightCompression(Algorithm): @@ -303,9 +303,8 @@ def set_backend_entity(self, model: TModel) -> None: self._backend_entity = FXWeightCompressionAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def get_nodes_to_compress(self, nncf_graph: NNCFGraph) -> List[NNCFNode]: """ diff --git a/nncf/quantization/algorithms/weight_compression/awq.py b/nncf/quantization/algorithms/weight_compression/awq.py index 67ce43d3279..fd06bef6660 100644 --- a/nncf/quantization/algorithms/weight_compression/awq.py +++ b/nncf/quantization/algorithms/weight_compression/awq.py @@ -118,9 +118,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = OVAWQAlgoAlgoBackend(model, self.name_to_node_mapping) self._patterns = self._backend_entity.get_awq_patterns() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific AWQ entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific AWQ entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def apply( self, diff --git a/nncf/quantization/algorithms/weight_compression/gptq.py b/nncf/quantization/algorithms/weight_compression/gptq.py index 7f6255d120b..bd6f027ce44 100644 --- a/nncf/quantization/algorithms/weight_compression/gptq.py +++ b/nncf/quantization/algorithms/weight_compression/gptq.py @@ -74,9 +74,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = OVWeightCompressionAlgoBackend(model) else: - raise nncf.UnsupportedBackendError( - f"Cannot return backend-specific entity because {self._backend.value} is not supported!" - ) + msg = f"Cannot return backend-specific entity because {self._backend.value} is not supported!" 
+ raise nncf.UnsupportedBackendError(msg) def apply( self, @@ -169,9 +168,11 @@ def _calculate_hessian(self, node: NNCFNode, inputs: List[Tensor]) -> Tensor: nsamples = 0 if node.metatype in self._backend_entity.convolution_metatypes: - raise nncf.UnsupportedModelError("Convolution metatypes are not supported") + msg = "Convolution metatypes are not supported" + raise nncf.UnsupportedModelError(msg) if node.layer_attributes.input_attributes["transpose"]: - raise nncf.UnsupportedModelError("Transposed input is not supported") + msg = "Transposed input is not supported" + raise nncf.UnsupportedModelError(msg) hessian = fns.zeros( (inputs[0].shape[-1], inputs[0].shape[-1]), backend=inputs[0].backend, dtype=TensorDataType.float32 @@ -208,9 +209,11 @@ def _quantize_weights( :return: Scales and zero points used for quantization. """ if wc_params.node_with_weight.metatype in self._backend_entity.convolution_metatypes: - raise RuntimeError("Convolution metatypes are not supported") + msg = "Convolution metatypes are not supported" + raise RuntimeError(msg) if not wc_params.node_with_weight.layer_attributes.constant_attributes[wc_params.weight_port_id]["transpose"]: - raise RuntimeError("Transpose is not supported") + msg = "Transpose is not supported" + raise RuntimeError(msg) weight_tensor = self._backend_entity.get_weight( wc_params.node_with_weight, wc_params.weight_port_id, model, graph diff --git a/nncf/quantization/algorithms/weight_compression/lora_correction.py b/nncf/quantization/algorithms/weight_compression/lora_correction.py index 2255ff30027..fbab4ba191d 100644 --- a/nncf/quantization/algorithms/weight_compression/lora_correction.py +++ b/nncf/quantization/algorithms/weight_compression/lora_correction.py @@ -180,9 +180,11 @@ def calculate_low_rank_matrices( indexes = do_nf4_quantization(compressed_weight.tensor, compressed_weight.scale, is_normalized_weight=True) fq_weights = do_nf4_dequantization(indexes, compressed_weight.scale, reduction_axis) else: - raise nncf.InternalError( - f"{mode.value} mode is invalid for Lora Correction algorithm. Supported modes: INT4_SYM, INT4_ASYM, NF4" + msg = ( + f"{mode.value} mode is invalid for Lora Correction algorithm." + " Supported modes: INT4_SYM, INT4_ASYM, NF4" ) + raise nncf.InternalError(msg) # fq_w + residual = w => residual = w - fq_w svd_residual = fns.astype(weight - fq_weights, TensorDataType.float32) diff --git a/nncf/quantization/algorithms/weight_compression/mixed_precision.py b/nncf/quantization/algorithms/weight_compression/mixed_precision.py index ddd0924c842..36d9d55bfb0 100644 --- a/nncf/quantization/algorithms/weight_compression/mixed_precision.py +++ b/nncf/quantization/algorithms/weight_compression/mixed_precision.py @@ -151,9 +151,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = FXWeightCompressionAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" 
+ raise nncf.UnsupportedBackendError(msg) def _calc_weight_sensitivity( self, @@ -198,7 +197,8 @@ def get_statistic_points( graph: NNCFGraph, nodes_and_port_ids: Iterable[Tuple[NNCFNode, int]], ) -> StatisticPointsContainer: - raise RuntimeError("No statistics collection intended for data-free mixed precision criterion") + msg = "No statistics collection intended for data-free mixed precision criterion" + raise RuntimeError(msg) class DataBasedCriterion(DataFreeCriterion, ABC): @@ -224,9 +224,8 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = PTMixedPrecisionAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific entity because {} is not supported!".format(model_backend.value) - ) + msg = f"Cannot return backend-specific entity because {model_backend.value} is not supported!" + raise nncf.UnsupportedBackendError(msg) def _calc_activation_sensitivity( self, @@ -268,10 +267,11 @@ def get_statistic_points( for act_node, output_port_id in nodes_and_port_ids: n_dims = len(graph.get_output_edges_by_port_id(act_node, output_port_id)[0].tensor_shape) if n_dims < 2: - raise RuntimeError( + msg = ( f"Data-aware mixed precision criteria are not supported for MatMuls with 1D inputs. " f"Node: {act_node.node_name}, number of dimensions: {n_dims}." ) + raise RuntimeError(msg) statistic_point = self._backend_entity.target_point( TargetType.POST_LAYER_OPERATION, act_node.node_name, port_id=output_port_id ) diff --git a/nncf/quantization/algorithms/weight_compression/openvino_backend.py b/nncf/quantization/algorithms/weight_compression/openvino_backend.py index 15be72caed1..81212bb36fa 100644 --- a/nncf/quantization/algorithms/weight_compression/openvino_backend.py +++ b/nncf/quantization/algorithms/weight_compression/openvino_backend.py @@ -111,7 +111,8 @@ def mean_statistic_collector( @staticmethod def get_activation_port_id(node: NNCFNode, nncf_graph: NNCFGraph) -> int: if node.layer_attributes.input_attributes["transpose"]: - raise nncf.UnsupportedModelError("Transposed input is not supported") + msg = "Transposed input is not supported" + raise nncf.UnsupportedModelError(msg) constant_ports = node.layer_attributes.get_const_port_ids() activation_ports = [ e.input_port_id for e in nncf_graph.get_input_edges(node) if e.input_port_id not in constant_ports @@ -239,7 +240,8 @@ def _create_compression_subgraph( elif compression_config.mode == CompressWeightsMode.INT8_ASYM: compression_dtype = ov.Type.u8 else: - raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") + msg = f"{compression_config.mode.value} is not supported." + raise nncf.ParameterNotSupportedError(msg) original_shape = weight.shape with disable_results_caching(OV_MODEL_CACHE): diff --git a/nncf/quantization/algorithms/weight_compression/scale_estimation.py b/nncf/quantization/algorithms/weight_compression/scale_estimation.py index 0e812b71760..6bfbaebbd83 100644 --- a/nncf/quantization/algorithms/weight_compression/scale_estimation.py +++ b/nncf/quantization/algorithms/weight_compression/scale_estimation.py @@ -81,11 +81,11 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = PTWeightCompressionAlgoBackend() else: - raise nncf.UnsupportedBackendError( - "Cannot return backend-specific Scale Estimation entity because {} is not supported!".format( - model_backend.value - ) + msg = ( + "Cannot return backend-specific Scale Estimation entity because" + f" {model_backend.value} is not supported!" 
) + raise nncf.UnsupportedBackendError(msg) def apply( self, diff --git a/nncf/quantization/algorithms/weight_compression/torch_backend.py b/nncf/quantization/algorithms/weight_compression/torch_backend.py index c8f47f39b79..8cf5422eee2 100644 --- a/nncf/quantization/algorithms/weight_compression/torch_backend.py +++ b/nncf/quantization/algorithms/weight_compression/torch_backend.py @@ -185,8 +185,8 @@ def get_weight( weight_name = weight_node.layer_attributes.name weight = get_const_data(weight_node, model) if weight is None: - raise nncf.InternalError(f"Could not find a torch.nn.Parameter in the model by name {weight_name}.") - + msg = f"Could not find a torch.nn.Parameter in the model by name {weight_name}." + raise nncf.InternalError(msg) return Tensor(weight) def get_weight_dtype( @@ -237,13 +237,15 @@ def transform_model( CompressWeightsMode.NF4, CompressWeightsMode.E2M1, ]: - raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") + msg = f"{compression_config.mode.value} is not supported." + raise nncf.ParameterNotSupportedError(msg) weight_node = get_const_node(wc_params.node_with_weight, wc_params.weight_port_id, graph) weight_name = weight_node.layer_attributes.name weight = get_const_data(weight_node, model) if weight is None: - raise nncf.InternalError(f"Could not find a torch.nn.Parameter in the model by name {weight_name}.") + msg = f"Could not find a torch.nn.Parameter in the model by name {weight_name}." + raise nncf.InternalError(msg) # calculates compressed weights and decompression parameters compressed_weight = compress_weight( @@ -287,7 +289,8 @@ def transform_model( module = get_module_by_name(module_name, model) weight = getattr(module, weight_attr_name) if not isinstance(weight, torch.nn.Parameter): - raise nncf.InternalError(f"Weight is not a torch.nn.Parameter in the model by name {weight_name}.") + msg = f"Weight is not a torch.nn.Parameter in the model by name {weight_name}." + raise nncf.InternalError(msg) setattr(module, weight_attr_name, compressed_parameter) diff --git a/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py b/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py index 8b57cf5f5c4..06cd8b845e8 100644 --- a/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py +++ b/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py @@ -136,7 +136,8 @@ def get_weight( graph_weight_node = get_graph_node_by_name(model.graph, weight_node.node_name) weight = get_tensor_constant_from_node(graph_weight_node, model).data if weight is None: - raise nncf.InternalError(f"Could not find a node in the model by name {weight_node}.") + msg = f"Could not find a node in the model by name {weight_node}." + raise nncf.InternalError(msg) return Tensor(weight) @@ -194,12 +195,14 @@ def transform_model( CompressWeightsMode.NF4, CompressWeightsMode.E2M1, ]: - raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") + msg = f"{compression_config.mode.value} is not supported." 
+ raise nncf.ParameterNotSupportedError(msg) weight_node = get_const_node(wc_params.node_with_weight, wc_params.weight_port_id, graph) weight_name = weight_node.node_name weight = self.get_weight(wc_params.node_with_weight, wc_params.weight_port_id, model, graph) if weight is None or not isinstance(weight, Tensor): - raise nncf.InternalError(f"Could not find a nncf.tensor in the model by name {weight_name}.") + msg = f"Could not find a nncf.tensor in the model by name {weight_name}." + raise nncf.InternalError(msg) # calculates compressed weights and decompression parameters compressed_weight = compress_weight( diff --git a/nncf/quantization/algorithms/weight_compression/weight_lowering.py b/nncf/quantization/algorithms/weight_compression/weight_lowering.py index 012d308087c..ad7495f114a 100644 --- a/nncf/quantization/algorithms/weight_compression/weight_lowering.py +++ b/nncf/quantization/algorithms/weight_compression/weight_lowering.py @@ -85,14 +85,12 @@ def reshape_weight_for_grouped_quantization( if isinstance(reduction_axes, tuple) and len(reduction_axes) == 1: reduction_axes = reduction_axes[0] if not isinstance(reduction_axes, int): - raise nncf.UnsupportedModelError( - f"Group-wise quantization expects a single reduction axis, but given: {reduction_axes}." - ) + msg = f"Group-wise quantization expects a single reduction axis, but given: {reduction_axes}." + raise nncf.UnsupportedModelError(msg) channel_size = weight.shape[reduction_axes] if channel_size % group_size != 0: - raise nncf.UnsupportedModelError( - f"Channel size {channel_size} should be divisible by size of group {group_size}" - ) + msg = f"Channel size {channel_size} should be divisible by size of group {group_size}" + raise nncf.UnsupportedModelError(msg) num_groups_per_channel = channel_size // group_size shape = list(weight.shape) # [a1, r, a2] - "r" refers to number of channels along reduction axis @@ -266,7 +264,8 @@ def calculate_integer_quantization_params( :return: Scale and zero point tensors. """ if not config.is_integer: - raise nncf.InternalError("The function supports integer quantization only") + msg = "The function supports integer quantization only" + raise nncf.InternalError(msg) num_bits = config.num_bits @@ -445,12 +444,14 @@ def do_int_quantization( :return: A tuple containing the compressed weights, scale, and zero point tensors. """ if not config.is_integer: - raise nncf.InternalError("The function supports integer quantization only") + msg = "The function supports integer quantization only" + raise nncf.InternalError(msg) if config.is_asym_mode and (precomputed_scale is None) != (precomputed_zero_point is None): - raise ValueError( + msg = ( "If precomputed quantization parameters are provided, both scale and zero point are required " "for asymmetric quantization." ) + raise ValueError(msg) # When reduction axes are not provided, assuming that the weights are already reshaped if config.group_size != -1 and reduction_axes is not None: @@ -478,7 +479,8 @@ def do_int_quantization( scale, zero_point = None, None if precomputed_scale is None or (config.is_asym_mode and precomputed_zero_point is None): if reduction_axes is None: - raise ValueError("Reduction axes are required for computing the scale and (zero point) vectors.") + msg = "Reduction axes are required for computing the scale and (zero point) vectors." 
+ raise ValueError(msg) scale, zero_point = calculate_integer_quantization_params(weight, reduction_axes, config) if precomputed_scale is not None: scale = precomputed_scale diff --git a/nncf/quantization/fake_quantize.py b/nncf/quantization/fake_quantize.py index e0907910eb1..f521982f7a7 100644 --- a/nncf/quantization/fake_quantize.py +++ b/nncf/quantization/fake_quantize.py @@ -316,9 +316,11 @@ def _calculate_scaled_parameters( levels: Number of quantization levels. """ if quantizer_config.mode == QuantizationMode.ASYMMETRIC: - raise nncf.ValidationError("half_range is only applied to symmetric quantization mode.") + msg = "half_range is only applied to symmetric quantization mode." + raise nncf.ValidationError(msg) if quant_group != QuantizerGroup.WEIGHTS: - raise nncf.ValidationError("half_range is only applied to weight quantizers.") + msg = "half_range is only applied to weight quantizers." + raise nncf.ValidationError(msg) num_bits = quantizer_config.num_bits level_low, level_high = calculate_symmetric_level_ranges(num_bits - 1, signed=True, narrow_range=False) diff --git a/nncf/quantization/quantize_model.py b/nncf/quantization/quantize_model.py index c602485bd0c..9b7cdb6edcb 100644 --- a/nncf/quantization/quantize_model.py +++ b/nncf/quantization/quantize_model.py @@ -176,7 +176,8 @@ def quantize( :rtype: TModel """ if subset_size < 1: - raise nncf.ValidationError("Subset size must be positive.") + msg = "Subset size must be positive." + raise nncf.ValidationError(msg) advanced_parameters = _update_advanced_quantization_parameters(advanced_parameters, calibration_dataset) @@ -263,7 +264,8 @@ def quantize( ignored_scope=ignored_scope, advanced_parameters=advanced_parameters, ) - raise nncf.UnsupportedBackendError(f"Unsupported type of backend: {backend}") + msg = f"Unsupported type of backend: {backend}" + raise nncf.UnsupportedBackendError(msg) def wrap_validation_fn(validation_fn): @@ -405,7 +407,8 @@ def quantize_with_accuracy_control( advanced_accuracy_restorer_parameters, ) - raise nncf.UnsupportedBackendError(f"Unsupported type of backend: {backend}") + msg = f"Unsupported type of backend: {backend}" + raise nncf.UnsupportedBackendError(msg) @api(canonical_alias="nncf.compress_weights") @@ -512,9 +515,8 @@ def compress_weights( from nncf.torch.quantization.quantize_model import compress_weights_impl as pt_compression_weights_impl if mode in [CompressWeightsMode.NF4, CompressWeightsMode.E2M1]: - raise nncf.ParameterNotSupportedError( - "Torch backend does not support NF4 and E2M1 modes for weight compression." - ) + msg = "Torch backend does not support NF4 and E2M1 modes for weight compression." + raise nncf.ParameterNotSupportedError(msg) options = { "awq": awq, @@ -523,23 +525,25 @@ def compress_weights( } unsupported_options = [name for name, value in options.items() if value is not None] if unsupported_options: - raise nncf.ParameterNotSupportedError( - f"Torch backend does not support {', '.join(unsupported_options)} option(s). Set them to None." - ) + msg = f"Torch backend does not support {', '.join(unsupported_options)} option(s). Set them to None." + raise nncf.ParameterNotSupportedError(msg) if advanced_parameters and advanced_parameters.statistics_path: - raise nncf.ParameterNotSupportedError("Torch does not support statistics caching.") + msg = "Torch does not support statistics caching." 
+ raise nncf.ParameterNotSupportedError(msg) if is_wrapped_model(model): if not model.nncf.trace_parameters: - raise nncf.ValidationError( + msg = ( "Tracing capabilities with tracing parameters are required in the PyTorch model " "for nncf.compress_weights(). Please wrap the model using " "nncf.torch.wrap_model(model, example_input, trace_parameters=True) before calling " "nncf.compress_weights()." ) + raise nncf.ValidationError(msg) elif dataset is None: - raise nncf.ValidationError("Please provide a dataset of at least one element for PyTorch model tracing.") + msg = "Please provide a dataset of at least one element for PyTorch model tracing." + raise nncf.ValidationError(msg) else: example_input = next(iter(dataset.get_inference_data())) model = wrap_model(model, example_input=example_input, trace_parameters=True) @@ -553,9 +557,8 @@ ) if mode in [CompressWeightsMode.NF4, CompressWeightsMode.E2M1]: - raise nncf.ParameterNotSupportedError( - "Torch backend does not support NF4 and E2M1 modes for weight compression." - ) + msg = "Torch backend does not support NF4 and E2M1 modes for weight compression." + raise nncf.ParameterNotSupportedError(msg) options = { "awq": awq, @@ -565,22 +568,22 @@ "scale_estimation": scale_estimation, "gptq": gptq, "lora_correction": lora_correction, } unsupported_options = [name for name, value in options.items() if value is not None] if unsupported_options: - raise nncf.ParameterNotSupportedError( - f"TorchFX backend does not support {', '.join(unsupported_options)} option(s). Set them to None." - ) + msg = f"TorchFX backend does not support {', '.join(unsupported_options)} option(s). Set them to None." + raise nncf.ParameterNotSupportedError(msg) if sensitivity_metric not in [None, SensitivityMetric.WEIGHT_QUANTIZATION_ERROR]: - raise nncf.ParameterNotSupportedError( + msg = ( "TorchFX backend only supports data-free sensitivity metric. " "Set None or SensitivityMetric.WEIGHT_QUANTIZATION_ERROR." ) + raise nncf.ParameterNotSupportedError(msg) if dataset: - raise nncf.ParameterNotSupportedError( - "TorchFX only supports data-free weights compression. Set the 'dataset' option to None" - ) + msg = "TorchFX only supports data-free weights compression. Set the 'dataset' option to None." + raise nncf.ParameterNotSupportedError(msg) if advanced_parameters and advanced_parameters.statistics_path: - raise nncf.ParameterNotSupportedError("TorchFX does not supports statistics caching.") + msg = "TorchFX does not support statistics caching." + raise nncf.ParameterNotSupportedError(msg) compression_weights_impl = fx_compression_weights_impl if backend == BackendType.OPENVINO: @@ -589,15 +592,15 @@ if any((awq, scale_estimation, gptq, lora_correction)) and ( dataset is None or mode == CompressWeightsMode.E2M1 ): - raise nncf.ParameterNotSupportedError( + msg = ( "Scale estimation, AWQ, GPTQ or Lora Correction algorithm is defined, " "but dataset is None or mode is E2M1." ) + raise nncf.ParameterNotSupportedError(msg) if gptq and lora_correction: - raise nncf.ValidationError( - "Simultaneous use of Lora correction and GPTQ algorithms is not supported. Select one of them." - ) + msg = "Simultaneous use of Lora correction and GPTQ algorithms is not supported. Select one of them."
+ raise nncf.ValidationError(msg) compression_weights_impl = ov_compress_weights_impl check_user_compression_configuration( @@ -633,7 +636,8 @@ def compress_weights( ) if compression_weights_impl is None: - raise nncf.UnsupportedBackendError(f"Unsupported type of backend: {backend}") + msg = f"Unsupported type of backend: {backend}" + raise nncf.UnsupportedBackendError(msg) return compression_weights_impl( model=model, diff --git a/nncf/scopes.py b/nncf/scopes.py index 00819f0dd41..2ae0aa86c9f 100644 --- a/nncf/scopes.py +++ b/nncf/scopes.py @@ -148,7 +148,8 @@ def convert_ignored_scope_to_list(ignored_scope: Optional[IgnoredScope]) -> List for p in ignored_scope.patterns: results.append("{re}" + p) if ignored_scope.types: - raise nncf.InternalError("Legacy ignored scope format does not support operation types") + msg = "Legacy ignored scope format does not support operation types" + raise nncf.InternalError(msg) return results diff --git a/nncf/tensor/functions/dispatcher.py b/nncf/tensor/functions/dispatcher.py index 8fcbedcd9a2..b330c19b3e5 100644 --- a/nncf/tensor/functions/dispatcher.py +++ b/nncf/tensor/functions/dispatcher.py @@ -27,7 +27,8 @@ def tensor_guard(func: callable): def wrapper(*args, **kwargs): if isinstance(args[0], Tensor): return func(*args, **kwargs) - raise NotImplementedError(f"Function `{func.__name__}` is not implemented for {type(args[0])}") + msg = f"Function `{func.__name__}` is not implemented for {type(args[0])}" + raise NotImplementedError(msg) return wrapper @@ -59,7 +60,8 @@ def dispatch_dict(fn: "functools._SingleDispatchCallable", tensor_dict: Dict[str tensor_backend = type(tensor.data) else: if tensor_backend is not type(tensor.data): - raise nncf.InternalError("All tensors in the dictionary should have the same backend") + msg = "All tensors in the dictionary should have the same backend" + raise nncf.InternalError(msg) unwrapped_dict[key] = tensor.data return fn.dispatch(tensor_backend)(unwrapped_dict, *args, **kwargs) diff --git a/nncf/tensor/functions/io.py b/nncf/tensor/functions/io.py index 23ff04db0cf..16dcc8479a3 100644 --- a/nncf/tensor/functions/io.py +++ b/nncf/tensor/functions/io.py @@ -55,4 +55,5 @@ def save_file( fail_if_symlink(file_path) if isinstance(data, dict): return dispatch_dict(save_file, data, file_path) - raise NotImplementedError(f"Function `save_file` is not implemented for {type(data)}") + msg = f"Function `save_file` is not implemented for {type(data)}" + raise NotImplementedError(msg) diff --git a/nncf/tensor/functions/numeric.py b/nncf/tensor/functions/numeric.py index 0aad2ce6e65..9f4c8cfbe8a 100644 --- a/nncf/tensor/functions/numeric.py +++ b/nncf/tensor/functions/numeric.py @@ -340,7 +340,8 @@ def stack(x: List[Tensor], axis: int = 0) -> Tensor: """ if isinstance(x, (list, deque)): return Tensor(dispatch_list(stack, x, axis=axis)) - raise NotImplementedError(f"Function `stack` is not implemented for {type(x)}") + msg = f"Function `stack` is not implemented for {type(x)}" + raise NotImplementedError(msg) @functools.singledispatch @@ -354,7 +355,8 @@ def concatenate(x: List[Tensor], axis: int = 0) -> Tensor: """ if isinstance(x, (list, deque)): return Tensor(dispatch_list(concatenate, x, axis=axis)) - raise NotImplementedError(f"Function `concatenate` is not implemented for {type(x)}") + msg = f"Function `concatenate` is not implemented for {type(x)}" + raise NotImplementedError(msg) @functools.singledispatch diff --git a/nncf/tensor/functions/numpy_numeric.py b/nncf/tensor/functions/numpy_numeric.py index 
9c2f65e3a34..4c272e1a08b 100644 --- a/nncf/tensor/functions/numpy_numeric.py +++ b/nncf/tensor/functions/numpy_numeric.py @@ -36,7 +36,8 @@ def validate_device(device: TensorDeviceType) -> None: if device is not None and device != TensorDeviceType.CPU: - raise ValueError("numpy_numeric only supports CPU device.") + msg = "numpy_numeric only supports CPU device." + raise ValueError(msg) def convert_to_numpy_dtype(dtype: TensorDataType) -> np.dtype: diff --git a/nncf/tensor/functions/torch_numeric.py b/nncf/tensor/functions/torch_numeric.py index 8f62990cdae..0b31a2adf0e 100644 --- a/nncf/tensor/functions/torch_numeric.py +++ b/nncf/tensor/functions/torch_numeric.py @@ -62,7 +62,8 @@ def _(a: torch.Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> to return a.squeeze() if isinstance(axis, Tuple) and any(a.shape[i] != 1 for i in axis): # Make Numpy behavior, torch.squeeze skips axes that are not equal to one.. - raise ValueError("Cannot select an axis to squeeze out which has size not equal to one") + msg = "Cannot select an axis to squeeze out which has size not equal to one" + raise ValueError(msg) return a.squeeze(axis) @@ -390,14 +391,16 @@ def _(a: torch.Tensor, axis: Union[int, Tuple[int, ...], List[int]]) -> np.ndarr axis = (axis,) if len(set(axis)) != len(axis): - raise ValueError("repeated axis") + msg = "repeated axis" + raise ValueError(msg) out_ndim = len(axis) + a.dim() norm_axis = [] for ax in axis: if ax < -out_ndim or ax >= out_ndim: - raise ValueError(f"axis {ax} is out of bounds for array of dimension {out_ndim}") + msg = f"axis {ax} is out of bounds for array of dimension {out_ndim}" + raise ValueError(msg) norm_axis.append(ax + out_ndim if ax < 0 else ax) shape_it = iter(a.shape) @@ -413,9 +416,11 @@ def _(a: torch.Tensor) -> torch.Tensor: @numeric.searchsorted.register(torch.Tensor) def _(a: torch.Tensor, v: torch.Tensor, side: str = "left", sorter: Optional[torch.Tensor] = None) -> torch.Tensor: if side not in ["right", "left"]: - raise ValueError(f"Invalid value for 'side': {side}. Expected 'right' or 'left'.") + msg = f"Invalid value for 'side': {side}. Expected 'right' or 'left'." + raise ValueError(msg) if a.dim() != 1: - raise ValueError(f"Input tensor 'a' must be 1-D. Received {a.dim()}-D tensor.") + msg = f"Input tensor 'a' must be 1-D. Received {a.dim()}-D tensor." + raise ValueError(msg) return torch.searchsorted(sorted_sequence=a, input=v, right=(side == "right"), sorter=sorter) diff --git a/nncf/tensor/tensor.py b/nncf/tensor/tensor.py index 3ee387371e5..cceb986164d 100644 --- a/nncf/tensor/tensor.py +++ b/nncf/tensor/tensor.py @@ -265,6 +265,7 @@ def get_tensor_backend(backend: BackendType) -> TensorBackend: BackendType.TORCH: TensorBackend.torch, } if backend not in BACKEND_TO_TENSOR_BACKEND: - raise nncf.ValidationError(f"Unsupported backend type: {backend}") + msg = f"Unsupported backend type: {backend}" + raise nncf.ValidationError(msg) return BACKEND_TO_TENSOR_BACKEND[backend] diff --git a/nncf/tensorflow/accuracy_aware_training/keras_model_utils.py b/nncf/tensorflow/accuracy_aware_training/keras_model_utils.py index 821f6c262c3..88af74f22cf 100644 --- a/nncf/tensorflow/accuracy_aware_training/keras_model_utils.py +++ b/nncf/tensorflow/accuracy_aware_training/keras_model_utils.py @@ -92,7 +92,8 @@ def train_epoch_fn(compression_ctrl, model, epoch, **kwargs): break if logs is None: - raise ValueError("Expect x to be a non-empty array or dataset.") + msg = "Expect x to be a non-empty array or dataset." 
+ raise ValueError(msg) epoch_logs = copy.copy(logs) callbacks.on_epoch_end(epoch, epoch_logs) diff --git a/nncf/tensorflow/accuracy_aware_training/runner.py b/nncf/tensorflow/accuracy_aware_training/runner.py index 03ca47715d7..44ba5ab0c66 100644 --- a/nncf/tensorflow/accuracy_aware_training/runner.py +++ b/nncf/tensorflow/accuracy_aware_training/runner.py @@ -114,6 +114,7 @@ def _make_checkpoint_path(self, is_best, compression_rate=None): base_path = osp.join(self._checkpoint_save_dir, "acc_aware_checkpoint") if is_best: if compression_rate is None: - raise ValueError("Compression rate cannot be None") + msg = "Compression rate cannot be None" + raise ValueError(msg) return f"{base_path}_best_{compression_rate:.3f}{extension}" return f"{base_path}_last{extension}" diff --git a/nncf/tensorflow/api/composite_compression.py b/nncf/tensorflow/api/composite_compression.py index b009f6015a1..c1e9c0a7102 100644 --- a/nncf/tensorflow/api/composite_compression.py +++ b/nncf/tensorflow/api/composite_compression.py @@ -29,10 +29,11 @@ def __init__(self, config: NNCFConfig, should_init: bool = True): algo_names = extract_algorithm_names(config) if len(algo_names) < 2: - raise nncf.ValidationError( + msg = ( "Composite algorithm builder must be supplied with a config with more than one " "compression algo specified!" ) + raise nncf.ValidationError(msg) for algo_name in algo_names: algo_builder_cls = get_compression_algorithm_builder(algo_name) self._child_builders.append(algo_builder_cls(config, should_init=should_init)) diff --git a/nncf/tensorflow/api/compression.py b/nncf/tensorflow/api/compression.py index 5d9f84eb5fe..8de708b6d6c 100644 --- a/nncf/tensorflow/api/compression.py +++ b/nncf/tensorflow/api/compression.py @@ -30,10 +30,11 @@ def __init__(self, config: NNCFConfig, should_init: bool = True): "compression_lr_multiplier", self.name ) if compression_lr_multiplier is not None: - raise Exception( + msg = ( "compression_lr_multiplier is not supported when your work with a TF model in NNCF. " "Please remove the compression_lr_multiplier attribute from your NNCFConfig." ) + raise Exception(msg) def _get_state_without_name(self) -> Dict[str, Any]: """ diff --git a/nncf/tensorflow/batchnorm_adaptation.py b/nncf/tensorflow/batchnorm_adaptation.py index 6c3de1c4caf..b27a9db9572 100644 --- a/nncf/tensorflow/batchnorm_adaptation.py +++ b/nncf/tensorflow/batchnorm_adaptation.py @@ -54,11 +54,12 @@ def run(self, model: tf.keras.Model) -> None: :param model: A model for which the algorithm will be applied. """ if self._device is not None: - raise ValueError( + msg = ( "TF implementation of batchnorm adaptation algorithm " "does not support switch of devices. Model initial device " "is used by default for batchnorm adaptation." 
) + raise ValueError(msg) with BNTrainingStateSwitcher(model): for x, _ in ProgressBar( islice(self._data_loader, self._num_bn_adaptation_steps), diff --git a/nncf/tensorflow/callbacks/checkpoint_callback.py b/nncf/tensorflow/callbacks/checkpoint_callback.py index 4a4911163e1..e6e333de175 100644 --- a/nncf/tensorflow/callbacks/checkpoint_callback.py +++ b/nncf/tensorflow/callbacks/checkpoint_callback.py @@ -31,7 +31,8 @@ def __init__(self, checkpoint: tf.train.Checkpoint, directory: str, save_freq: U self._last_batch_seen = 0 self._batches_seen_since_last_saving = 0 if save_freq != "epoch" and not isinstance(save_freq, int): - raise ValueError("Unrecognized save_freq: {}".format(save_freq)) + msg = f"Unrecognized save_freq: {save_freq}" + raise ValueError(msg) self._checkpoint_manager = tf.train.CheckpointManager(checkpoint, directory, None) self._save_freq = save_freq diff --git a/nncf/tensorflow/callbacks/statistics_callback.py b/nncf/tensorflow/callbacks/statistics_callback.py index 1e20e8aa1b7..639470ecc75 100644 --- a/nncf/tensorflow/callbacks/statistics_callback.py +++ b/nncf/tensorflow/callbacks/statistics_callback.py @@ -44,7 +44,8 @@ def __init__( self._file_writer = None if log_tensorboard: if log_dir is None: - raise ValueError("log_dir must be specified if log_tensorboard is true.") + msg = "log_dir must be specified if log_tensorboard is true." + raise ValueError(msg) self._file_writer = tf.summary.create_file_writer(log_dir + "/compression") @@ -76,6 +77,5 @@ def on_train_end(self, logs: dict = None): self._file_writer.close() def _prepare_for_tensorboard(self, stats: NNCFStatistics): - raise NotImplementedError( - "StatisticsCallback class implementation must override the _prepare_for_tensorboard method." - ) + msg = "StatisticsCallback class implementation must override the _prepare_for_tensorboard method." + raise NotImplementedError(msg) diff --git a/nncf/tensorflow/exporter.py b/nncf/tensorflow/exporter.py index 37de10c7dfa..da45524056f 100644 --- a/nncf/tensorflow/exporter.py +++ b/nncf/tensorflow/exporter.py @@ -56,7 +56,8 @@ def export_model(self, save_path: str, save_format: str = TFExportFormat.FROZEN_ if export_fn is None: available_formats = list(format_to_export_fn.keys()) - raise ValueError(f"Unsupported saving format: '{save_format}'. Available formats: {available_formats}") + msg = f"Unsupported saving format: '{save_format}'. Available formats: {available_formats}" + raise ValueError(msg) export_fn(save_path) diff --git a/nncf/tensorflow/graph/converter.py b/nncf/tensorflow/graph/converter.py index 013e0a09b5a..31f35bc08e0 100644 --- a/nncf/tensorflow/graph/converter.py +++ b/nncf/tensorflow/graph/converter.py @@ -258,7 +258,8 @@ def _collect_custom_layer_infos( input_graphdef_node_name = splits[0] output_port_id = int(splits[1]) else: - raise nncf.InternalError("Could not parse NodeDef's input field!") + msg = "Could not parse NodeDef's input field!" 
+ raise nncf.InternalError(msg) pretty_input_node_name = custom_layer_info.graphdef_node_name_to_pretty_node_name[ input_graphdef_node_name @@ -360,7 +361,8 @@ def get_node_name(graphdef_node_name: str): # Filter control inputs, whatever these are previous_node_names = list(filter(lambda x: "^" not in x, previous_node_names)) if weight_node_name is None: - raise nncf.InternalError("Could not find a weight node for a weighted node {}".format(weighted_node.name)) + msg = f"Could not find a weight node for a weighted node {weighted_node.name}" + raise nncf.InternalError(msg) return weight_node_name @staticmethod @@ -407,7 +409,8 @@ def _get_layer(self, layer_name: str) -> tf.keras.layers.Layer: if layer.name == layer_name: return layer - raise ValueError(f"No such layer: {layer_name}.") + msg = f"No such layer: {layer_name}." + raise ValueError(msg) def _add_custom_layer_subgraph(self, nncf_graph: NNCFGraph, custom_layer_name: str) -> NNCFGraph: # TODO (vshampor): filter meaningless ops such as Identity, resource read etc. diff --git a/nncf/tensorflow/graph/metatypes/keras_layers.py b/nncf/tensorflow/graph/metatypes/keras_layers.py index 9945b5f0178..8586607797b 100644 --- a/nncf/tensorflow/graph/metatypes/keras_layers.py +++ b/nncf/tensorflow/graph/metatypes/keras_layers.py @@ -66,7 +66,8 @@ def _determine_subtype( else: matches.append(subtype) if len(matches) > 1: - raise nncf.InternalError("Multiple subtypes match operator call - cannot determine single subtype.") + msg = "Multiple subtypes match operator call - cannot determine single subtype." + raise nncf.InternalError(msg) if not matches: return None return matches[0] @@ -635,7 +636,8 @@ def _is_depthwise_conv(layer: tf.keras.layers.Layer, wrapper: Optional[tf.keras. ) if channels is None: - raise ValueError("The channel dimension of the inputs should be defined. Found `None`.") + msg = "The channel dimension of the inputs should be defined. Found `None`." + raise ValueError(msg) input_channels = int(channels) diff --git a/nncf/tensorflow/graph/model_transformer.py b/nncf/tensorflow/graph/model_transformer.py index ca35dd9eff2..c52316bb0a6 100644 --- a/nncf/tensorflow/graph/model_transformer.py +++ b/nncf/tensorflow/graph/model_transformer.py @@ -52,7 +52,8 @@ def __init__(self, model: TModel) -> None: :param model: Keras model to be transformed """ if not is_sequential_or_functional_model(model): - raise ValueError("Only tf.keras sequential or functional models can be transformed.") + msg = "Only tf.keras sequential or functional models can be transformed." 
+ raise ValueError(msg) super().__init__(model) self._model_config = model.get_config() @@ -139,7 +140,8 @@ def _apply_transformation(self, transformation: TransformationCommand): elif transformation.type == TransformationType.REMOVE: self._remove(transformation.target_point) else: - raise TypeError("Transformation type {} does not support".format(transformation.type)) + msg = f"Transformation type {transformation.type} is not supported" + raise TypeError(msg) def _insert(self, target_point: Union[TargetPoint, TFMultiLayerPoint], insertion_objects: List[Callable]): if isinstance(target_point, TFMultiLayerPoint): @@ -156,7 +158,8 @@ target_point.layer_name, target_point.instance_idx, target_point.output_port_id, insertion_objects ) else: - raise TypeError("Insertion transform does not support {} target point type".format(target_point.type)) + msg = f"Insertion transform does not support {target_point.type} target point type" + raise TypeError(msg) def _shared_insert_layers(self, target_points: List[TargetPoint], layers_to_insert: List[Callable]): functional_model = is_functional_model(self._model) @@ -164,7 +167,8 @@ def _shared_insert_layers(self, target_points: List[TargetPoint], layers_to_inse for layer in self._model_config["input_layers"]: for tp in target_points: if isinstance(tp, TFBeforeLayer) and tp.layer_name == layer[0]: - raise nncf.InternalError(f"Insertion before input layer: {tp.layer_name} is not supported") + msg = f"Insertion before input layer: {tp.layer_name} is not supported" + raise nncf.InternalError(msg) layer_configs = [] for layer in layers_to_insert: @@ -186,9 +190,8 @@ inbound[3], ] else: - raise TypeError( - f"Insertion transform does not support {target_points[0].type} target point type" - ) + msg = f"Insertion transform does not support {target_points[0].type} target point type" + raise TypeError(msg) layer_configs.append(config) @@ -211,26 +214,25 @@ tp.layer_name, tp.instance_idx, layer_out_ports, replace_layer_name, i ) if len(layer_out_ports) > 1: - raise nncf.InternalError( - "Insertion after layer ({}) with multiple ports is not supported".format(tp.layer_name) - ) + msg = f"Insertion after layer ({tp.layer_name}) with multiple ports is not supported" + raise nncf.InternalError(msg) layer_name = target_points[0].layer_name self._insert_layer_after_sequential(layer_name, config) def _multi_insertion(self, target_point: TargetPoint, commands: List[TransformationCommand]): if not isinstance(target_point, TFLayer): - raise TypeError( - "Multiple insertion transform does not support {} target point type".format(target_point.type) - ) + msg = f"Multiple insertion transform does not support {target_point.type} target point type" + raise TypeError(msg) weight_operations = [] for cmd in commands: if cmd.type != TransformationType.INSERT or cmd.target_point.type != TargetType.OPERATION_WITH_WEIGHTS: - raise TypeError( + msg = ( "Multiple insertion transform does not support command: " - "command type - {}; target point type - {}".format(cmd.type, cmd.target_point.type) + f"command type - {cmd.type}; target point type - {cmd.target_point.type}" ) + raise TypeError(msg) weight_operations.append(WeightOperations(cmd.target_point.weights_attr_name, cmd.insertion_objects)) self._insert_weight_operations(target_point.layer_name,
weight_operations) @@ -242,7 +244,8 @@ def _remove(self, target_point: TargetPoint): target_layer_name, target_point.weights_attr_name, target_point.operation_name ) else: - raise TypeError("{} removal does not support".format(target_point.type)) + msg = f"{target_point.type} removal is not supported" + raise TypeError(msg) def _remove_weight_operation(self, layer_name: str, weights_attr_name: str, operation_name: str): _, layer_config = self._find_layer_config(layer_name) @@ -326,7 +329,8 @@ def _insert_layers_before(self, layer_name: str, instance_idx: int, input_port_i if functional_model: for layer in self._model_config["input_layers"]: if layer_name == layer[0]: - raise nncf.InternalError("Insertion before input layer: {} is not supported".format(layer_name)) + msg = f"Insertion before input layer: {layer_name} is not supported" + raise nncf.InternalError(msg) layer_configs = [] idx, downstream_layer_cfg = self._find_layer_config(layer_name) @@ -411,15 +415,15 @@ def _insert_layer_after_functional(self, layer_name: str, instance_idx: int, lay self._insert_after_model_outputs(layer_name, instance_idx, layer_out_ports, replace_layer_name) if len(layer_out_ports) > 1: - raise nncf.InternalError( - "Insertion after layer ({}) with multiple ports is not supported".format(layer_name) - ) + msg = f"Insertion after layer ({layer_name}) with multiple ports is not supported" + raise nncf.InternalError(msg) self._insert_layer_after_sequential(layer_name, layer_to_insert_config) def _insert_layer_after_sequential(self, layer_name: str, layer_configs): idx, _ = self._find_layer_config(layer_name) if idx is None: - raise nncf.InternalError("Layer is not found: {}".format(layer_name)) + msg = f"Layer not found: {layer_name}" + raise nncf.InternalError(msg) self._model_config["layers"].insert(idx + 1, layer_configs) @staticmethod diff --git a/nncf/tensorflow/graph/transformations/commands.py b/nncf/tensorflow/graph/transformations/commands.py index 3fb321f9533..1949d98b59a 100644 --- a/nncf/tensorflow/graph/transformations/commands.py +++ b/nncf/tensorflow/graph/transformations/commands.py @@ -444,13 +444,15 @@ def insertion_objects(self) -> List[Callable]: def union(self, other: TFTransformationCommand) -> "TFInsertionCommand": if isinstance(self.target_point, TFMultiLayerPoint): - raise NotImplementedError( + msg = ( "A command of TFInsertionCommand type with TFMultiLayerPoint " "could not be united with another command" ) + raise NotImplementedError(msg) if not self.check_command_compatibility(other): - raise ValueError("{} and {} commands could not be united".format(type(self).__name__, type(other).__name__)) + msg = f"{type(self).__name__} and {type(other).__name__} commands could not be united" + raise ValueError(msg) com = TFInsertionCommand(self.target_point) com.callable_objects = self.callable_objects + other.callable_objects @@ -467,7 +469,8 @@ def __init__(self, target_point: TargetPoint): super().__init__(TransformationType.REMOVE, target_point) def union(self, other: TFTransformationCommand) -> "TFRemovalCommand": - raise NotImplementedError("A command of TFRemovalCommand type could not be united with another command") + msg = "A command of TFRemovalCommand type could not be united with another command" + raise NotImplementedError(msg) class TFMultipleInsertionCommands(TFTransformationCommand): @@ -510,7 +513,8 @@ def check_insertion_command(self, command: TFTransformationCommand) -> bool: def add_insertion_command(self, command: TFTransformationCommand) -> None: if not
self.check_insertion_command(command): - raise ValueError("{} command could not be added".format(type(command).__name__)) + msg = f"{type(command).__name__} command could not be added" + raise ValueError(msg) for idx, cmd in enumerate(self.commands): if cmd.target_point == command.target_point: @@ -521,7 +525,8 @@ def add_insertion_command(self, command: TFTransformationCommand) -> None: def union(self, other: TFTransformationCommand) -> "TFMultipleInsertionCommands": if not self.check_command_compatibility(other): - raise ValueError("{} and {} commands could not be united".format(type(self).__name__, type(other).__name__)) + msg = f"{type(self).__name__} and {type(other).__name__} commands could not be united" + raise ValueError(msg) def make_check_target_points_fn(fn1, fn2): def check_target_points(tp0, tp1): diff --git a/nncf/tensorflow/graph/utils.py b/nncf/tensorflow/graph/utils.py index 1e1cdfd35c9..146001b65e2 100644 --- a/nncf/tensorflow/graph/utils.py +++ b/nncf/tensorflow/graph/utils.py @@ -110,7 +110,7 @@ def collect_wrapped_layers(model): def get_shared_node_name(layer_name: str, instance_idx: int): - return "{}{}{}".format(layer_name, SHARED_OPERATION_MARK, instance_idx) + return f"{layer_name}{SHARED_OPERATION_MARK}{instance_idx}" def get_original_name_and_instance_idx(node_name: NNCFNodeName): @@ -129,7 +129,8 @@ def get_layer_to_graph_nodes_map(model, node_names): for node in node_names: parent_layer_name = node.split("/")[1] # model_name/layer_name/layer_op_name/... if parent_layer_name not in layer_to_nodes_map: - raise nncf.ValidationError("Could not find {} layer in Model".format(parent_layer_name)) + msg = f"Could not find {parent_layer_name} layer in Model" + raise nncf.ValidationError(msg) layer_to_nodes_map[parent_layer_name]["nodes"].append(node) return layer_to_nodes_map diff --git a/nncf/tensorflow/helpers/model_creation.py b/nncf/tensorflow/helpers/model_creation.py index 3edbb41880e..93cb3c11534 100644 --- a/nncf/tensorflow/helpers/model_creation.py +++ b/nncf/tensorflow/helpers/model_creation.py @@ -82,10 +82,11 @@ def create_compressed_model( """ if is_experimental_quantization(config): if is_keras_layer_model(model): - raise ValueError( - "Experimental quantization algorithm has not supported models with " - "`tensorflow_hub.KerasLayer` layer yet." + msg = ( + "Experimental quantization algorithm does not yet support models with " + "`tensorflow_hub.KerasLayer` layers."
) + raise ValueError(msg) from nncf.experimental.tensorflow.nncf_network import NNCFNetwork @@ -118,7 +119,8 @@ def get_input_signature(config: NNCFConfig): sample_size = info["sample_size"] samples_sizes.append(sample_size) else: - raise nncf.ValidationError("sample_size must be provided in configuration file") + msg = "sample_size must be provided in configuration file" + raise nncf.ValidationError(msg) input_signature = [] for sample_size in samples_sizes: diff --git a/nncf/tensorflow/helpers/utils.py b/nncf/tensorflow/helpers/utils.py index 50dccfc81a2..22caa811330 100644 --- a/nncf/tensorflow/helpers/utils.py +++ b/nncf/tensorflow/helpers/utils.py @@ -20,7 +20,8 @@ def get_built_model(model, config): else: sample_size = input_info[0].get("sample_size", None) if input_info else None if not sample_size: - raise nncf.ValidationError("sample_size must be provided in configuration file") + msg = "sample_size must be provided in configuration file" + raise nncf.ValidationError(msg) model.build([None] + list(sample_size[1:])) return model diff --git a/nncf/tensorflow/layers/wrapper.py b/nncf/tensorflow/layers/wrapper.py index 16c87d66ef7..763c2494aee 100644 --- a/nncf/tensorflow/layers/wrapper.py +++ b/nncf/tensorflow/layers/wrapper.py @@ -37,13 +37,15 @@ def __init__(self, layer, **kwargs): :param kwargs: additional keyword arguments to be passed to the keras layer. """ if layer is None: - raise ValueError("`layer` cannot be None.") + msg = "`layer` cannot be None." + raise ValueError(msg) if not isinstance(layer, tf.keras.layers.Layer) or isinstance(layer, tf.keras.Model): - raise ValueError( + msg = ( "`layer` can only be a `tf.keras.layers.Layer` instance. " - "You passed an instance of type: {input}.".format(input=layer.__class__.__name__) + f"You passed an instance of type: {layer.__class__.__name__}." ) + raise ValueError(msg) if "name" not in kwargs: kwargs["name"] = layer.name @@ -202,9 +204,8 @@ def registry_weight_operation(self, weights_attr: str, op: NNCFOperation): self.weights_attr_ops[weights_attr] = OrderedDict() if op.name in self.weights_attr_ops[weights_attr]: - raise nncf.InternalError( - f"Attempt to apply an operation with the same name {op.name} on layer weight twice" - ) + msg = f"Attempt to apply an operation with the same name {op.name} on layer weight twice" + raise nncf.InternalError(msg) self.weights_attr_ops[weights_attr][op.name] = op diff --git a/nncf/tensorflow/pruning/base_algorithm.py b/nncf/tensorflow/pruning/base_algorithm.py index 8d69c5fbe27..7689a5943f2 100644 --- a/nncf/tensorflow/pruning/base_algorithm.py +++ b/nncf/tensorflow/pruning/base_algorithm.py @@ -312,7 +312,8 @@ def _check_pruning_level(self, params): pruning_target = params.get("pruning_target", None) pruning_flops_target = params.get("pruning_flops_target", None) if pruning_target and pruning_flops_target: - raise ValueError("Only one parameter from 'pruning_target' and 'pruning_flops_target' can be set.") + msg = "Only one parameter from 'pruning_target' and 'pruning_flops_target' can be set." 
+ raise ValueError(msg) if pruning_flops_target: self.prune_flops = True diff --git a/nncf/tensorflow/pruning/filter_pruning/algorithm.py b/nncf/tensorflow/pruning/filter_pruning/algorithm.py index d5cc4ae2fae..aa565e7ae6e 100644 --- a/nncf/tensorflow/pruning/filter_pruning/algorithm.py +++ b/nncf/tensorflow/pruning/filter_pruning/algorithm.py @@ -438,7 +438,8 @@ def _set_binary_masks_for_pruned_modules_globally_by_flops_target(self, target_f if nncf_node.attributes["output_mask"] is not None: self._set_operation_masks([layer], nncf_node.attributes["output_mask"].tensor) return - raise nncf.InternalError(f"Unable to prune model to required flops pruning level: {target_flops_pruning_level}") + msg = f"Unable to prune model to required flops pruning level: {target_flops_pruning_level}" + raise nncf.InternalError(msg) def _set_operation_masks(self, layers: List[NNCFWrapper], filter_mask): for layer in layers: @@ -466,9 +467,8 @@ def _find_uniform_pruning_level_for_target_flops(self, target_flops_pruning_leve self.current_flops = flops self.current_params_num = params_num return right - raise nncf.ParameterNotSupportedError( - f"Unable to prune the model to get the required pruning level in flops = {target_flops_pruning_level}" - ) + msg = f"Unable to prune the model to get the required pruning level in flops = {target_flops_pruning_level}" + raise nncf.ParameterNotSupportedError(msg) def _calculate_flops_and_weights_in_uniformly_pruned_model(self, pruning_level): ( @@ -536,10 +536,11 @@ def _update_benchmark_statistics(self): def _layer_filter_importance(self, layer: NNCFWrapper): layer_metatype = get_keras_layer_metatype(layer) if len(layer_metatype.weight_definitions) != 1: - raise nncf.InternalError( - f"The layer {layer.layer.name} does not support by the pruning " + msg = ( + f"The layer {layer.layer.name} is not supported by the pruning " f"algorithm because it contains several weight attributes." ) + raise nncf.InternalError(msg) weight_attr = layer_metatype.weight_definitions[0].weight_attr_name weight = layer.layer_weights[weight_attr] if self.all_weights: diff --git a/nncf/tensorflow/pruning/utils.py b/nncf/tensorflow/pruning/utils.py index 801dc508a81..2c024ba2809 100644 --- a/nncf/tensorflow/pruning/utils.py +++ b/nncf/tensorflow/pruning/utils.py @@ -38,7 +38,8 @@ def get_filter_axis(layer: NNCFWrapper, weight_attr: str) -> int: def get_filters_num(layer: NNCFWrapper): layer_metatype = get_keras_layer_metatype(layer) if len(layer_metatype.weight_definitions) != 1: - raise ValueError(f"Could not calculate the number of filters for the layer {layer.layer.name}.") + msg = f"Could not calculate the number of filters for the layer {layer.layer.name}."
+ raise ValueError(msg) weight_def = layer_metatype.weight_definitions[0] weight_attr = weight_def.weight_attr_name @@ -95,7 +96,8 @@ def collect_output_shapes(model: "NNCFNetwork", graph: NNCFGraph) -> Dict[NNCFNo out_shape = layer.get_output_shape_at(node_index)[dims_slice] if not is_valid_shape(in_shape) or not is_valid_shape(out_shape): - raise nncf.ValidationError(f"Input/output shape is not defined for layer `{layer.name}` ") + msg = f"Input/output shape is not defined for layer `{layer.name}`" + raise nncf.ValidationError(msg) layers_out_shapes[node.node_name] = out_shape @@ -107,7 +109,8 @@ def collect_output_shapes(model: "NNCFNetwork", graph: NNCFGraph) -> Dict[NNCFNo out_shape = layer.get_output_shape_at(node_index)[1:] if not is_valid_shape(in_shape) or not is_valid_shape(out_shape): - raise nncf.ValidationError(f"Input/output shape is not defined for layer `{layer.name}` ") + msg = f"Input/output shape is not defined for layer `{layer.name}`" + raise nncf.ValidationError(msg) layers_out_shapes[node.node_name] = out_shape return layers_out_shapes diff --git a/nncf/tensorflow/quantization/algorithm.py b/nncf/tensorflow/quantization/algorithm.py index 7a8a23a0ed7..a3f298a63f3 100644 --- a/nncf/tensorflow/quantization/algorithm.py +++ b/nncf/tensorflow/quantization/algorithm.py @@ -256,9 +256,11 @@ def __init__(self, config: NNCFConfig, should_init: bool = True): self._target_device = config.get("target_device", TARGET_DEVICE) algo_config = self._get_algo_specific_config_section() if self._target_device == "NPU" and "preset" in algo_config: - raise nncf.ValidationError("The NPU target device does not support presets.") + msg = "The NPU target device does not support presets." + raise nncf.ValidationError(msg) if self._target_device == "CPU_SPR": - raise nncf.ValidationError("The CPU_SPR target device does not supported.") + msg = "The CPU_SPR target device is not supported." + raise nncf.ValidationError(msg) self.global_quantizer_constraints = {} self.ignored_scopes_per_group = {} @@ -470,18 +472,20 @@ def _get_quantizer_setup(self, model: tf.keras.Model) -> TFQuantizationSetup: target_node = nncf_graph.get_node_by_name(qp.insertion_point.target_node_name) is_custom, layer_info = converter.get_layer_info_for_node(target_node.node_name) if is_custom: - raise nncf.InternalError("Quantizing custom layer weights is currently unsupported!") + msg = "Quantizing custom layer weights is currently unsupported!" + raise nncf.InternalError(msg) layer_name = layer_info.layer_name qconfig = qp.qconfig if layer_name in quantized_layer_names_vs_qconfigs: assigned_qconfig = quantized_layer_names_vs_qconfigs[layer_name] if qconfig != assigned_qconfig: - raise nncf.InternalError( + msg = ( f"Inconsistent quantizer configurations selected by solver for one and the " f"same quantizable layer! Tried to assign {qconfig} to {layer_name} as " f"specified by QP {qp_id}, but the layer already has quantizer " f"config {assigned_qconfig} assigned to it!" ) + raise nncf.InternalError(msg) continue # The layer has already been quantized quantized_layer_names_vs_qconfigs[layer_name] = qconfig metatype = target_node.metatype @@ -510,7 +514,8 @@ def _get_quantizer_setup(self, model: tf.keras.Model) -> TFQuantizationSetup: is_custom, layer_info = converter.get_layer_info_for_node(target_node_name) if is_custom: - raise nncf.InternalError("Quantizing custom layer activations is currently unsupported!") + msg = "Quantizing custom layer activations is currently unsupported!"
+ raise nncf.InternalError(msg) if input_port_id is not None: target_point = TFBeforeLayer( layer_info.layer_name, instance_idx=layer_info.instance_idx, input_port_id=input_port_id ) @@ -547,7 +552,8 @@ def _log_if_overflow_fix_was_applied(self, applied_overflow_fix: bool): elif self._overflow_fix == "first_layer_only": quantizers_with_overflow_fix_str = "first convolution weight quantizers" elif self._overflow_fix != "disable": - raise nncf.InternalError(f"Unknown overflow fix type: {self._overflow_fix}") + msg = f"Unknown overflow fix type: {self._overflow_fix}" + raise nncf.InternalError(msg) nncf_logger.info(f"Overflow issue fix was applied to {quantizers_with_overflow_fix_str}.") def _generate_unified_scale_groups( @@ -707,7 +713,7 @@ def _preprocess_cast_nodes(nncf_graph: NNCFGraph, cast_metatypes: List[OperatorM def _get_fake_quantize_name(self, node_name: NNCFNodeName, input_port_id: int = None) -> str: original_node_name, instance_idx = get_original_name_and_instance_idx(node_name) - fq_name = "{}/fake_quantize".format(original_node_name) + fq_name = f"{original_node_name}/fake_quantize" if instance_idx != 0: fq_name += f"_{instance_idx}" if input_port_id is not None: diff --git a/nncf/tensorflow/quantization/init_range.py b/nncf/tensorflow/quantization/init_range.py index f2befb3825c..59a15f7478e 100644 --- a/nncf/tensorflow/quantization/init_range.py +++ b/nncf/tensorflow/quantization/init_range.py @@ -75,18 +75,15 @@ def get_init_config_for_scope_and_group(self, node_name: str, group: QuantizerGr ) ) if len(matches) > 1: - raise ValueError( - "Location {} matches more than one per-layer initialization parameter " - "definition!".format(str(node_name)) - ) + msg = f"Location {node_name} matches more than one per-layer initialization parameter definition!" + raise ValueError(msg) if len(matches) == 1: return matches[0] if not matches and self.global_init_config is not None: return deepcopy(self.global_init_config) - raise ValueError( - "Location {} does not match any per-layer initialization parameter definition!".format(str(node_name)) - ) + msg = f"Location {node_name} does not match any per-layer initialization parameter definition!" + raise ValueError(msg) class RangeInitializer: @@ -137,7 +134,8 @@ def generate_stat_collector( min_percentile = init_config.init_type_specific_params.get("min_percentile", MIN_PERCENTILE) max_percentile = init_config.init_type_specific_params.get("max_percentile", MAX_PERCENTILE) return TFMeanPercentileStatisticCollector([min_percentile, max_percentile], reduction_shape, num_samples) - raise ValueError(f"Range type {range_type} is not supported.") + msg = f"Range type {range_type} is not supported." + raise ValueError(msg) def _register_layer_statistics(self, layer: tf.keras.layers.Layer, layer_statistics: list, handles: list): channel_axes = get_channel_axis(InputType.INPUTS, "", layer) diff --git a/nncf/tensorflow/quantization/quantize_model.py b/nncf/tensorflow/quantization/quantize_model.py index 02da5a28ab4..e27abab4920 100644 --- a/nncf/tensorflow/quantization/quantize_model.py +++ b/nncf/tensorflow/quantization/quantize_model.py @@ -148,20 +148,25 @@ def quantize_impl( Implementation of the `quantize()` method for the TensorFlow backend.
""" if model_type is not None: - raise ValueError(f"model_type={model_type} is not supported") + msg = f"model_type={model_type} is not supported" + raise ValueError(msg) if fast_bias_correction is False: - raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported") + msg = f"fast_bias_correction={fast_bias_correction} is not supported" + raise ValueError(msg) if ignored_scope is not None and ignored_scope.types: - raise nncf.InternalError( + msg = ( "Quantization algorithm form the TensorFlow backend " "does not support operation types in the ignored " "scopes yet" ) + raise nncf.InternalError(msg) if target_device == TargetDevice.CPU_SPR: - raise nncf.InternalError("target_device == CPU_SPR is not supported.") + msg = "target_device == CPU_SPR is not supported." + raise nncf.InternalError(msg) if mode is not None: - raise ValueError(f"mode={mode} is not supported") + msg = f"mode={mode} is not supported" + raise ValueError(msg) if preset is None: preset = QuantizationPreset.PERFORMANCE diff --git a/nncf/tensorflow/quantization/quantizers.py b/nncf/tensorflow/quantization/quantizers.py index 02a68ada283..4b7a9cfb864 100644 --- a/nncf/tensorflow/quantization/quantizers.py +++ b/nncf/tensorflow/quantization/quantizers.py @@ -154,7 +154,8 @@ def setup_input_transformation(self, input_shape, channel_axes): input_shape, channel_axes ) except NotImplementedError as e: - raise NotImplementedError(f"Additional information: quantizer name {self.name}") from e + msg = f"Additional information: quantizer name {self.name}" + raise NotImplementedError(msg) from e @staticmethod def _make_transformation_fns(input_shape, channel_axes): @@ -177,10 +178,11 @@ def _make_transformation_fns(input_shape, channel_axes): accumulate = False new_shape.append(val) if switch_counter > 1: - raise NotImplementedError( + msg = ( "Quntizer could not transform input to apply per-channel quantization: " f"input_shape {input_shape}, channel_axes {channel_axes}" ) + raise NotImplementedError(msg) forward_params = {"shape": new_shape} backward_params = {"shape": input_shape} fns_registry.append((tf.reshape, forward_params, backward_params)) @@ -327,9 +329,8 @@ def _create_variables(self, layer, input_shape, channel_axes, name: str = ""): def apply_overflow_fix(self, weights): if self.num_bits != 8 or not self._half_range: - raise nncf.InternalError( - "Attempt to apply overflow issue fix to quantizer which is not configured for that." - ) + msg = "Attempt to apply overflow issue fix to quantizer which is not configured for that." + raise nncf.InternalError(msg) # Multiplier to expand scale from 7 bit to 8 bit multiplier = 127 / 63 if self.narrow_range else 255 / 127 @@ -453,9 +454,8 @@ def _create_variables(self, layer, input_shape, channel_axes, name: str = ""): def apply_overflow_fix(self, weights): if self.num_bits != 8 or not self._half_range: - raise nncf.InternalError( - "Attempt to apply overflow issue fix to quantizer which is not configured for that." - ) + msg = "Attempt to apply overflow issue fix to quantizer which is not configured for that." 
+ raise nncf.InternalError(msg) # Low value shift to expand quantize range from 7 bit to 8 bit properly weights["input_low_var"].assign( diff --git a/nncf/tensorflow/quantization/utils.py b/nncf/tensorflow/quantization/utils.py index fdfbd43825c..2b4f6e95fb9 100644 --- a/nncf/tensorflow/quantization/utils.py +++ b/nncf/tensorflow/quantization/utils.py @@ -21,7 +21,8 @@ def apply_overflow_fix(model: tf.keras.Model, op_names: List[str]) -> None: if not isinstance(model, tf.keras.Model): - raise ValueError(f"Expected model to be a `tf.keras.Model` instance but got: {type(model)}") + msg = f"Expected model to be a `tf.keras.Model` instance but got: {type(model)}" + raise ValueError(msg) for wrapped_layer, weight_attr, op in get_nncf_operations(model, op_names): if op.half_range: diff --git a/nncf/tensorflow/sparsity/magnitude/algorithm.py b/nncf/tensorflow/sparsity/magnitude/algorithm.py index 01d5cd585f0..2bd8d302c44 100644 --- a/nncf/tensorflow/sparsity/magnitude/algorithm.py +++ b/nncf/tensorflow/sparsity/magnitude/algorithm.py @@ -154,7 +154,8 @@ def __init__(self, target_model, config: NNCFConfig, op_names): scheduler_type = params.get("schedule", "polynomial") if scheduler_type == "adaptive": - raise ValueError("Magnitude sparsity algorithm do not support adaptive scheduler") + msg = "Magnitude sparsity algorithm does not support the adaptive scheduler" + raise ValueError(msg) scheduler_cls = SPARSITY_SCHEDULERS.get(scheduler_type) self._scheduler: SparsityScheduler = scheduler_cls(self, params) @@ -187,9 +188,8 @@ def set_sparsity_level(self, sparsity_level, run_batchnorm_adaptation: bool = Fa """ if not self._frozen: if sparsity_level >= 1 or sparsity_level < 0: - raise AttributeError( - "Sparsity level should be within interval [0,1), actual value to set is: {}".format(sparsity_level) - ) + msg = f"Sparsity level should be within interval [0,1), actual value to set is: {sparsity_level}" + raise AttributeError(msg) self._threshold = self._select_threshold(sparsity_level) self._set_masks_for_threshold(self._threshold) diff --git a/nncf/tensorflow/sparsity/magnitude/operation.py b/nncf/tensorflow/sparsity/magnitude/operation.py index 24aa3cc10e7..3f8d48df6fe 100644 --- a/nncf/tensorflow/sparsity/magnitude/operation.py +++ b/nncf/tensorflow/sparsity/magnitude/operation.py @@ -22,7 +22,8 @@ class BinaryMask(NNCFOperation): def build(self, input_shape, input_type, name, layer): if input_type is not InputType.WEIGHTS: - raise ValueError("Binary Mask operation could not be applied to input of the layer: {}".format(layer.name)) + msg = f"Binary Mask operation could not be applied to input of the layer: {layer.name}" + raise ValueError(msg) mask = layer.add_weight( name + "_mask", diff --git a/nncf/tensorflow/sparsity/rb/algorithm.py b/nncf/tensorflow/sparsity/rb/algorithm.py index ee9985b053a..987382d65ca 100644 --- a/nncf/tensorflow/sparsity/rb/algorithm.py +++ b/nncf/tensorflow/sparsity/rb/algorithm.py @@ -124,7 +124,8 @@ def __init__(self, target_model, config: NNCFConfig, op_names: List[str]): sparsity_level_mode = params.get("sparsity_level_setting_mode", SPARSITY_LEVEL_SETTING_MODE) if sparsity_level_mode == "local": - raise NotImplementedError("RB sparsity algorithm do not support local sparsity loss") + msg = "RB sparsity algorithm does not support a local sparsity loss" + raise NotImplementedError(msg) target_ops = [] for wrapped_layer, _, op in get_nncf_operations(self.model, self._op_names):
schedule_type = params.get("schedule", "exponential") if schedule_type == "adaptive": - raise NotImplementedError("RB sparsity algorithm do not support adaptive scheduler") + msg = "RB sparsity algorithm does not support the adaptive scheduler" + raise NotImplementedError(msg) scheduler_cls = SPARSITY_SCHEDULERS.get(schedule_type) self._scheduler = scheduler_cls(self, params) diff --git a/nncf/tensorflow/sparsity/rb/loss.py b/nncf/tensorflow/sparsity/rb/loss.py index 99f6067a0e3..47124702d5b 100644 --- a/nncf/tensorflow/sparsity/rb/loss.py +++ b/nncf/tensorflow/sparsity/rb/loss.py @@ -57,7 +57,8 @@ def target_sparsity_rate(self): eager_target = tf.keras.backend.eval(self.target) rate = 1.0 - eager_target if rate < 0 or rate > 1: - raise ValueError("Target is not within range [0, 1]") + msg = "Target is not within range [0, 1]" + raise ValueError(msg) return rate def set_target_sparsity_loss(self, sparsity_level): diff --git a/nncf/tensorflow/sparsity/rb/operation.py b/nncf/tensorflow/sparsity/rb/operation.py index edc5686e510..ef4989f7ee2 100644 --- a/nncf/tensorflow/sparsity/rb/operation.py +++ b/nncf/tensorflow/sparsity/rb/operation.py @@ -43,9 +43,8 @@ def build(self, input_shape, input_type: InputType, name: str, layer: NNCFWrappe :param layer: Layer which needs to be sparsifyed. """ if input_type is not InputType.WEIGHTS: - raise ValueError( - "RB Sparsity mask operation could not be applied to input of the layer: {}".format(layer.name) - ) + msg = f"RB Sparsity mask operation could not be applied to input of the layer: {layer.name}" + raise ValueError(msg) mask = layer.add_weight( name + "_mask", diff --git a/nncf/tensorflow/sparsity/utils.py b/nncf/tensorflow/sparsity/utils.py index 6658e405381..7748ac6ca5d 100644 --- a/nncf/tensorflow/sparsity/utils.py +++ b/nncf/tensorflow/sparsity/utils.py @@ -24,7 +24,8 @@ def strip_model_from_masks(model: tf.keras.Model, op_names: List[str]) -> tf.keras.Model: if not isinstance(model, tf.keras.Model): - raise ValueError(f"Expected model to be a `tf.keras.Model` instance but got: {type(model)}") + msg = f"Expected model to be a `tf.keras.Model` instance but got: {type(model)}" + raise ValueError(msg) transformations = TFTransformationLayout() for wrapped_layer, weight_attr, op in get_nncf_operations(model, op_names): diff --git a/nncf/tensorflow/tensor_statistics/statistics.py b/nncf/tensorflow/tensor_statistics/statistics.py index fc5c39ee9a4..06c3acff440 100644 --- a/nncf/tensorflow/tensor_statistics/statistics.py +++ b/nncf/tensorflow/tensor_statistics/statistics.py @@ -47,10 +47,12 @@ def tf_convert_stat_to_min_max_tensor_stat(statistic: TensorStatistic) -> TFMinM ) if isinstance(statistic, TFPercentileTensorStatistic): if len(statistic.percentile_vs_values_dict.keys()) < 2: - raise ValueError("Cannot create a min-max statistic for less than 2 percentile values") + msg = "Cannot create a min-max statistic for less than 2 percentile values" + raise ValueError(msg) min_pct = min(statistic.percentile_vs_values_dict.keys()) max_pct = max(statistic.percentile_vs_values_dict.keys()) return TFMinMaxTensorStatistic( statistic.percentile_vs_values_dict[min_pct], statistic.percentile_vs_values_dict[max_pct] ) - raise ValueError("Unknown TensorStatistic to generate min-max stat from!") + msg = "Unknown TensorStatistic to generate min-max stat from!"
+ raise ValueError(msg) diff --git a/nncf/tensorflow/utils/state.py b/nncf/tensorflow/utils/state.py index df223c62623..1c51ddeaf39 100644 --- a/nncf/tensorflow/utils/state.py +++ b/nncf/tensorflow/utils/state.py @@ -75,9 +75,8 @@ def state(self) -> Dict[str, Any]: return self._state def serialize(self) -> str: - raise NotImplementedError( - "Use an instance of the `TFCompressionState` class to serialize the compression state." - ) + msg = "Use an instance of the `TFCompressionState` class to serialize the compression state." + raise NotImplementedError(msg) def deserialize(self, string_value: str) -> None: """ diff --git a/nncf/torch/accuracy_aware_training/runner.py b/nncf/torch/accuracy_aware_training/runner.py index f15e388d3c6..eb6a95bc467 100644 --- a/nncf/torch/accuracy_aware_training/runner.py +++ b/nncf/torch/accuracy_aware_training/runner.py @@ -167,6 +167,7 @@ def _make_checkpoint_path(self, is_best, compression_rate=None): base_path = osp.join(self._checkpoint_save_dir, "acc_aware_checkpoint") if is_best: if compression_rate is None: - raise ValueError("Compression rate cannot be None") + msg = "Compression rate cannot be None" + raise ValueError(msg) return f"{base_path}_best_{compression_rate:.3f}{extension}" return f"{base_path}_last{extension}" diff --git a/nncf/torch/automl/agent/ddpg/ddpg.py b/nncf/torch/automl/agent/ddpg/ddpg.py index cc531ff8b42..1cbfa0f34de 100644 --- a/nncf/torch/automl/agent/ddpg/ddpg.py +++ b/nncf/torch/automl/agent/ddpg/ddpg.py @@ -269,13 +269,13 @@ def load_weights(self, output): if output is None: return - self.actor.load_state_dict(torch.load("{}/actor.pkl".format(output))) + self.actor.load_state_dict(torch.load(f"{output}/actor.pkl")) - self.critic.load_state_dict(torch.load("{}/critic.pkl".format(output))) + self.critic.load_state_dict(torch.load(f"{output}/critic.pkl")) def save_model(self, output): - torch.save(self.actor.state_dict(), "{}/actor.pkl".format(output)) - torch.save(self.critic.state_dict(), "{}/critic.pkl".format(output)) + torch.save(self.actor.state_dict(), f"{output}/actor.pkl") + torch.save(self.critic.state_dict(), f"{output}/critic.pkl") def seed(self, s): torch.manual_seed(s) diff --git a/nncf/torch/automl/agent/ddpg/memory.py b/nncf/torch/automl/agent/ddpg/memory.py index f4e4b964428..684db25b869 100644 --- a/nncf/torch/automl/agent/ddpg/memory.py +++ b/nncf/torch/automl/agent/ddpg/memory.py @@ -9,9 +9,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- -from __future__ import absolute_import - import random from collections import deque from collections import namedtuple @@ -241,7 +238,8 @@ def append(self, observation, action, reward, terminal, training=True): def discard(self, subscript): if not isinstance(subscript, slice): - raise ValueError("discard function only supports input of type slice") + msg = "discard function only supports input of type slice" + raise ValueError(msg) del self.observations[subscript] del self.actions[subscript] diff --git a/nncf/torch/automl/environment/quantization_env.py b/nncf/torch/automl/environment/quantization_env.py index e81dd22b91f..f2524007a52 100644 --- a/nncf/torch/automl/environment/quantization_env.py +++ b/nncf/torch/automl/environment/quantization_env.py @@ -212,7 +212,8 @@ def __init__( # Create master dataframe to keep track of quantizable layers and their attributes self.master_df, self.state_list = self._get_state_space(self.qctrl, self.qmodel, self.quantizer_table) if self.master_df.isnull().values.any(): - raise ValueError("Q.Env Master Dataframe has null value(s)") + msg = "Q.Env Master Dataframe has null value(s)" + raise ValueError(msg) assert len(self.quantizer_table) == len( self.qctrl.all_quantizations @@ -234,13 +235,11 @@ def __init__( self.target_model_size = self.orig_model_size * self.compression_ratio if self.target_model_size < self.min_model_size and self.target_model_size > self.max_model_size: - raise ValueError( - "Model Size Ratio {} is out of bound ({}, {})".format( - self.compression_ratio, - self.min_model_size / self.orig_model_size, - self.max_model_size / self.orig_model_size, - ) + msg = ( + f"Model Size Ratio {self.compression_ratio} is out of bounds" + f" ({self.min_model_size / self.orig_model_size}, {self.max_model_size / self.orig_model_size})" ) + raise ValueError(msg) # Compression Ratio Calculation (BOP relative to 8-bit) self.compression_ratio_calculator = CompressionRatioCalculator( @@ -394,7 +393,8 @@ def _get_layer_attr(self, row: pd.Series) -> pd.Series: feature["prev_action"] = 0.0 # placeholder else: - raise NotImplementedError("State embedding extraction of {}".format(m.__class__.__name__)) + msg = f"State embedding extraction of {m.__class__.__name__} is not implemented" + raise NotImplementedError(msg) elif isinstance(qid, NonWeightQuantizerId): qmod = self.qctrl.all_quantizations[qid] @@ -410,9 +410,11 @@ def _get_layer_attr(self, row: pd.Series) -> pd.Series: feature["prev_action"] = 0.0 if len(input_shape) != 4 and len(input_shape) != 2: - raise NotImplementedError("A design is required to cater this scenario. Pls. report to maintainer") + msg = "A design is required to cater for this scenario. Please report to the maintainers." + raise NotImplementedError(msg) else: - raise ValueError("qid is an instance of unexpected class {}".format(qid.__class__.__name__)) + msg = f"qid is an instance of unexpected class {qid.__class__.__name__}" + raise ValueError(msg) return pd.Series(feature) @@ -476,7 +478,8 @@ def _run_quantization_pipeline(self, finetune=False) -> float: self._run_batchnorm_adaptation() if finetune: - raise NotImplementedError("Post-Quantization fine tuning is not implemented.") + msg = "Post-Quantization fine tuning is not implemented."
+ raise NotImplementedError(msg) with torch.no_grad(): quantized_score = self.eval_fn(self.qmodel, self.eval_loader) nncf_logger.info(f"[Q.Env] Quantized Score: {quantized_score:.3f}") @@ -654,7 +657,7 @@ def _dump_groups_of_adjacent_quantizers(self): group_members.append(self.master_df.index[self.master_df.qid == str(wq[0])][0]) adj_quantizer_groups.append(natsorted(group_members)) - with safe_open(self.dump_dir / "{}_groups_of_adjacent_quantizers.json".format(self.model_name), "w") as DUMP_FH: + with safe_open(self.dump_dir / f"{self.model_name}_groups_of_adjacent_quantizers.json", "w") as DUMP_FH: json.dump(natsorted(adj_quantizer_groups), DUMP_FH, indent=4) def _align_bw_action(self): @@ -705,5 +708,5 @@ def _dump_adjacent_quantizer_group_alignment(self): ) os.makedirs(self.dump_dir / "bw_alignment", exist_ok=True) - with safe_open(self.dump_dir / "bw_alignment/{0:03d}_bw_alignment.json".format(self._n_eval), "w") as DUMP_FH: + with safe_open(self.dump_dir / f"bw_alignment/{self._n_eval:03d}_bw_alignment.json", "w") as DUMP_FH: json.dump(list_of_dump_dict, DUMP_FH, indent=4) diff --git a/nncf/torch/checkpoint_loading.py b/nncf/torch/checkpoint_loading.py index 35215f3a15e..3dcc308256d 100644 --- a/nncf/torch/checkpoint_loading.py +++ b/nncf/torch/checkpoint_loading.py @@ -131,7 +131,7 @@ def handle_problematic(self, is_resume: bool, are_all_loaded_params_matched: boo error_msgs = [] def add_error_msg(name, keys_): - error_msgs.insert(0, "{} key(s):\n{}. ".format(name, ",\n".join('\t\t"{}"'.format(k) for k in keys_))) + error_msgs.insert(0, "{} key(s):\n{}. ".format(name, ",\n".join(f'\t\t"{k}"' for k in keys_))) for key_status, keys in self._keys.items(): is_missing = key_status == ProcessedKeyStatus.MISSING diff --git a/nncf/torch/composite_compression.py b/nncf/torch/composite_compression.py index 206a1b06d47..895b0037445 100644 --- a/nncf/torch/composite_compression.py +++ b/nncf/torch/composite_compression.py @@ -45,10 +45,11 @@ def __init__(self, config: NNCFConfig, should_init: bool = True): algo_names = extract_algorithm_names(config) if len(algo_names) < 2: - raise nncf.ValidationError( + msg = ( "Composite algorithm builder must be supplied with a config with more than one " "compression algo specified!" 
) + raise nncf.ValidationError(msg) for algo_name in algo_names: algo_builder = PT_COMPRESSION_ALGORITHMS.get(algo_name) self._child_builders.append(algo_builder(config, should_init=should_init)) diff --git a/nncf/torch/compression_method_api.py b/nncf/torch/compression_method_api.py index 40765ec76a5..d2d088a6284 100644 --- a/nncf/torch/compression_method_api.py +++ b/nncf/torch/compression_method_api.py @@ -167,10 +167,11 @@ def build_controller(self, model: TModel) -> PTCompressionAlgorithmController: """ ctrl = self._build_controller(model) if not isinstance(ctrl, PTCompressionAlgorithmController): - raise nncf.InternalError( + msg = ( "Internal error: builder must create controller inherited from " "`PTCompressionAlgorithmController` class" ) + raise nncf.InternalError(msg) ctrl.set_builder_state_with_name(self.name, self.get_state()) return ctrl @@ -207,12 +208,13 @@ def _handle_frozen_layers(self, target_model: NNCFNetwork): f"{reason}, compressing them without tuning weights.\nFrozen layers:\n{scopes_to_print}" ) else: - raise nncf.InternalError( + msg = ( f"{reason}.\n" f"Please unfreeze them or put into the Ignored Scope.\n" f"Frozen Layers:\n" f"{scopes_to_print}" ) + raise nncf.InternalError(msg) def _should_consider_scope(self, node_name: NNCFNodeName) -> bool: return should_consider_scope(node_name, self.ignored_scopes, self.target_scopes) diff --git a/nncf/torch/dynamic_graph/context.py b/nncf/torch/dynamic_graph/context.py index 831cf27572c..7a556a4c639 100644 --- a/nncf/torch/dynamic_graph/context.py +++ b/nncf/torch/dynamic_graph/context.py @@ -55,7 +55,7 @@ def __eq__(self, other): return self.__dict__ == other.__dict__ def __str__(self): - return str(self.op_address) + "|INPUT{}".format(self.input_port_id) + return str(self.op_address) + f"|INPUT{self.input_port_id}" def __hash__(self): return hash(str(self)) @@ -255,7 +255,7 @@ def reset_scope_operator_call_counters(self): @staticmethod def _get_operator_counter_key(operator_name: str, scope: Scope): - return "{}_{}".format(str(scope), operator_name) + return f"{str(scope)}_{operator_name}" def register_operator_call(self, operator_name: str, scope: Scope): key = self._get_operator_counter_key(operator_name, scope) diff --git a/nncf/torch/dynamic_graph/io_handling.py b/nncf/torch/dynamic_graph/io_handling.py index 9b079b108a5..25f161f988b 100644 --- a/nncf/torch/dynamic_graph/io_handling.py +++ b/nncf/torch/dynamic_graph/io_handling.py @@ -128,7 +128,8 @@ def __init__(self, shape: List[int], type_str: str = "float", keyword: str = Non else: self.filler = filler if self.filler not in self.FILLER_TYPES: - raise ValueError(f"Unknown input filler type: {filler}") + msg = f"Unknown input filler type: {filler}" + raise ValueError(msg) @staticmethod def _string_to_torch_type(string): @@ -179,7 +180,8 @@ def from_nncf_config(cls, config: NNCFConfig): """ input_infos = config.get("input_info") if input_infos is None: - raise nncf.ValidationError("Passed NNCFConfig does not have an 'input_info' field") + msg = "Passed NNCFConfig does not have an 'input_info' field" + raise nncf.ValidationError(msg) if isinstance(input_infos, dict): return FillerInputInfo( [ @@ -203,7 +205,8 @@ def from_nncf_config(cls, config: NNCFConfig): ) ) return FillerInputInfo(elements) - raise nncf.ValidationError("Invalid input_infos specified in config - should be either dict or list of dicts") + msg = "Invalid input_infos specified in config - should be either dict or list of dicts" + raise nncf.ValidationError(msg) def get_forward_inputs( self, 
device: Optional[Union[str, torch.device]] = None diff --git a/nncf/torch/dynamic_graph/layer_attributes_handlers.py b/nncf/torch/dynamic_graph/layer_attributes_handlers.py index 61189782e97..b1fbdcf4db1 100644 --- a/nncf/torch/dynamic_graph/layer_attributes_handlers.py +++ b/nncf/torch/dynamic_graph/layer_attributes_handlers.py @@ -178,12 +178,14 @@ def apply_args_defaults( elif idx < len(args): args_dict[arg_desc] = args[idx] else: - raise ValueError("Incorrect args_signature, can not by applied to function arguments.") + msg = "Incorrect args_signature, cannot be applied to the function arguments." + raise ValueError(msg) elif isinstance(arg_desc, Tuple): arg_name, default = arg_desc args_dict[arg_name] = kwargs.get(arg_name, args[idx] if idx < len(args) else default) else: - raise ValueError("Incorrect args_signature, element of list should be str or tuple.") + msg = "Incorrect args_signature, each element of the list should be a str or a tuple." + raise ValueError(msg) return args_dict diff --git a/nncf/torch/dynamic_graph/patch_pytorch.py b/nncf/torch/dynamic_graph/patch_pytorch.py index 81d0747bfd5..4c756ac465b 100644 --- a/nncf/torch/dynamic_graph/patch_pytorch.py +++ b/nncf/torch/dynamic_graph/patch_pytorch.py @@ -43,7 +43,8 @@ def get_namespaces_to_patch(namespace_target: NamespaceTarget) -> object: return TracedParameter if namespace_target == NamespaceTarget.TORCH: return torch - raise nncf.ValidationError("{} namespace wasn't found in {}".format(namespace_target, NamespaceTarget)) + msg = f"{namespace_target} namespace wasn't found in {NamespaceTarget}" + raise nncf.ValidationError(msg) def get_namespace_to_extract_functions_from(namespace_target: NamespaceTarget) -> object: @@ -55,7 +56,8 @@ def get_namespace_to_extract_functions_from(namespace_target: NamespaceTarget) - return torch.nn.Parameter if namespace_target == NamespaceTarget.TORCH: return torch._C._VariableFunctions - raise nncf.ValidationError("{} namespace wasn't found in {}".format(namespace_target, NamespaceTarget)) + msg = f"{namespace_target} namespace wasn't found in {NamespaceTarget}" + raise nncf.ValidationError(msg) class FunctionsToPatchWithoutTracing: @@ -255,7 +257,8 @@ def wrapper(model: Optional[Callable] = None, **kwargs): from nncf.torch.nncf_network import NNCFNetwork if isinstance(model, NNCFNetwork): - raise TypeError("At the moment torch.compile() is not supported for models optimized by NNCF.") + msg = "At the moment torch.compile() is not supported for models optimized by NNCF."
+ raise TypeError(msg) with disable_patching(): return _ORIG_TORCH_COMPILE(model, **kwargs) diff --git a/nncf/torch/dynamic_graph/scope.py b/nncf/torch/dynamic_graph/scope.py index 87a59f4b639..4b76c120c5e 100644 --- a/nncf/torch/dynamic_graph/scope.py +++ b/nncf/torch/dynamic_graph/scope.py @@ -23,7 +23,7 @@ def __init__(self, calling_module_class_name: str, calling_field_name: str = Non def __str__(self): if self.calling_field_name is None: return self.calling_module_class_name - return "{cls}[{name}]".format(cls=self.calling_module_class_name, name=self.calling_field_name) + return f"{self.calling_module_class_name}[{self.calling_field_name}]" def __eq__(self, other: "ScopeElement"): return (self.calling_module_class_name == other.calling_module_class_name) and ( @@ -37,12 +37,14 @@ def __hash__(self): def from_str(string: str): matches = re.search(r"(.*)\[(.*)\]|(.*)", string) if matches is None: - raise nncf.InternalError("Invalid scope element string") + msg = "Invalid scope element string" + raise nncf.InternalError(msg) if matches.groups()[0] is None and matches.groups()[1] is None: return ScopeElement(matches.groups()[2]) if matches.groups()[0] is not None and matches.groups()[1] is not None: return ScopeElement(matches.groups()[0], matches.groups()[1]) - raise nncf.InternalError("Could not parse the scope element string") + msg = "Could not parse the scope element string" + raise nncf.InternalError(msg) class Scope: diff --git a/nncf/torch/dynamic_graph/scope_access.py b/nncf/torch/dynamic_graph/scope_access.py index 355198e6bb4..84c4addab14 100644 --- a/nncf/torch/dynamic_graph/scope_access.py +++ b/nncf/torch/dynamic_graph/scope_access.py @@ -27,10 +27,10 @@ def get_module_by_scope(model: torch.nn.Module, scope: Scope) -> Optional[torch. next_module = curr_module._modules.get(scope_element.calling_field_name) if next_module is None: - raise nncf.InternalError( - "Could not find a {} module member in {} module of scope {} during node search".format( - scope_element.calling_field_name, scope_element.calling_module_class_name, str(scope) - ) + msg = ( + f"Could not find a {scope_element.calling_field_name} module member in" + f" {scope_element.calling_module_class_name} module of scope {str(scope)} during node search" ) + raise nncf.InternalError(msg) curr_module = next_module return curr_module diff --git a/nncf/torch/dynamic_graph/trace_functions.py b/nncf/torch/dynamic_graph/trace_functions.py index 2c506328333..5890d2c5810 100644 --- a/nncf/torch/dynamic_graph/trace_functions.py +++ b/nncf/torch/dynamic_graph/trace_functions.py @@ -39,8 +39,7 @@ def flatten(items): it = items.items() if hasattr(items, "items") else iter(items) for item in it: if is_iterable(item): - for i in flatten(item): - yield i + yield from flatten(item) else: yield item @@ -82,10 +81,11 @@ def forward_trace_only(operator: Callable, *args, **kwargs): forwarded_meta.shape = tuple(result[out_idx].shape) result[out_idx] = TracedTensor.from_torch_tensor(result[out_idx], forwarded_meta) elif len(input_traced_tensor_indices) != len(output_tensors_to_be_traced_indices): - raise nncf.ValidationError( - "Unable to forward trace through operator {} - " - "input and output tensor count mismatch!".format(operator.__name__) + msg = ( + f"Unable to forward trace through operator {operator.__name__} - " + "input and output tensor count mismatch!" 
) + raise nncf.ValidationError(msg) else: # Assume that output tensor order corresponds to input tensor order for in_idx, out_idx in zip(input_traced_tensor_indices, output_tensors_to_be_traced_indices): @@ -96,10 +96,8 @@ def forward_trace_only(operator: Callable, *args, **kwargs): if was_tuple: result = tuple(result) elif len(input_traced_tensor_indices) > 1: - raise nncf.ValidationError( - "Unable to forward trace through operator {} - " - "input and output tensor count mismatch!".format(operator.__name__) - ) + msg = f"Unable to forward trace through operator {operator.__name__} - input and output tensor count mismatch!" + raise nncf.ValidationError(msg) elif input_traced_tensor_indices: forwarded_meta = deepcopy(fargs[input_traced_tensor_indices[0]].tensor_meta) if forwarded_meta is not None: diff --git a/nncf/torch/dynamic_graph/trace_tensor.py b/nncf/torch/dynamic_graph/trace_tensor.py index e977ddb6d8e..138759ddebb 100644 --- a/nncf/torch/dynamic_graph/trace_tensor.py +++ b/nncf/torch/dynamic_graph/trace_tensor.py @@ -48,7 +48,7 @@ def __hash__(self): return hash((self.creator_id, self.index, self.shape)) def __str__(self): - return "C{}_I{}_".format(self.creator_id, self.index) + "S" + "x".join([str(s) for s in self.shape]) + return f"C{self.creator_id}_I{self.index}_" + "S" + "x".join([str(s) for s in self.shape]) class TracedTensorMixin: diff --git a/nncf/torch/dynamic_graph/wrappers.py b/nncf/torch/dynamic_graph/wrappers.py index bab14137e83..3107379ac02 100644 --- a/nncf/torch/dynamic_graph/wrappers.py +++ b/nncf/torch/dynamic_graph/wrappers.py @@ -190,7 +190,7 @@ def _execute_op( result = operator(*args, **kwargs) node = None if isinstance(result, type(NotImplemented)): - nncf_logger.debug("Operation {} returned NotImplemented".format(op_name)) + nncf_logger.debug(f"Operation {op_name} returned NotImplemented") elif ctx.trace_dynamic_graph: tensor_metas = make_tensor_metas(processed_input) node = ctx.find_operator_node(tensor_metas, op_address) diff --git a/nncf/torch/exporter.py b/nncf/torch/exporter.py index 811a8e526db..dddfb1fb0cb 100644 --- a/nncf/torch/exporter.py +++ b/nncf/torch/exporter.py @@ -94,7 +94,8 @@ def parse_format(save_format: str) -> Tuple[str, dict]: opset = int(split_format[1]) if opset is not None and opset <= 0: - raise ValueError("Incorrect save_format, expected 'onnx' or 'onnx_'.") + msg = "Incorrect save_format, expected 'onnx' or 'onnx_'." + raise ValueError(msg) if opset != PTExporter._ONNX_DEFAULT_OPSET: nncf_logger.warning( @@ -131,7 +132,8 @@ def export_model(self, save_path: str, save_format: str = PTExportFormat.ONNX) - if export_fn is None: available_formats = list(format_to_export_fn.keys()) - raise ValueError(f"Unsupported saving format: '{save_format}'. Available formats: {available_formats}") + msg = f"Unsupported saving format: '{save_format}'. Available formats: {available_formats}" + raise ValueError(msg) export_fn(**fn_args) diff --git a/nncf/torch/extensions/__init__.py b/nncf/torch/extensions/__init__.py index ba26cdcc401..34c30c6847c 100644 --- a/nncf/torch/extensions/__init__.py +++ b/nncf/torch/extensions/__init__.py @@ -141,7 +141,8 @@ def force_build_cuda_extensions(): class CudaNotAvailableStub: def __getattr__(self, item): - raise nncf.InstallationError( + msg = ( f"CUDA is not available on this machine. Check that the machine has a GPU and a proper " f"driver supporting CUDA {torch.version.cuda} is installed." 
) + raise nncf.InstallationError(msg) diff --git a/nncf/torch/extractor.py b/nncf/torch/extractor.py index 4b4c7f4394b..640a27f5ac6 100644 --- a/nncf/torch/extractor.py +++ b/nncf/torch/extractor.py @@ -173,10 +173,12 @@ def try_to_fuse_conv( return extracted_module if output_node != next_nodes[0]: - raise nncf.InternalError(f"Output node {output_node} not found after {input_node}") + msg = f"Output node {output_node} not found after {input_node}" + raise nncf.InternalError(msg) if next_nodes[0].metatype != om.PTBatchNormMetatype: - raise nncf.InternalError("Supported only BatchNorm layers") + msg = "Only BatchNorm layers are supported" + raise nncf.InternalError(msg) extracted_bn = extract_bn(next_nodes[0], model) if extracted_bn is None: @@ -199,7 +201,8 @@ def extract_model(model: NNCFNetwork, input_nodes: List[str], output_nodes: List """ if len(input_nodes) != 1 or len(output_nodes) != 1: - raise nncf.InternalError("input_nodes and output_nodes should contain only one node.") + msg = "input_nodes and output_nodes should contain only one node." + raise nncf.InternalError(msg) graph = model.nncf.get_graph() input_node = graph.get_node_by_name(input_nodes[0]) diff --git a/nncf/torch/graph/graph.py b/nncf/torch/graph/graph.py index 909a0d9fa09..6bd1a3b71f8 100644 --- a/nncf/torch/graph/graph.py +++ b/nncf/torch/graph/graph.py @@ -89,7 +89,8 @@ def get_scope_by_node_name(self, node_name: NNCFNodeName) -> Scope: matches.append(Scope.from_str(scope_str)) assert len(matches) <= 1 if not matches: - raise nncf.InternalError("Node name {} not found in the node-vs-scope dict!".format(node_name)) + msg = f"Node name {node_name} not found in the node-vs-scope dict!" + raise nncf.InternalError(msg) return matches[0] def get_nodes_with_missed_input_edges(self) -> List[NNCFNode]: diff --git a/nncf/torch/graph/transformations/commands.py b/nncf/torch/graph/transformations/commands.py index 20c32bf4409..4e3ae9b1b00 100644 --- a/nncf/torch/graph/transformations/commands.py +++ b/nncf/torch/graph/transformations/commands.py @@ -48,7 +48,8 @@ def __init__(self, target_type: TargetType, target_node_name: NNCFNodeName, *, i self.target_node_name = target_node_name self.target_type = target_type if self.target_type not in self._OPERATION_TYPES + self._HOOK_TYPES + self._LAYER_TYPE: - raise NotImplementedError("Unsupported target type: {}".format(target_type)) + msg = f"Unsupported target type: {target_type}" + raise NotImplementedError(msg) self.input_port_id = input_port_id @@ -64,10 +65,10 @@ def __str__(self): prefix = str(self.target_type) retval = prefix if self.target_type in self._OPERATION_TYPES + self._LAYER_TYPE: - retval += " {}".format(self.target_node_name) + retval += f" {self.target_node_name}" elif self.target_type in self._HOOK_TYPES: if self.input_port_id is not None: - retval += " {}".format(self.input_port_id) + retval += f" {self.input_port_id}" retval += " " + str(self.target_node_name) return retval diff --git a/nncf/torch/graph/transformations/serialization.py b/nncf/torch/graph/transformations/serialization.py index 195ee59f6f5..b016fb960d3 100644 --- a/nncf/torch/graph/transformations/serialization.py +++ b/nncf/torch/graph/transformations/serialization.py @@ -62,7 +62,8 @@ def serialize_command(command: PTTransformationCommand) -> Dict[str, Any]: :return: Serialized representation of given command as a dict.
""" if not isinstance(command, SUPPORTED_COMMANDS): - raise RuntimeError(f"Command type {command.__class__} is not supported.") + msg = f"Command type {command.__class__} is not supported." + raise RuntimeError(msg) serialized_transformation = dict() serialized_transformation["type"] = command.__class__.__name__ @@ -76,10 +77,11 @@ def serialize_command(command: PTTransformationCommand) -> Dict[str, Any]: # Check compression module is registered compression_module_name = command.fn.__class__.__name__ if compression_module_name not in COMPRESSION_MODULES.registry_dict: - raise RuntimeError( + msg = ( f"Could not serialize compression module with name {compression_module_name}." " Please register your module in the COMPRESSION_MODULES registry." ) + raise RuntimeError(msg) serialized_transformation["compression_module_name"] = compression_module_name serialized_transformation["fn_config"] = command.fn.get_config() serialized_transformation["hooks_group_name"] = command.hooks_group_name @@ -96,7 +98,8 @@ def deserialize_command(serialized_command: Dict[str, Any]) -> Union[PTInsertion :return: The deserialized command. """ if serialized_command["type"] not in (command_cls.__name__ for command_cls in SUPPORTED_COMMANDS): - raise RuntimeError(f"Command type {serialized_command['type']} is not supported.") + msg = f"Command type {serialized_command['type']} is not supported." + raise RuntimeError(msg) module_cls = COMPRESSION_MODULES.get(serialized_command["compression_module_name"]) fn = module_cls.from_config(serialized_command["fn_config"]) diff --git a/nncf/torch/initialization.py b/nncf/torch/initialization.py index cb1fc8e4a31..c2ff3e8d5db 100644 --- a/nncf/torch/initialization.py +++ b/nncf/torch/initialization.py @@ -89,7 +89,7 @@ def wrap_dataloader_for_init(data_loader) -> PTInitializingDataLoader: loaded_item = next(iter(data_loader)) if isinstance(loaded_item, (tuple, list)) and len(loaded_item) == 2: return DefaultInitializingDataLoader(data_loader) - raise NotImplementedError( + msg = ( "Could not deduce the forward arguments from the initializing dataloader output.\n" "By default it is assumed that the data loader used for initialize " "produces a tuple/list of (*model_input*, *ground_truth*) and that no special " @@ -100,13 +100,15 @@ def wrap_dataloader_for_init(data_loader) -> PTInitializingDataLoader: "See https://github.com/openvinotoolkit/nncf/blob/develop/docs/FAQ.md#pt_init_dataloader for " "an example of how to do this this in your code." 
) + raise NotImplementedError(msg) return data_loader class PartialDataLoader: def __init__(self, regular_data_loader: DataLoader, iter_ratio=1.0): if iter_ratio < 0.0 or iter_ratio > 1.0: - raise ValueError("iter_ratio must be within 0 to 1 range") + msg = "iter_ratio must be within 0 to 1 range" + raise ValueError(msg) self.data_loader = regular_data_loader self.batch_size = regular_data_loader.batch_size self._stop_id = math.ceil(len(self.data_loader) * iter_ratio) diff --git a/nncf/torch/knowledge_distillation/algo.py b/nncf/torch/knowledge_distillation/algo.py index 02c70aab482..789f2995f0b 100644 --- a/nncf/torch/knowledge_distillation/algo.py +++ b/nncf/torch/knowledge_distillation/algo.py @@ -41,7 +41,8 @@ def __init__(self, config: NNCFConfig, should_init: bool = True): self.scale = self._algo_config.get("scale", KNOWLEDGE_DISTILLATION_SCALE) self.temperature = self._algo_config.get("temperature", KNOWLEDGE_DISTILLATION_TEMPERATURE) if "temperature" in self._algo_config and self.kd_type == "mse": - raise ValueError("Temperature shouldn't be stated for MSE Loss (softmax only feature)") + msg = "Temperature shouldn't be stated for MSE Loss (softmax only feature)" + raise ValueError(msg) def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformationLayout: self.original_model = deepcopy(target_model).nncf.get_clean_shallow_copy() diff --git a/nncf/torch/layers.py b/nncf/torch/layers.py index 80a94e5df5f..5b05007e3f8 100644 --- a/nncf/torch/layers.py +++ b/nncf/torch/layers.py @@ -457,7 +457,7 @@ def register_module( # customly named attributes if it becomes necessary def wrap(cls): UNWRAPPED_USER_MODULES.registry_dict[cls.__name__] = cls - nncf_wrapped_module_class_name = "NNCFUser{}".format(cls.__name__) + nncf_wrapped_module_class_name = f"NNCFUser{cls.__name__}" NNCF_WRAPPED_USER_MODULES_DICT[cls] = type(nncf_wrapped_module_class_name, (_NNCFModuleMixin, cls), {}) get_base_attributes_fn = lambda self: GenericWeightedLayerAttributes( self.weight.requires_grad, self.weight.shape @@ -509,24 +509,17 @@ def extra_repr(self): def check_forward_input(self, input_): if input_.size(1) != self.input_size: - raise nncf.ValidationError( - "input_ has inconsistent input_size: got {}, expected {}".format(input_.size(1), self.input_size) - ) + msg = f"input_ has inconsistent input_size: got {input_.size(1)}, expected {self.input_size}" + raise nncf.ValidationError(msg) def check_forward_hidden(self, input_: torch.Tensor, hx: torch.Tensor, hidden_label: str = ""): if input_.size(0) != hx.size(0): - raise nncf.ValidationError( - "Input batch size {} doesn't match hidden{} batch size {}".format( - input_.size(0), hidden_label, hx.size(0) - ) - ) + msg = f"Input batch size {input_.size(0)} doesn't match hidden{hidden_label} batch size {hx.size(0)}" + raise nncf.ValidationError(msg) if hx.size(1) != self.hidden_size: - raise nncf.ValidationError( - "hidden{} has inconsistent hidden_size: got {}, expected {}".format( - hidden_label, hx.size(1), self.hidden_size - ) - ) + msg = f"hidden{hidden_label} has inconsistent hidden_size: got {hx.size(1)}, expected {self.hidden_size}" + raise nncf.ValidationError(msg) def reset_parameters(self): stdv = 1.0 / math.sqrt(self.hidden_size) @@ -788,11 +781,12 @@ def __init__( self.num_directions = 2 if bidirectional else 1 if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or isinstance(dropout, bool): - raise ValueError( + msg = ( "dropout should be a number in range [0, 1] " "representing the probability of an element being " 
"zeroed" ) + raise ValueError(msg) if dropout > 0 and num_layers == 1: nncf_logger.debug( f"dropout option adds dropout after all but last recurrent layer, " @@ -856,15 +850,11 @@ def check_forward_args(self, input_, hidden, batch_sizes): is_input_packed = batch_sizes is not None expected_input_dim = 2 if is_input_packed else 3 if input_.dim() != expected_input_dim: - raise nncf.ValidationError( - "input_ must have {} dimensions, got {}".format(expected_input_dim, input_.dim()) - ) + msg = f"input_ must have {expected_input_dim} dimensions, got {input_.dim()}" + raise nncf.ValidationError(msg) if self.input_size != input_.size(-1): - raise nncf.ValidationError( - "input_.size(-1) must be equal to input_size. Expected {}, got {}".format( - self.input_size, input_.size(-1) - ) - ) + msg = f"input_.size(-1) must be equal to input_size. Expected {self.input_size}, got {input_.size(-1)}" + raise nncf.ValidationError(msg) if is_input_packed: mini_batch = int(batch_sizes[0]) @@ -876,7 +866,8 @@ def check_forward_args(self, input_, hidden, batch_sizes): def check_hidden_size(hx, expected_hidden_size, msg="Expected hidden size {}, got {}"): expected_size = self.num_layers * self.num_directions if expected_size != len(hx): - raise nncf.InternalError("Expected number of hidden states {}, got {}".format(expected_size, len(hx))) + msg = f"Expected number of hidden states {expected_size}, got {len(hx)}" + raise nncf.InternalError(msg) for element in hx: if tuple(element.size()) != expected_hidden_size: raise nncf.InternalError(msg.format(expected_hidden_size, tuple(element.size()))) diff --git a/nncf/torch/model_creation.py b/nncf/torch/model_creation.py index 8674cb3d0ff..5bbce3f572b 100644 --- a/nncf/torch/model_creation.py +++ b/nncf/torch/model_creation.py @@ -115,7 +115,7 @@ def create_compressed_model( ) if isinstance(model, NNCFNetwork): - raise nncf.InternalError( + msg = ( "The model object has already been compressed.\n" "NNCF for PyTorch modifies the model object in-place, and repeat calls to " "`nncf.torch.create_compressed_model` with the same model object passed as argument " @@ -126,6 +126,7 @@ def create_compressed_model( "re-running cells involving `nncf.torch.create_compressed_model` the original model object " "is also re-created (via constructor call)." ) + raise nncf.InternalError(msg) set_debug_log_dir(config.get("log_dir", ".")) @@ -184,7 +185,7 @@ def get_input_info_from_config(config: NNCFConfig) -> ModelInputInfo: exact_info = LoaderInputInfo.from_nncf_config_dataloaders(config) if exact_info is not None: return exact_info - raise nncf.ValidationError( + msg = ( "Could not determine tensor inputs for the model's forward call.\n" "If you are using the `nncf.quantize` API, make sure that you supply the " "calibration dataloader to the `nncf.quantize` call.\n" @@ -196,6 +197,7 @@ def get_input_info_from_config(config: NNCFConfig) -> ModelInputInfo: f"{EXTRA_STRUCTS_WITH_DATALOADERS}\n" f"or by calling `nncf.torch.register_default_init_args`" ) + raise nncf.ValidationError(msg) def create_nncf_network( @@ -238,12 +240,13 @@ def create_nncf_network( :return: A model wrapped by NNCFNetwork, which is ready for adding compression.""" if dummy_forward_fn is not None and wrap_inputs_fn is None: - raise ValueError( + msg = ( "A custom dummy forward function was specified, but the corresponding input wrapping function " "was not. 
In case a custom dummy forward function is specified for purposes of NNCF graph " "building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with " "the input wrapping done in dummy_forward_fn." ) + raise ValueError(msg) # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode with training_mode_switcher(model, is_training=False): @@ -349,10 +352,11 @@ def wrap_model( :return: A model wrapped by NNCFNetwork. """ if not isinstance(model, torch.nn.Module): - raise TypeError( + msg = ( f"The provided model type {type(model)} is incompatible. " "Only models inheriting from torch.nn.Module are supported." ) + raise TypeError(msg) input_info = ExampleInputInfo.from_example_input(example_input) diff --git a/nncf/torch/model_graph_manager.py b/nncf/torch/model_graph_manager.py index 5c2a813f9ab..dff5230796f 100644 --- a/nncf/torch/model_graph_manager.py +++ b/nncf/torch/model_graph_manager.py @@ -74,7 +74,8 @@ def get_const_node(node: NNCFNode, port_id: int, graph: NNCFGraph) -> Optional[N if edge.input_port_id == port_id: weight_node = find_const_node_in_constant_subgraph(prev_node, graph) if weight_node is None: - raise nncf.InternalError("Could not find a constant node in the model graph.") + msg = "Could not find a constant node in the model graph." + raise nncf.InternalError(msg) return weight_node @@ -89,7 +90,7 @@ def split_const_name(const_name: str) -> Tuple[str, str]: """ index = const_name.rfind(".") if index == -1: - return str(), const_name + return "", const_name module_name = const_name[:index] weight_attr_name = const_name[index + 1 :] return module_name, weight_attr_name @@ -112,7 +113,8 @@ def get_module_by_name(module_name: str, model: torch.nn.Module) -> torch.nn.Mod curr_module = child_module break else: - raise nncf.ModuleNotFoundError(f"Could not find the {module_name} module in the model.") + msg = f"Could not find the {module_name} module in the model." 
+ raise nncf.ModuleNotFoundError(msg) return curr_module @@ -284,7 +286,8 @@ def set_const_data_to_port_id(data: torch.Tensor, node: NNCFNode, port_id: int, graph = model.nncf.get_graph() const_node = get_const_node(node, port_id, graph) if const_node is None: - raise nncf.InternalError(f"No found node with constant for {node.node_name} on {port_id} port") + msg = f"Could not find a constant node for {node.node_name} on port {port_id}" + raise nncf.InternalError(msg) const_name = const_node.layer_attributes.name module_name, const_attr_name = split_const_name(const_name) module = get_module_by_name(module_name, model) @@ -353,13 +356,15 @@ def get_weight_channel_axes(metatype: Type[OperatorMetatype], ndims: int, input_ return (ndims - 2,) if input_port_id == 2: return (ndims - 1,) - raise ValueError(f"Unexpected {input_port_id=} for {metatype=}") + msg = f"Unexpected {input_port_id=} for {metatype=}" + raise ValueError(msg) if metatype == om.PTMatMulMetatype: if input_port_id == 0: return () if ndims < 2 else (ndims - 2,) if input_port_id == 1: return () if ndims < 2 else (ndims - 1,) - raise ValueError(f"Unexpected {input_port_id=} for {metatype=}") + msg = f"Unexpected {input_port_id=} for {metatype=}" + raise ValueError(msg) if metatype in [om.PTConvTranspose1dMetatype, om.PTConvTranspose2dMetatype, om.PTConvTranspose3dMetatype]: return (1,) return (0,) diff --git a/nncf/torch/module_operations.py b/nncf/torch/module_operations.py index 9144389de31..d0775b18de6 100644 --- a/nncf/torch/module_operations.py +++ b/nncf/torch/module_operations.py @@ -50,7 +50,8 @@ def __init__(self, param_name, op): def __call__(self, module, _): if not hasattr(module, self._param_name): - raise TypeError("{} should have {} attribute".format(type(module), self._param_name)) + msg = f"{type(module)} should have {self._param_name} attribute" + raise TypeError(msg) value = getattr(module, self._param_name) result = super().__call__(value) setattr(module, self._param_name, result) @@ -86,7 +87,8 @@ def __call__(self, module, _): if is_optional: param_values.append(None) continue - raise TypeError("{} should have {} attribute".format(type(module), param_name)) + msg = f"{type(module)} should have {param_name} attribute" + raise TypeError(msg) param_values.append(getattr(module, param_name)) updated_kwargs = dict(zip(self._param_names, param_values)) updated_values = super().__call__(**updated_kwargs) diff --git a/nncf/torch/nncf_module_replacement.py b/nncf/torch/nncf_module_replacement.py index d8820d2b4b0..7700cb48387 100644 --- a/nncf/torch/nncf_module_replacement.py +++ b/nncf/torch/nncf_module_replacement.py @@ -132,7 +132,8 @@ def nncf_module_from(module: nn.Module) -> nn.Module: nncf_module = deepcopy(module) nncf_module = add_nncf_functionality_to_user_module(nncf_module) return nncf_module - raise nncf.InternalError(f"Could not extend module {module} with NNCF functionality!") + msg = f"Could not extend module {module} with NNCF functionality!"
+ raise nncf.InternalError(msg) def replace_modules_by_nncf_modules( @@ -253,11 +254,11 @@ def _replace_module_by_scope(base_model: torch.nn.Module, scope: Scope, replaced for scope_element in scope[1:]: # omit first scope element which corresponds to base module child_module = curr_module._modules.get(scope_element.calling_field_name) if child_module is None: - raise nncf.InternalError( - "Could not find a {} module member in {} module of scope {} during module replacement".format( - scope_element.calling_field_name, scope_element.calling_module_class_name, str(scope) - ) + msg = ( + f"Could not find a {scope_element.calling_field_name} module member in" + f" {scope_element.calling_module_class_name} module of scope {str(scope)} during module replacement" ) + raise nncf.InternalError(msg) owning_module = curr_module curr_module = child_module diff --git a/nncf/torch/nncf_network.py b/nncf/torch/nncf_network.py index 911a5d3b331..1da7aa8a05d 100644 --- a/nncf/torch/nncf_network.py +++ b/nncf/torch/nncf_network.py @@ -134,7 +134,8 @@ def _get_pt_insertion_type(target_type: TargetType, replaced_modules: bool) -> P map_target_types = TARGET_TYPE_VS_PT_INSERTION_TYPE_DICT_FOR_REPLACED_MODULES if not isinstance(target_type, TargetType) or target_type not in map_target_types: - raise nncf.InternalError("Unsupported target type for PyTorch: {}".format(target_type)) + msg = f"Unsupported target type for PyTorch: {target_type}" + raise nncf.InternalError(msg) return map_target_types[target_type] def __eq__(self, other: "PTInsertionPoint"): @@ -179,7 +180,8 @@ def forward(self): """ The module only serves a storage and namespacing purpose, forward functionality is not implemented. """ - raise NotImplementedError("Calling `forward` on NNCFInterface is prohibited.") + msg = "Calling `forward` on NNCFInterface is prohibited." + raise NotImplementedError(msg) def get_original_forward(self) -> Callable: """ @@ -280,7 +282,8 @@ def __init__( ) self._wrap_inputs_fn = self.__input_info_based_input_wrapper.wrap_inputs else: - raise ValueError("wrap_inputs_fn or input_infos should be passed.") + msg = "wrap_inputs_fn or input_infos should be passed." + raise ValueError(msg) if wrap_outputs_fn is not None: self._wrap_outputs_fn = wrap_outputs_fn @@ -451,7 +454,7 @@ def insert_at_point( elif point.insertion_type in [PTInsertionType.NNCF_MODULE_PRE_OP, PTInsertionType.NNCF_MODULE_POST_OP]: nncf_module = self.get_module_by_scope(point.module_scope) if not isinstance(nncf_module, _NNCFModuleMixin): - raise nncf.ValidationError( + msg = ( f"Failed to insert pre/post op for not registered custom module {point.module_scope}. NNCF only " f"supports native PyTorch modules with respect to trainable parameter (weight) compressed, such " f"as `torch.nn.Conv2d`. If your model contains a custom, non-PyTorch standard module with trainable" @@ -459,6 +462,7 @@ def insert_at_point( f"`@nncf.register_module` decorator. Please refer to `Compression of custom modules` section in " f"docs/Usage.md for more details." 
) + raise nncf.ValidationError(msg) norm_target_scope = self._normalize_variable_recurrent_scope(point.module_scope) norm_nncf_scopes = [] @@ -470,7 +474,8 @@ def insert_at_point( elif point.insertion_type == PTInsertionType.NNCF_MODULE_POST_OP: handle = nncf_module.register_post_forward_operation(fn) else: - raise nncf.ValidationError("Unsupported insertion type: {}".format(point.insertion_type)) + msg = f"Unsupported insertion type: {point.insertion_type}" + raise nncf.ValidationError(msg) self._groups_vs_hooks_handlers[hooks_group_name].append(handle) return handle @@ -583,7 +588,8 @@ def is_scope_in_nncf_module_scope(self, scope: Scope) -> bool: def register_compression_module_type(self, compression_module_type: ExtraCompressionModuleType): attr_name = compression_module_type_to_attr_name(compression_module_type) if compression_module_type in self._extra_module_types: - raise nncf.ValidationError(f"Module type {compression_module_type} is already registered") + msg = f"Module type {compression_module_type} is already registered" + raise nncf.ValidationError(msg) self.__setattr__(attr_name, nn.ModuleDict()) self._extra_module_types.append(compression_module_type) @@ -593,16 +599,19 @@ def add_compression_module( ): attr_name = compression_module_type_to_attr_name(compression_module_type) if compression_module_type not in self._extra_module_types: - raise nncf.InternalError(f"Module type {compression_module_type} was not registered") + msg = f"Module type {compression_module_type} was not registered" + raise nncf.InternalError(msg) storage = self.__getattr__(attr_name) if module_key in storage: - raise nncf.InternalError(f"Module {module_key} is already registered under {attr_name}") + msg = f"Module {module_key} is already registered under {attr_name}" + raise nncf.InternalError(msg) storage[module_key] = module def get_compression_modules_by_type(self, compression_module_type: ExtraCompressionModuleType) -> nn.ModuleDict: attr_name = compression_module_type_to_attr_name(compression_module_type) if compression_module_type not in self._extra_module_types: - raise nncf.InternalError(f"Module type {compression_module_type} was not registered") + msg = f"Module type {compression_module_type} was not registered" + raise nncf.InternalError(msg) return self.__getattr__(attr_name) def is_compression_module_registered(self, compression_module_type: ExtraCompressionModuleType) -> bool: @@ -617,7 +626,8 @@ def is_compression_module_registered(self, compression_module_type: ExtraCompres def sort_compression_modules(self, compression_module_type: ExtraCompressionModuleType): attr_name = compression_module_type_to_attr_name(compression_module_type) if compression_module_type not in self._extra_module_types: - raise nncf.InternalError("Module type {} was not registered".format(compression_module_type)) + msg = f"Module type {compression_module_type} was not registered" + raise nncf.InternalError(msg) module_dict = self.__getattr__(attr_name) module_dict._modules = OrderedDict(sorted(module_dict._modules.items())) @@ -911,7 +921,8 @@ def _check_external_call_hook_is_valid(hook: ExternalOpCallHook, info: str): elif module_type_name == EXTERNAL_OP_STORAGE_NAME: module_type = ExtraCompressionModuleType.EXTERNAL_OP else: - raise RuntimeError(f"Module type {module_type_name} is not supported") + msg = f"Module type {module_type_name} is not supported" + raise RuntimeError(msg) command = PTSharedFnInsertionCommand( target_points=target_points, @@ -1153,7 +1164,8 @@ def __init__(self, *args, **kwargs): 
achieved by a __call__ method defined in the metaclass `NNCFNetworkMeta`. """ super().__init__() - raise nncf.InternalError("Direct instantiation of NNCFNetwork objects using __init__ is prohibited.") + msg = "Direct instantiation of NNCFNetwork objects using __init__ is prohibited." + raise nncf.InternalError(msg) def __call__(self, *args, **kwargs): """ @@ -1290,4 +1302,5 @@ def compression_module_type_to_attr_name(compression_module_type: ExtraCompressi return EXTERNAL_QUANTIZERS_STORAGE_NAME if compression_module_type == ExtraCompressionModuleType.EXTERNAL_OP: return EXTERNAL_OP_STORAGE_NAME - raise nncf.ValidationError("Unknown extra module type") + msg = "Unknown extra module type" + raise nncf.ValidationError(msg) diff --git a/nncf/torch/pruning/base_algo.py b/nncf/torch/pruning/base_algo.py index f9dfc085be1..d466128893d 100644 --- a/nncf/torch/pruning/base_algo.py +++ b/nncf/torch/pruning/base_algo.py @@ -88,10 +88,11 @@ def _set_default_params_for_ranking_type(params: Dict) -> None: params.setdefault("prune_first_conv", True) params.setdefault("prune_downsample_convs", True) if params.get("all_weights") is False: - raise Exception( + msg = ( "For LeGR pruning the `all_weights` config parameter be set to `true`." "Adjust the config accordingly if you want to proceed." ) + raise Exception(msg) params.setdefault("all_weights", True) def _get_transformation_layout(self, target_model: NNCFNetwork) -> PTTransformationLayout: @@ -236,7 +237,8 @@ def check_pruning_level(self, params): pruning_target = params.get("pruning_target", None) pruning_flops_target = params.get("pruning_flops_target", None) if pruning_target and pruning_flops_target: - raise ValueError("Only one parameter from 'pruning_target' and 'pruning_flops_target' can be set.") + msg = "Only one parameter from 'pruning_target' and 'pruning_flops_target' can be set." + raise ValueError(msg) if pruning_flops_target: self.prune_flops = True diff --git a/nncf/torch/pruning/filter_pruning/algo.py b/nncf/torch/pruning/filter_pruning/algo.py index 159030a31be..3d21f700b72 100644 --- a/nncf/torch/pruning/filter_pruning/algo.py +++ b/nncf/torch/pruning/filter_pruning/algo.py @@ -185,17 +185,19 @@ def __init__( with safe_open(Path(coeffs_path), "r", encoding="utf8") as coeffs_file: loaded_coeffs = json.load(coeffs_file) except (ValueError, FileNotFoundError) as err: - raise Exception( + msg = ( "Can't load json with ranking coefficients. Please, check format of json file " "and path to the file." - ) from err + ) + raise Exception(msg) from err ranking_coeffs = {key: tuple(loaded_coeffs[key]) for key in loaded_coeffs} nncf_logger.debug(f"Loaded ranking coefficients = {ranking_coeffs}") self.ranking_coeffs = ranking_coeffs else: # Ranking can't be trained without registered init struct LeGRInitArgs if not config.has_extra_struct(LeGRInitArgs): - raise Exception("Please, register LeGRInitArgs via register_default_init_args function.") + msg = "Please, register LeGRInitArgs via register_default_init_args function." 
+ raise Exception(msg) # Wrapping model for parallelization distributed_wrapping_init_args = config.get_extra_struct(DistributedCallbacksArgs) target_model = distributed_wrapping_init_args.wrap_model(target_model) @@ -353,10 +355,8 @@ def _find_uniform_pruning_level_for_target_flops(self, target_flops_pruning_leve self.current_flops = flops self.current_params_num = params_num return right - raise nncf.InternalError( - "Can't prune the model to get the required " - "pruning level in flops = {}".format(target_flops_pruning_level) - ) + msg = f"Can't prune the model to get the required pruning level in flops = {target_flops_pruning_level}" + raise nncf.InternalError(msg) def set_pruning_level( self, pruning_level: Union[float, Dict[int, float]], run_batchnorm_adaptation: bool = False @@ -378,7 +378,8 @@ def set_pruning_level( with torch.no_grad(): if self.all_weights: if groupwise_pruning_levels_set: - raise nncf.InternalError("Cannot set group-wise pruning levels with all_weights=True") + msg = "Cannot set group-wise pruning levels with all_weights=True" + raise nncf.InternalError(msg) # Non-uniform (global) importance-score-based pruning according # to the global pruning level if self.prune_flops: @@ -389,9 +390,8 @@ def set_pruning_level( if groupwise_pruning_levels_set: group_ids = [group.id for group in self.pruned_module_groups_info.get_all_clusters()] if set(pruning_level.keys()) != set(group_ids): - raise nncf.InternalError( - "Groupwise pruning level dict keys do not correspond to layer group ids" - ) + msg = "Groupwise pruning level dict keys do not correspond to layer group ids" + raise nncf.InternalError(msg) else: # Pruning uniformly with the same pruning level across layers if self.prune_flops: @@ -597,7 +597,8 @@ def _set_binary_masks_for_pruned_modules_globally_by_flops_target(self, target_f self.current_params_num = params_num return cur_num += 1 - raise nncf.InternalError("Can't prune model to asked flops pruning level") + msg = "Can't prune the model to the requested flops pruning level" + raise nncf.InternalError(msg) def _propagate_masks(self): nncf_logger.debug("Propagating pruning masks") diff --git a/nncf/torch/pruning/filter_pruning/layers.py b/nncf/torch/pruning/filter_pruning/layers.py index cfa01704421..c0a77212484 100644 --- a/nncf/torch/pruning/filter_pruning/layers.py +++ b/nncf/torch/pruning/filter_pruning/layers.py @@ -101,9 +101,10 @@ def apply_filter_binary_mask( :return: result with applied mask """ if filter_mask.size(0) != module_parameter.size(dim): - raise nncf.InternalError( - "Shape of mask = {} for module {} isn't broadcastable to weight shape={}." - " ".format(filter_mask.shape, node_name_for_logging, module_parameter.shape) + msg = ( + f"Shape of mask = {filter_mask.shape} for module {node_name_for_logging}" + f" isn't broadcastable to weight shape={module_parameter.shape}."
) + raise nncf.InternalError(msg) broadcasted_filter_mask = broadcast_filter_mask(filter_mask, module_parameter.shape, dim) return module_parameter.mul(broadcasted_filter_mask) diff --git a/nncf/torch/pruning/operations.py b/nncf/torch/pruning/operations.py index 88d0b5d9cbf..811834bf2c9 100644 --- a/nncf/torch/pruning/operations.py +++ b/nncf/torch/pruning/operations.py @@ -558,9 +558,7 @@ def input_reorder(cls, model: NNCFNetwork, node: NNCFNode, graph: NNCFGraph): ln.bias.data = torch.index_select(ln.bias.data, 0, reorder_indexes) nncf_logger.debug( - "Reordered channels (first 10 reorder indexes {}) of LayerNorm: {} ".format( - reorder_indexes[:10], node.node_key - ) + f"Reordered channels (first 10 reorder indexes {reorder_indexes[:10]}) of LayerNorm: {node.node_key} " ) @classmethod @@ -575,7 +573,8 @@ def input_prune(cls, model: NNCFNetwork, node: NNCFNode, graph: NNCFGraph, prun_ node_module = model.nncf.get_containing_module(node.node_name) if prun_type == PrunType.CUT_WEIGHTS: - raise nncf.InternalError("LayerNorm does not support pruning by cutting channels") + msg = "LayerNorm does not support pruning by cutting channels" + raise nncf.InternalError(msg) node_module.weight = torch.nn.Parameter(apply_filter_binary_mask(input_mask, node_module.weight)) node_module.bias = torch.nn.Parameter(apply_filter_binary_mask(input_mask, node_module.bias)) diff --git a/nncf/torch/pruning/utils.py b/nncf/torch/pruning/utils.py index 8176728d07a..51faf7a376d 100644 --- a/nncf/torch/pruning/utils.py +++ b/nncf/torch/pruning/utils.py @@ -88,7 +88,8 @@ def _calculate_output_shape(graph: NNCFGraph, node: NNCFNode) -> Tuple[int, ...] elif isinstance(attrs, LinearLayerAttributes): shape = shape[:-1] + [attrs.out_features] else: - raise nncf.ValidationError(f"Unexpected node type {node.node_type} is fed to _calculate_output_shape") + msg = f"Unexpected node type {node.node_type} is fed to _calculate_output_shape" + raise nncf.ValidationError(msg) return tuple(shape) diff --git a/nncf/torch/quantization/adjust_padding.py b/nncf/torch/quantization/adjust_padding.py index 795465289ea..b19435ed23d 100644 --- a/nncf/torch/quantization/adjust_padding.py +++ b/nncf/torch/quantization/adjust_padding.py @@ -44,7 +44,8 @@ class CalculatePaddingAdjustment: def __init__(self, activation_quantizer: SymmetricQuantizer): if not isinstance(activation_quantizer, SymmetricQuantizer): - raise nncf.InternalError("Padding adjustment is not supported for not symmetric quantization") + msg = "Padding adjustment is not supported for non-symmetric quantization" + raise nncf.InternalError(msg) self._activation_quantizer = activation_quantizer self._is_enabled = True diff --git a/nncf/torch/quantization/algo.py b/nncf/torch/quantization/algo.py index e39c0ebc137..029a6e2356e 100644 --- a/nncf/torch/quantization/algo.py +++ b/nncf/torch/quantization/algo.py @@ -463,9 +463,11 @@ def __init__(self, config, should_init: bool = True): algo_config = self._get_algo_specific_config_section() if self._target_device == "NPU" and "preset" in algo_config: - raise nncf.InternalError("The NPU target device does not support presets.") + msg = "The NPU target device does not support presets." + raise nncf.InternalError(msg) if self._target_device == "CPU_SPR": - raise nncf.InternalError("The CPU_SPR target device does not supported.") + msg = "The CPU_SPR target device is not supported."
+ raise nncf.InternalError(msg) self._range_init_params = None self._precision_init_type = None @@ -538,33 +540,37 @@ def _parse_precision_init_params(self, initializer_config: Dict) -> Tuple[str, B return None, None precision_init_type = init_precision_config.get("type", "manual") if precision_init_type not in PRECISION_INIT_TYPES_VS_DESCRIPTION: - raise nncf.InternalError(f"Unrecognized precision init type: {precision_init_type}") + msg = f"Unrecognized precision init type: {precision_init_type}" + raise nncf.InternalError(msg) if precision_init_type == "hawq": try: precision_init_args = self.config.get_extra_struct(QuantizationPrecisionInitArgs) except KeyError as e: - raise ValueError( + msg = ( "Specified non-manual precision initialization in the NNCF config, " "but the initializing data loader and loss criterion are not provided as an extra struct. " "Refer to `NNCFConfig.register_extra_structs` and the `QuantizationPrecisionInitArgs` " "class" - ) from e + ) + raise ValueError(msg) from e precision_init_params = HAWQPrecisionInitParams.from_config(init_precision_config, precision_init_args) elif precision_init_type == "autoq": if self.hw_config is not None and self.hw_config.target_device != HWConfigType.NPU.value: - raise ValueError( - "Unsupported device ({}). Automatic Precision Initialization only supports for " - "target_device NONE or NPU".format(self.hw_config.target_device) + msg = ( + f"Unsupported device ({self.hw_config.target_device})." + f" Automatic Precision Initialization is only supported for target_device NONE or NPU" ) + raise ValueError(msg) try: precision_init_args = self.config.get_extra_struct(AutoQPrecisionInitArgs) except KeyError as e: - raise ValueError( + msg = ( "Specified Automated precision initialization in the NNCF config, " "but the initializing data loader and loss criterion are not provided as an extra " "struct. Refer to `NNCFConfig.register_extra_structs` and the " "`AutoQPrecisionInitArgs` class" - ) from e + ) + raise ValueError(msg) from e hw_config_type = None if self.hw_config is not None: @@ -575,7 +581,8 @@ def _parse_precision_init_params(self, initializer_config: Dict) -> Tuple[str, B elif precision_init_type == "manual": precision_init_params = ManualPrecisionInitParams.from_config(init_precision_config) else: - raise ValueError(f"Unhandled precision init type: {precision_init_type}") + msg = f"Unhandled precision init type: {precision_init_type}" + raise ValueError(msg) return precision_init_type, precision_init_params def _get_minmax_values_for_quantizer_locations( @@ -749,7 +756,8 @@ def _get_quantizer_setup(self, target_model: NNCFNetwork) -> PTQuantizerSetup: half_range = True quantizers_with_overflow_fix_str = "first convolution weight quantizers" elif self._overflow_fix != "disable": - raise nncf.InternalError(f"Unknown overflow fix type: {self._overflow_fix}") + msg = f"Unknown overflow fix type: {self._overflow_fix}" + raise nncf.InternalError(msg) if half_range: nncf_logger.debug(f"Overflow issue fix will be applied to {quantizers_with_overflow_fix_str}") @@ -902,10 +910,11 @@ def _build_insertion_commands_list_for_quantizer_setup( for layer_name in shared_weight_quantized_layers_in_group: if layer_name in already_weight_quantized_shared_layers: - raise nncf.InternalError( + msg = ( "Attempted to assign a unified-scale quantizer to a shared layer node that has " "already had its weights quantized by another unified-scale quantizer!"
) + raise nncf.InternalError(msg) already_weight_quantized_shared_layers[layer_name] = quant_module_id for us_qp_id in unified_scales_group: @@ -1070,8 +1079,9 @@ def ip_str_repr_key_lambda(x): qspec = quantizer_setup.quantization_points[primary_qp_id].qspec linked_qspecs = [quantizer_setup.quantization_points[qp_id].qspec for qp_id in linked_qp_ids] for linked_qspec in linked_qspecs: - if not qspec == linked_qspec: - raise nncf.InternalError("The qspecs for unified scale quantization points should be identical!") + if qspec != linked_qspec: + msg = "The qspecs for unified scale quantization points should be identical!" + raise nncf.InternalError(msg) range_init_minmax_values = None if minmax_values_for_range_init: @@ -1133,7 +1143,8 @@ def _quantize_at_points_by_single_module( target_model_graph = target_model.nncf.get_original_graph() if not insertion_points: - raise nncf.InternalError("No insertion points to put quantizers into!") + msg = "No insertion points to put quantizers into!" + raise nncf.InternalError(msg) def is_weights(ip: PTTargetPoint) -> bool: return ip.target_type is TargetType.OPERATION_WITH_WEIGHTS @@ -1191,7 +1202,8 @@ def is_weights(ip: PTTargetPoint) -> bool: insertion_commands = [] for curr_insertion_point in insertion_points: if curr_insertion_point in self._processed_insertion_points: - raise nncf.InternalError("Insertion point {} already quantized!".format(str(curr_insertion_point))) + msg = f"Insertion point {str(curr_insertion_point)} already quantized!" + raise nncf.InternalError(msg) self._processed_insertion_points.add(curr_insertion_point) if is_weights(curr_insertion_point): @@ -1600,7 +1612,8 @@ def get_quantizer_setup_for_current_state(self) -> SingleConfigQuantizerSetup: def is_new_setup_requires_regeneration(self, quantizer_setup: SingleConfigQuantizerSetup) -> bool: current_setup = self.get_quantizer_setup_for_current_state() if Counter(current_setup.quantization_points.keys()) != Counter(quantizer_setup.quantization_points.keys()): - raise ValueError("The new setup is inconsistent with the original parameter space!") + msg = "The new setup is inconsistent with the original parameter space!" + raise ValueError(msg) for qp_id, qp in quantizer_setup.quantization_points.items(): current_qconfig = current_setup.quantization_points[qp_id].qconfig new_qconfig = quantizer_setup.quantization_points[qp_id].qconfig diff --git a/nncf/torch/quantization/debug_interface.py b/nncf/torch/quantization/debug_interface.py index 5988b22730c..531fff5ea7e 100644 --- a/nncf/torch/quantization/debug_interface.py +++ b/nncf/torch/quantization/debug_interface.py @@ -104,16 +104,15 @@ def post_forward_actions(self, module: NNCFNetwork): if tracker.get_never_called_keys(): # This will always trigger for DataParallel - disregard or disable debug mode # for DataParallel runs - raise nncf.InternalError( - f"{tracker.name} has never called modules: {tracker.get_never_called_keys()}!" - ) + msg = f"{tracker.name} has never called modules: {tracker.get_never_called_keys()}!" + raise nncf.InternalError(msg) def dump_scale(self, quantizer_scale_params: Dict[str, torch.Tensor], quantizer_name: str): import re quantizer_normalized_name = re.sub(r"[^\w\-_\. 
]", "_", quantizer_name) for scale_param_name, scale_param in quantizer_scale_params.items(): - fname = "{}_{}.txt".format(quantizer_normalized_name, scale_param_name) + fname = f"{quantizer_normalized_name}_{scale_param_name}.txt" with safe_open(self.scale_dump_dir / fname, "ab") as file: np.savetxt(file, scale_param.cpu().numpy().flatten()) @@ -161,12 +160,13 @@ def visualize_insertion_point_graph(self, insertion_point_graph: InsertionPointG InsertionPointGraphNodeType.POST_HOOK, ]: target_point_data = node[InsertionPointGraph.INSERTION_POINT_NODE_ATTR] - label = "TP: {}".format(str(target_point_data)) + label = f"TP: {str(target_point_data)}" out_graph.add_node(node_key, label=label, color="red") elif node[InsertionPointGraph.NODE_TYPE_NODE_ATTR] == InsertionPointGraphNodeType.OPERATOR: out_graph.add_node(node_key) else: - raise nncf.InternalError("Invalid InsertionPointGraph node!") + msg = "Invalid InsertionPointGraph node!" + raise nncf.InternalError(msg) for u, v in insertion_point_graph.edges: out_graph.add_edge(u, v) diff --git a/nncf/torch/quantization/extensions.py b/nncf/torch/quantization/extensions.py index 106454a0813..b3463171c61 100644 --- a/nncf/torch/quantization/extensions.py +++ b/nncf/torch/quantization/extensions.py @@ -95,12 +95,13 @@ def load(cls): raise e except (subprocess.CalledProcessError, OSError, RuntimeError) as e: assert torch.cuda.is_available() - raise nncf.InstallationError( + msg = ( "CUDA is available for PyTorch, but NNCF could not compile " "GPU quantization extensions. Make sure that you have installed CUDA development " "tools (see https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html for " "guidance) and that 'nvcc' is available on your system's PATH variable.\n" - ) from e + ) + raise nncf.InstallationError(msg) from e @classmethod def name(cls) -> str: diff --git a/nncf/torch/quantization/init_range.py b/nncf/torch/quantization/init_range.py index 7206d859fae..4c514cb44e9 100644 --- a/nncf/torch/quantization/init_range.py +++ b/nncf/torch/quantization/init_range.py @@ -80,17 +80,15 @@ def get_init_config_for_scope_and_group(self, qid: QuantizerId, group: Quantizer ) ) if len(matches) > 1: - raise ValueError( - "Location {} matches more than one per-layer initialization parameter definition!".format(str(qid)) - ) + msg = f"Location {str(qid)} matches more than one per-layer initialization parameter definition!" + raise ValueError(msg) if len(matches) == 1: return matches[0] if not matches and self.global_init_config is not None: return deepcopy(self.global_init_config) - raise ValueError( - "Location {} does not match any per-layer initialization parameter definition!".format(str(qid)) - ) + msg = f"Location {str(qid)} does not match any per-layer initialization parameter definition!" 
+ raise ValueError(msg) class PTRangeInitCollectorParams(RangeInitCollectorParams): @@ -155,7 +153,8 @@ def generate_stat_collector_for_range_init_config( if num_samples_to_collect_override is not None: num_samples = num_samples_to_collect_override if init_config.init_type not in RANGE_INIT_TYPES_VS_DESCRIPTIONS: - raise nncf.InternalError("Unknown range init type: {}".format(init_config.init_type)) + msg = f"Unknown range init type: {init_config.init_type}" + raise nncf.InternalError(msg) use_per_sample_stats = collector_params.use_per_sample_stats(init_config.init_type == "mixed_min_max") reduction_axes, aggregation_axes = collector_params.get_reduction_aggregation_axes(use_per_sample_stats) @@ -215,7 +214,8 @@ def generate_stat_collector_for_range_init_config( scale_shape=scale_shape, num_samples=num_samples, ) - raise ValueError("Range init type not handled!") + msg = "Range init type not handled!" + raise ValueError(msg) @classmethod def get_all_scale_shapes_with_params( diff --git a/nncf/torch/quantization/layers.py b/nncf/torch/quantization/layers.py index 1d3ff9feefc..c6785b6ea18 100644 --- a/nncf/torch/quantization/layers.py +++ b/nncf/torch/quantization/layers.py @@ -433,10 +433,12 @@ def apply_minmax_init(self, min_values: torch.Tensor, max_values: torch.Tensor, return if torch.all(torch.isinf(min_values)) or torch.all(torch.isinf(max_values)): - raise ValueError(f"Statistics are not collected for {log_module_name}") + msg = f"Statistics are not collected for {log_module_name}" + raise ValueError(msg) if torch.any(torch.eq(min_values, np.inf)) or torch.any(torch.eq(max_values, -np.inf)): - raise ValueError(f"Some of the values in statistics have infinite value for {log_module_name}") + msg = f"Some of the values in statistics have infinite value for {log_module_name}" + raise ValueError(msg) own_device = get_model_device(self) min_values = min_values.to(own_device) @@ -515,10 +517,11 @@ def _prepare_qdq_export_quantization(self, x: torch.Tensor) -> Tuple[torch.Tenso y_scale, y_zero_point = get_scale_zp_from_input_low_input_high(level_low, level_high, input_low, input_high) possible_axes = self._possible_per_channel_dimensions() if len(possible_axes) > 1: - raise nncf.InternalError( + msg = ( f"Impossible to determine the per-channel axis for a scale shape {self.scale_shape} - " f"more than one dimension is >1" ) + raise nncf.InternalError(msg) if not possible_axes: # Impossible to determine proper axis for per-channel quantization because we have # scale shape ~ [1, 1, 1, 1], therefore falling back to per-tensor style export @@ -547,10 +550,11 @@ def run_export_quantization(self, x: torch.Tensor): if self._export_mode == QuantizerExportMode.ONNX_QUANTIZE_DEQUANTIZE_PAIRS: x, y_scale, y_zero_point, axis = self._prepare_qdq_export_quantization(x) return ExportQuantizeToONNXQuantDequant.apply(x, y_scale, y_zero_point, axis) - raise nncf.InternalError("Unknown export mode") + msg = "Unknown export mode" + raise nncf.InternalError(msg) def extra_repr(self): - return "bit={}, ch={}".format(self.num_bits, self.per_channel) + return f"bit={self.num_bits}, ch={self.per_channel}" @abstractmethod def get_quantizer_config(self) -> QuantizerConfig: @@ -1112,9 +1116,11 @@ def quantization_mode(self) -> QuantizationMode: def pack_weight(self, weight: torch.Tensor) -> torch.Tensor: if torch.is_floating_point(weight): - raise ValueError(f"Invalid weight dtype {weight.type}. Integer types are supported.") + msg = f"Invalid weight dtype {weight.dtype}. Integer types are supported."
+ raise ValueError(msg) if torch.any((weight < 0) | (weight > 255)): - raise ValueError("Weight values are not in [0, 255].") + msg = "Weight values are not in [0, 255]." + raise ValueError(msg) return weight.type(dtype=torch.uint8) def forward(self, x) -> torch.Tensor: @@ -1143,7 +1149,8 @@ def quantization_mode(self) -> QuantizationMode: def pack_weight(self, weight: torch.Tensor) -> torch.Tensor: if torch.any((weight < -128) | (weight > 127)): - raise ValueError("Weight values are not in [-128, 127].") + msg = "Weight values are not in [-128, 127]." + raise ValueError(msg) return weight.type(dtype=torch.int8) def forward(self, x) -> torch.Tensor: @@ -1184,7 +1191,8 @@ def quantization_mode(self) -> QuantizationMode: def pack_weight(self, weight: torch.Tensor) -> torch.Tensor: if torch.any((weight < 0) | (weight > 15)): - raise ValueError("Weight values are not in [0, 15].") + msg = "Weight values are not in [0, 15]." + raise ValueError(msg) return pack_uint4(weight.type(dtype=torch.uint8)) def forward(self, x): @@ -1227,9 +1235,11 @@ def quantization_mode(self) -> QuantizationMode: def pack_weight(self, weight: torch.Tensor) -> torch.Tensor: if torch.is_floating_point(weight): - raise ValueError(f"Invalid weight dtype {weight.type}. Integer types are supported.") + msg = f"Invalid weight dtype {weight.dtype}. Integer types are supported." + raise ValueError(msg) if torch.any((weight < -8) | (weight > 7)): - raise ValueError("Tensor values are not in [-8, 7].") + msg = "Tensor values are not in [-8, 7]." + raise ValueError(msg) return pack_int4(weight.type(dtype=torch.int8)) def forward(self, x): diff --git a/nncf/torch/quantization/precision_init/autoq_init.py b/nncf/torch/quantization/precision_init/autoq_init.py index e38c63b700a..92332bc83d0 100644 --- a/nncf/torch/quantization/precision_init/autoq_init.py +++ b/nncf/torch/quantization/precision_init/autoq_init.py @@ -211,7 +211,7 @@ def apply_init(self) -> SingleConfigQuantizerSetup: nncf_logger.info(f"[AutoQ] best_reward: {best_reward}") nncf_logger.info(f"[AutoQ] best_policy: {best_policy}") nncf_logger.info("[AutoQ] Search completed.") - nncf_logger.info("[AutoQ] Elapsed time of AutoQ Precision Initialization (): {}".format(end_ts - start_ts)) + nncf_logger.info(f"[AutoQ] Elapsed time of AutoQ Precision Initialization: {end_ts - start_ts}") return final_quantizer_setup def _search(self, agent: DDPG, env: "QuantizationEnv") -> Tuple[pd.Series, float]: # noqa: F821 @@ -255,9 +255,11 @@ def _search(self, agent: DDPG, env: "QuantizationEnv") -> Tuple[pd.Series, float # Replay Buffer Management if agent.memory.nb_entries % (len(env.master_df) + 1) > 0: - raise ValueError("logical bug in buffer management, uneven episode length") + msg = "logical bug in buffer management, uneven episode length" + raise ValueError(msg) if agent.memory.limit % (len(env.master_df) + 1) > 0: - raise ValueError("replay buffer size must be divisible by episode step length") + msg = "replay buffer size must be divisible by episode step length" + raise ValueError(msg) if agent.memory.nb_entries + len(transition_buffer) >= agent.memory.limit: step_reward_per_episode = agent.memory.rewards.data[:: (len(transition_buffer) + 1)] @@ -395,9 +397,7 @@ def _dump_episode( } # Save nncf compression cfg - episode_cfgfile = "{0}/{1:03d}_nncfcfg.json".format( - str(self._init_args.config["episodic_nncfcfg"]), episode - ) + episode_cfgfile = "{}/{:03d}_nncfcfg.json".format(str(self._init_args.config["episodic_nncfcfg"]), episode) with safe_open(Path(episode_cfgfile), "w")
as outfile: json.dump(current_episode_nncfcfg, outfile, indent=4, sort_keys=False) @@ -453,9 +453,9 @@ def _generate_tensorboard_logging_string( episode, reward, accuracy, model_ratio, bop_ratio = info_tuple text_string = bit_stats_df.to_markdown() + "\n\n\n" - text_string += "Episode: {:>4}, Reward: {:.3f}, ".format(episode, reward) - text_string += "Accuracy: {:.3f}, Model_Size_Ratio: {:.3f}, BOP_Ratio: {:.3f}\n\n\n".format( - accuracy, model_ratio, bop_ratio + text_string += f"Episode: {episode:>4}, Reward: {reward:.3f}, " + text_string += ( + f"Accuracy: {accuracy:.3f}, Model_Size_Ratio: {model_ratio:.3f}, BOP_Ratio: {bop_ratio:.3f}\n\n\n" ) for _, row_id in enumerate(qdf.index.tolist()): diff --git a/nncf/torch/quantization/precision_init/bitwidth_graph.py b/nncf/torch/quantization/precision_init/bitwidth_graph.py index 5365ea6c740..5ca3e5cbbfb 100644 --- a/nncf/torch/quantization/precision_init/bitwidth_graph.py +++ b/nncf/torch/quantization/precision_init/bitwidth_graph.py @@ -72,7 +72,7 @@ def __init__( operator_name += "(shared among nodes {})".format( ",".join([str(node_id) for node_id in node_ids]) ) - operator_name += "_#{}".format(node.node_id) + operator_name += f"_#{node.node_id}" target_node_to_draw = self._nx_graph.nodes[node_key] target_node_to_draw["label"] = operator_name target_node_to_draw["style"] = "filled" @@ -88,9 +88,8 @@ def __init__( for wq_id, wq_info in algo_ctrl.weight_quantizers.items(): nodes = [nncf_graph.get_node_by_name(tp.target_node_name) for tp in wq_info.affected_insertions] if not nodes: - raise AttributeError( - "Failed to get affected nodes for quantized module node: {}".format(wq_id.target_node_name) - ) + msg = f"Failed to get affected nodes for quantized module node: {wq_id.target_node_name}" + raise AttributeError(msg) preds = [nncf_graph.get_previous_nodes(node) for node in nodes] wq_nodes = [] for pred_list in preds: @@ -105,7 +104,7 @@ def __init__( nx_node_to_draw_upon = self._nx_graph.nodes[key] quantizer = wq_info.quantizer_module_ref bitwidths = quantizer.num_bits - nx_node_to_draw_upon["label"] = "WFQ_[{}]_#{}".format(quantizer.get_quantizer_config(), str(node_id)) + nx_node_to_draw_upon["label"] = f"WFQ_[{quantizer.get_quantizer_config()}]_#{str(node_id)}" if grouped_mode: group_id_str = "UNDEFINED" group_id = groups_of_adjacent_quantizers.get_group_id_for_quantizer(wq_id) @@ -160,8 +159,8 @@ def _paint_activation_quantizer_node( activation_fq_node["style"] = "filled" node_id = activation_fq_node[NNCFNode.ID_NODE_ATTR] - activation_fq_node["label"] = "AFQ_[{}]_#{}".format( - quantizer_info.quantizer_module_ref.get_quantizer_config(), str(node_id) + activation_fq_node["label"] = ( + f"AFQ_[{quantizer_info.quantizer_module_ref.get_quantizer_config()}]_#{str(node_id)}" ) grouped_mode = bool(groups_of_adjacent_quantizers) if grouped_mode: diff --git a/nncf/torch/quantization/precision_init/hawq_init.py b/nncf/torch/quantization/precision_init/hawq_init.py index 5d027107ba7..8bcde879a3a 100644 --- a/nncf/torch/quantization/precision_init/hawq_init.py +++ b/nncf/torch/quantization/precision_init/hawq_init.py @@ -186,7 +186,8 @@ def get_qconfig_sequences_constrained_by_trace_order( sequence is non-decreasing, the bitwidth closest to this target will be chosen instead. """ if len(possible_qconfigs_sequence_in_trace_order) != len(self._traces_order): - raise ValueError("The size of the qconfig space and the traces do not match!") + msg = "The size of the qconfig space and the traces do not match!" 
+ raise ValueError(msg) retval: List[QConfigSequenceForHAWQToEvaluate] = [] observed_qconfs_in_retval = [OrderedDict() for _ in range(len(self._traces_order))] for bitwidth_sequence in self._bitwidth_sequences: @@ -266,7 +267,8 @@ def apply_init(self) -> SingleConfigQuantizerSetup: traces_per_layer = self._calc_traces(self._criterion_fn, self._criterion, self._iter_number, self._tolerance) if not traces_per_layer: - raise nncf.InternalError("Failed to calculate hessian traces!") + msg = "Failed to calculate hessian traces!" + raise nncf.InternalError(msg) traces_order = traces_per_layer.traces_order ( @@ -304,11 +306,11 @@ def apply_init(self) -> SingleConfigQuantizerSetup: min_ratio = min(compression_ratio_per_qconfig) max_ratio = max(compression_ratio_per_qconfig) if not min_ratio <= self._compression_ratio <= max_ratio: - raise AttributeError( - "Invalid compression ratio={}. Should be within range [{:.3f}, {:.3f}]".format( - self._compression_ratio, min_ratio, max_ratio - ) + msg = ( + f"Invalid compression ratio={self._compression_ratio}." + f" Should be within range [{min_ratio:.3f}, {max_ratio:.3f}]" ) + raise AttributeError(msg) perturbations, weight_observers = self.calc_quantization_noise(covering_qconfig_sequences, traces_order) @@ -382,10 +384,11 @@ def _merge_constraints_for_adjacent_quantizers( quantizer_ids.append(quantizer_id) minimal_set_bitwidths = set.intersection(*all_bitwidths_sets) if not minimal_set_bitwidths: - raise nncf.InternalError( + msg = ( "No bitwidths configurations are left after removing inconsistent groups of weight quantizers" " with adjacent activation quantizers!" ) + raise nncf.InternalError(msg) for quantizer_id in quantizer_ids: qconfig_sequence = retval.get(quantizer_id) filtered_qconfig_sequence = [] @@ -492,13 +495,14 @@ def _calc_traces( avg_traces = trace_estimator.get_average_traces(max_iter=iter_number, tolerance=tolerance) except RuntimeError as error: if "cuda out of memory" in error.args[0].lower(): - raise nncf.InternalError( + msg = ( "Failed to estimate average Hessian traces within precision initialization. Specify " "a smaller batch size via --batch-size-init option in the NNCF samples or register " "a data loader with a smaller batch size. 
Refer to " "`NNCFConfig.register_extra_structs` and the `QuantizationPrecisionInitArgs`" " class" - ) from error + ) + raise nncf.InternalError(msg) from error raise error self.restore_disabled_gradients( @@ -729,7 +733,8 @@ def _set_activations_bitwidth_strictly( weight_bitwidth_set: Set[int], ) -> SingleConfigQuantizerSetup: if len(weight_bitwidth_set) > 1: - raise nncf.InternalError("Invalid grouping of weight quantizers") + msg = "Invalid grouping of weight quantizers" + raise nncf.InternalError(msg) all_constraints = set() original_quant_module_ids = [ self._original_qp_id_vs_quantizer_module_id_dict[act_qp_id] for act_qp_id in act_qp_ids @@ -743,7 +748,8 @@ def _set_activations_bitwidth_strictly( if weight_bitwidth_set: common_constraints = common_constraints.intersection(weight_bitwidth_set) if not common_constraints: - raise nncf.InternalError("No hardware compatible bitwidth for activation quantizers") + msg = "No hardware compatible bitwidth for activation quantizers" + raise nncf.InternalError(msg) for act_qp_id in act_qp_ids: quant_id = self._original_qp_id_vs_quantizer_module_id_dict[act_qp_id] target_bitwidth = sorted(list(common_constraints))[0] diff --git a/nncf/torch/quantization/precision_init/manual_init.py b/nncf/torch/quantization/precision_init/manual_init.py index b095a661796..253d9749ee4 100644 --- a/nncf/torch/quantization/precision_init/manual_init.py +++ b/nncf/torch/quantization/precision_init/manual_init.py @@ -61,8 +61,9 @@ def apply_init(self) -> SingleConfigQuantizerSetup: is_matched = True break if not is_matched: - raise ValueError( - "Could not find a quantization point at scope name `{}`, failed to assign bitwidth {} " - "to it".format(scope_name, bitwidth) + msg = ( + f"Could not find a quantization point at scope name `{scope_name}`," + f" failed to assign bitwidth {bitwidth} to it" ) + raise ValueError(msg) return quantizer_setup diff --git a/nncf/torch/quantization/precision_init/traces_order.py b/nncf/torch/quantization/precision_init/traces_order.py index 57807e7e73f..7b49a130cdf 100644 --- a/nncf/torch/quantization/precision_init/traces_order.py +++ b/nncf/torch/quantization/precision_init/traces_order.py @@ -23,7 +23,8 @@ def __init__(self, execution_indexes_of_weights_ordered_by_traces: List[int]): def get_execution_order_configs(self, trace_ordered_configuration: List) -> List: if len(trace_ordered_configuration) != self._num_weights: - raise ValueError("Incompatible configuration size!") + msg = "Incompatible configuration size!" + raise ValueError(msg) execution_order_config = [None] * self._num_weights for i, config in enumerate(trace_ordered_configuration): execution_order_config[self._index_by_traces_to_execution_index[i]] = config @@ -31,7 +32,8 @@ def get_execution_order_configs(self, trace_ordered_configuration: List) -> List def get_traces_order_configs(self, execution_ordered_configuration: List) -> List: if len(execution_ordered_configuration) != self._num_weights: - raise ValueError("Incompatible configuration size!") + msg = "Incompatible configuration size!" 
+ raise ValueError(msg) traces_order_config = [None] * self._num_weights for i, config in enumerate(execution_ordered_configuration): traces_order_config[self._index_by_execution_to_index_by_traces[i]] = config diff --git a/nncf/torch/quantization/quantize_functions.py b/nncf/torch/quantization/quantize_functions.py index 368f1bd1d9e..710f899ff4d 100644 --- a/nncf/torch/quantization/quantize_functions.py +++ b/nncf/torch/quantization/quantize_functions.py @@ -300,7 +300,8 @@ def pack_uint4(tensor: torch.Tensor) -> torch.Tensor: :raises nncf.errors.ValidationError: If the input tensor is not of type `torch.uint8`. """ if tensor.dtype != torch.uint8: - raise ValidationError(f"Invalid tensor dtype {tensor.type}. torch.uint8 type is supported.") + msg = f"Invalid tensor dtype {tensor.dtype}. torch.uint8 type is supported." + raise ValidationError(msg) packed_tensor = tensor.contiguous() packed_tensor = packed_tensor.reshape(-1, 2) packed_tensor = torch.bitwise_and(packed_tensor[..., ::2], 15) | packed_tensor[..., 1::2] << 4 @@ -330,7 +331,8 @@ def pack_int4(tensor: torch.Tensor) -> torch.Tensor: :raises nncf.errors.ValidationError: If the input tensor is not of type `torch.int8`. """ if tensor.dtype != torch.int8: - raise ValidationError(f"Invalid tensor dtype {tensor.type}. torch.int8 type is supported.") + msg = f"Invalid tensor dtype {tensor.dtype}. torch.int8 type is supported." + raise ValidationError(msg) tensor = tensor + 8 return pack_uint4(tensor.type(torch.uint8)) diff --git a/nncf/torch/quantization/quantize_model.py b/nncf/torch/quantization/quantize_model.py index 4e0a6e37b11..734f15a1607 100644 --- a/nncf/torch/quantization/quantize_model.py +++ b/nncf/torch/quantization/quantize_model.py @@ -52,11 +52,14 @@ def quantize_impl( Implementation of the `quantize()` method for the PyTorch backend.
""" if fast_bias_correction is False: - raise ValueError(f"fast_bias_correction={fast_bias_correction} is not supported") + msg = f"fast_bias_correction={fast_bias_correction} is not supported" + raise ValueError(msg) if target_device == TargetDevice.CPU_SPR: - raise nncf.InternalError("target_device == CPU_SPR is not supported") + msg = "target_device == CPU_SPR is not supported" + raise nncf.InternalError(msg) if mode is not None: - raise ValueError(f"mode={mode} is not supported") + msg = f"mode={mode} is not supported" + raise ValueError(msg) copied_model = deepcopy(model) diff --git a/nncf/torch/quantization/reference.py b/nncf/torch/quantization/reference.py index b65a6891fcb..a3b31ea6087 100644 --- a/nncf/torch/quantization/reference.py +++ b/nncf/torch/quantization/reference.py @@ -33,7 +33,8 @@ def __init__(self, backend_type: ReferenceBackendType): elif backend_type is ReferenceBackendType.TORCH: self.backend = torch else: - raise nncf.UnsupportedBackendError("Unknown backend for ReferenceQuantize") + msg = "Unknown backend for ReferenceQuantize" + raise nncf.UnsupportedBackendError(msg) def _astype(self, tensor: GeneralizedTensor, dtype) -> GeneralizedTensor: if self.backend is np: diff --git a/nncf/torch/quantization/strip.py b/nncf/torch/quantization/strip.py index b1bc99050d7..596c83340e1 100644 --- a/nncf/torch/quantization/strip.py +++ b/nncf/torch/quantization/strip.py @@ -80,10 +80,11 @@ def convert_to_torch_fakequantizer(nncf_quantizer: BaseQuantizer) -> FakeQuantiz nncf_quantizer.set_levels() if nncf_quantizer.num_bits not in SUPPORTED_NUM_BITS_FOR_STRIP_MODEL: - raise nncf.InternalError( + msg = ( "Converting nncf quantizer module to torch native only supports " f"for num_bits in {SUPPORTED_NUM_BITS_FOR_STRIP_MODEL}." ) + raise nncf.InternalError(msg) per_channel = nncf_quantizer.per_channel scale_shape = nncf_quantizer.scale_shape ch_axis = int(np.argmax(scale_shape)) diff --git a/nncf/torch/sparsity/magnitude/algo.py b/nncf/torch/sparsity/magnitude/algo.py index 59feee7dc7b..36ec6cbf870 100644 --- a/nncf/torch/sparsity/magnitude/algo.py +++ b/nncf/torch/sparsity/magnitude/algo.py @@ -133,9 +133,8 @@ def set_sparsity_level( run_batchnorm_adaptation: bool = False, ): if sparsity_level >= 1 or sparsity_level < 0: - raise AttributeError( - "Sparsity level should be within interval [0,1), actual value to set is: {}".format(sparsity_level) - ) + msg = f"Sparsity level should be within interval [0,1), actual value to set is: {sparsity_level}" + raise AttributeError(msg) if target_sparsified_module_info is None: target_sparsified_module_info_list = self.sparsified_module_info # List[SparseModuleInfo] else: diff --git a/nncf/torch/sparsity/rb/algo.py b/nncf/torch/sparsity/rb/algo.py index 6c82277ea0a..41263891896 100644 --- a/nncf/torch/sparsity/rb/algo.py +++ b/nncf/torch/sparsity/rb/algo.py @@ -108,10 +108,11 @@ def freeze(self): def distributed(self): if not dist.is_initialized(): - raise KeyError( + msg = ( "Could not set distributed mode for the compression algorithm " "because the default process group has not been initialized." 
) + raise KeyError(msg) if "cuda" in get_model_device(self._model).type: state = torch.cuda.get_rng_state() diff --git a/nncf/torch/sparsity/rb/loss.py b/nncf/torch/sparsity/rb/loss.py index aac8c08bef1..6c58bb6af73 100644 --- a/nncf/torch/sparsity/rb/loss.py +++ b/nncf/torch/sparsity/rb/loss.py @@ -44,9 +44,8 @@ def calculate(self) -> torch.Tensor: sparse_prob_sum = 0 for sparse_layer in self._sparse_layers: if not self.disabled and sparse_layer.frozen: - raise AssertionError( - "Invalid state of SparseLoss and SparsifiedWeight: mask is frozen for enabled loss" - ) + msg = "Invalid state of SparseLoss and SparsifiedWeight: mask is frozen for enabled loss" + raise AssertionError(msg) if not sparse_layer.frozen: sw_loss = sparse_layer.loss() params = params + sw_loss.view(-1).size(0) @@ -61,7 +60,8 @@ def calculate(self) -> torch.Tensor: def target_sparsity_rate(self): rate = 1 - self.target if rate < 0 or rate > 1: - raise IndexError("Target is not within range(0,1)") + msg = "Target is not within range(0,1)" + raise IndexError(msg) return rate def set_target_sparsity_loss(self, sparsity_level): @@ -84,9 +84,8 @@ def calculate(self) -> torch.Tensor: sparse_layers_loss = 0 for sparse_layer in self._sparse_layers: if not self.disabled and not sparse_layer.sparsify: - raise AssertionError( - "Invalid state of SparseLoss and SparsifiedWeight: mask is frozen for enabled loss" - ) + msg = "Invalid state of SparseLoss and SparsifiedWeight: mask is frozen for enabled loss" + raise AssertionError(msg) if sparse_layer.sparsify: sw_loss = sparse_layer.loss() params_layer = sw_loss.view(-1).size(0) diff --git a/nncf/torch/tensor_statistics/statistics.py b/nncf/torch/tensor_statistics/statistics.py index 541b1a77995..833360736a9 100644 --- a/nncf/torch/tensor_statistics/statistics.py +++ b/nncf/torch/tensor_statistics/statistics.py @@ -28,11 +28,13 @@ def pt_convert_stat_to_min_max_tensor_stat(statistic: TensorStatistic) -> MinMax ) if isinstance(statistic, PercentileTensorStatistic): if len(statistic.percentile_vs_values_dict.keys()) < 2: - raise ValueError("Cannot create a min-max statistic for less than 2 percentile values") + msg = "Cannot create a min-max statistic for less than 2 percentile values" + raise ValueError(msg) min_pct = min(statistic.percentile_vs_values_dict.keys()) max_pct = max(statistic.percentile_vs_values_dict.keys()) return MinMaxTensorStatistic( min_values=statistic.percentile_vs_values_dict[min_pct], max_values=statistic.percentile_vs_values_dict[max_pct], ) - raise ValueError("Unknown TensorStatistic to generate min-max stat from!") + msg = "Unknown TensorStatistic to generate min-max stat from!" 
+ raise ValueError(msg) diff --git a/nncf/torch/utils.py b/nncf/torch/utils.py index 1287a9a8cd9..4a27337af25 100644 --- a/nncf/torch/utils.py +++ b/nncf/torch/utils.py @@ -33,7 +33,7 @@ def get_node_name(module, module_name, prefix): - return "{prefix}/{cls}[{name}]".format(prefix=prefix, cls=module.__class__.__name__, name=module_name) + return f"{prefix}/{module.__class__.__name__}[{module_name}]" def get_all_modules(model, prefix=None): @@ -93,7 +93,7 @@ def get_state_dict_names_with_modules( ) -> Dict[str, torch.nn.Module]: found = OrderedDict() for name, module in model.named_children(): - full_node_name = "{}{}".format(prefix, name) + full_node_name = f"{prefix}{name}" if str_types is not None and type(module).__name__ in str_types: found[full_node_name] = module sub_found = get_state_dict_names_with_modules(module, str_types, prefix=full_node_name + ".") @@ -160,9 +160,9 @@ def get_flat_tensor_contents_string(input_tensor): retval = "[" for idx, el in enumerate(input_tensor.view(-1)): if idx >= 10: - retval += "... (first 10/{} elements shown only) ".format(len(input_tensor.view(-1))) + retval += f"... (first 10/{len(input_tensor.view(-1))} elements shown only) " break - retval += "{:.4f}, ".format(el.item()) + retval += f"{el.item():.4f}, " retval += "]" return retval @@ -340,7 +340,7 @@ def rename_legacy_names_in_state_dict( if legacy_names: warning_deprecated( "Legacy Batch Norm layer names was detected in checkpoint model state dict." - " All occurrences of `{}` in nodes names was replaced by `{}`".format(legacy_name, new_name) + f" All occurrences of `{legacy_name}` in nodes names was replaced by `{new_name}`" ) @@ -407,8 +407,8 @@ def maybe_convert_legacy_names_in_compress_state(compression_state: Dict[str, An new_name = LEGACY_VS_NEW_BN_MAP[old_name] warning_deprecated( "Legacy Batch Norm layer names was detected in quantization setup target" - " point names. All occurrences of `{}` in nodes names was replaced by" - " `{}`".format(old_name, new_name) + f" point names. 
All occurrences of `{old_name}` in nodes names was replaced by" + f" `{new_name}`" ) diff --git a/pyproject.toml b/pyproject.toml index 25e06d4bffa..0becad96256 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -108,7 +108,10 @@ files = [ [tool.ruff] line-length = 120 -exclude = ["nncf/tensorflow/__init__.py"] +exclude = [ + "nncf/tensorflow/__init__.py", + "nncf/experimental/torch2/function_hook/handle_inner_functions.py" +] [tool.ruff.lint] preview = true @@ -124,12 +127,19 @@ ignore = [ "SIM117", # multiple-with-statements "SIM103", # needless-bool "NPY002", # numpy-legacy-random + "UP006", # non-pep585-annotation + "UP007", # non-pep604-annotation-union + "UP035", # deprecated-import + "UP038", # non-pep604-isinstance + "UP045", # non-pep604-annotation-optional ] select = [ "E", # pycodestyle rules "F", # pyflakes rules "CPY001", # copyright check "NPY", # numpy rules + "UP", # pyupgrade + "EM" # flake8-errmsg ] extend-select = [ "SIM", # https://pypi.org/project/flake8-simplify diff --git a/tests/common/experimental/test_reducers_and_aggregators.py b/tests/common/experimental/test_reducers_and_aggregators.py index 334640f996e..93bd3dce909 100644 --- a/tests/common/experimental/test_reducers_and_aggregators.py +++ b/tests/common/experimental/test_reducers_and_aggregators.py @@ -567,9 +567,10 @@ def test_reducers_name_hash_equal(self, reducer_name, reducers): params["inplace"] = [False, True] params["channel_axis"] = [1, 2] else: - raise nncf.ValidationError( + msg = ( f"test_min_max_mean_reducer_hash_equal configurated in a wrong way. Wrong reducer_name: {reducer_name}" ) + raise nncf.ValidationError(msg) def product_dict(**kwargs): keys = kwargs.keys() diff --git a/tests/common/pruning/test_pruning_operations.py b/tests/common/pruning/test_pruning_operations.py index 692da013c73..21b349f39fb 100644 --- a/tests/common/pruning/test_pruning_operations.py +++ b/tests/common/pruning/test_pruning_operations.py @@ -259,8 +259,8 @@ def test_linear_pruning_ops(): ) # Check linear layer always accept pruned input assert dummy_types.LinearPruningOp.accept_pruned_input(linear_op_target) - ones_input_mask = NPNNCFTensor(np.ones((in_features))) - ones_output_mask = NPNNCFTensor(np.ones((out_features))) + ones_input_mask = NPNNCFTensor(np.ones(in_features)) + ones_output_mask = NPNNCFTensor(np.ones(out_features)) # Check all combinations of masks for input_mask in [None, ones_input_mask]: for output_mask in [None, ones_output_mask]: diff --git a/tests/common/quantization/data_generators.py b/tests/common/quantization/data_generators.py index 98a2c48fb69..fa83b75ec6f 100644 --- a/tests/common/quantization/data_generators.py +++ b/tests/common/quantization/data_generators.py @@ -356,10 +356,11 @@ def check_outputs(arr_a: np.array, arr_b: np.array, is_near_mid_point: np.array, num_fail_spec_points = sum(np.invert(isclose_spec_points)) if num_fail_points or num_fail_spec_points: - raise ValueError( + msg = ( f"Points: {num_fail_points} / {len(isclose_points)} | max_d={arr_diff_points.max():.8f} " f"Mid_points: {num_fail_spec_points} / {len(isclose_spec_points)} | max_d={arr_diff_spec_points.max():.8f}" ) + raise ValueError(msg) def scatter_plot( diff --git a/tests/common/quantization/mock_graphs.py b/tests/common/quantization/mock_graphs.py index 04ce6b37a17..d1ba9c9ad5a 100644 --- a/tests/common/quantization/mock_graphs.py +++ b/tests/common/quantization/mock_graphs.py @@ -413,7 +413,7 @@ def get_sequentially_connected_model_graph(op_name_keys: List[str]) -> nx.DiGrap if node_key in 
OP_NAMES_IN_TEST_WITH_MODULE_ATTRIBUTES: attrs[NNCFNode.LAYER_ATTRIBUTES] = MagicMock() - actual_key = node_key + "_{}".format(node_key_appearances[node_key]) + actual_key = node_key + f"_{node_key_appearances[node_key]}" graph.add_node(actual_key, **attrs) node_key_appearances[node_key] += 1 actual_keys.append(actual_key) diff --git a/tests/common/test_logging.py b/tests/common/test_logging.py index b3f8b46b213..4f3eeafd854 100644 --- a/tests/common/test_logging.py +++ b/tests/common/test_logging.py @@ -48,7 +48,7 @@ def test_set_log_file(messages, expected): writer = level_to_fn_map[message_level] writer(message) - with open(log_file, "r", encoding="utf8") as f: + with open(log_file, encoding="utf8") as f: lines = f.readlines() for actual_line, expected_line in zip(lines, expected): diff --git a/tests/common/test_statistics_aggregator.py b/tests/common/test_statistics_aggregator.py index 8a6d0a95e28..58a05879616 100644 --- a/tests/common/test_statistics_aggregator.py +++ b/tests/common/test_statistics_aggregator.py @@ -599,7 +599,8 @@ def test_statistics_aggregator_bias_correction( elif test_params.collector_type == BCStatsCollectors.RAW: tensor_collector = algo_backend.raw_statistic_collector(len(dataset_samples)) else: - raise nncf.InvalidCollectorTypeError(f"Invalid collector type: {test_params.collector_type}") + msg = f"Invalid collector type: {test_params.collector_type}" + raise nncf.InvalidCollectorTypeError(msg) target_point = self.get_target_point(test_params.target_type) @@ -633,7 +634,8 @@ def filter_func(point): ret_val = stat.values test_params.ref_values = dataset_samples else: - raise nncf.InvalidCollectorTypeError(f"Invalid collector type: {test_params.collector_type}") + msg = f"Invalid collector type: {test_params.collector_type}" + raise nncf.InvalidCollectorTypeError(msg) for val, ref in zip(ret_val, test_params.ref_values): if isinstance(ref, np.ndarray): diff --git a/tests/cross_fw/install/common.py b/tests/cross_fw/install/common.py index 3096c64a4e6..900436e299a 100644 --- a/tests/cross_fw/install/common.py +++ b/tests/cross_fw/install/common.py @@ -26,7 +26,8 @@ def excluded_module(name, excluded_modules_patterns): def onerror(name, excluded_modules_patterns): if not excluded_module(name, excluded_modules_patterns): - raise nncf.InternalError(f"Could not import {name}") + msg = f"Could not import {name}" + raise nncf.InternalError(msg) def load_nncf_modules(excluded_modules_patterns, verbose=False): diff --git a/tests/cross_fw/install/install_checks_torch.py b/tests/cross_fw/install/install_checks_torch.py index 7e37c392a80..f64347f3973 100644 --- a/tests/cross_fw/install/install_checks_torch.py +++ b/tests/cross_fw/install/install_checks_torch.py @@ -16,9 +16,8 @@ import nncf if len(sys.argv) != 3: - raise nncf.ValidationError( - "Must be run with an execution type as argument (either 'cpu' or 'gpu') and package type" - ) + msg = "Must be run with an execution type as argument (either 'cpu' or 'gpu') and package type" + raise nncf.ValidationError(msg) execution_type = sys.argv[1] package_type = sys.argv[2] @@ -53,4 +52,5 @@ input_tensor, input_low_tensor, input_high_tensor, levels ) else: - raise nncf.ValidationError(f"Invalid execution type {execution_type} (expected 'cpu' or 'gpu')!") + msg = f"Invalid execution type {execution_type} (expected 'cpu' or 'gpu')!" 
+ raise nncf.ValidationError(msg) diff --git a/tests/cross_fw/install/test_install.py b/tests/cross_fw/install/test_install.py index 34a07ef2cb2..f1adbcedef2 100644 --- a/tests/cross_fw/install/test_install.py +++ b/tests/cross_fw/install/test_install.py @@ -28,7 +28,8 @@ def run_install_checks(venv_path: Path, tmp_path: Path, package_type: str, backend: str, install_type: str): if install_type.lower() not in ["cpu", "gpu"]: - raise ValueError("Unknown installation mode - must be either 'cpu' or 'gpu'") + msg = "Unknown installation mode - must be either 'cpu' or 'gpu'" + raise ValueError(msg) python_executable_with_venv = get_python_executable_with_venv(venv_path) diff --git a/tests/cross_fw/shared/command.py b/tests/cross_fw/shared/command.py index eb7e57584c3..e7d6243d116 100644 --- a/tests/cross_fw/shared/command.py +++ b/tests/cross_fw/shared/command.py @@ -37,8 +37,6 @@ def __init__(self, cmd: str, cwd: Path = None, env: Dict = None): # set system/version dependent "start_new_session" analogs if is_windows(): self.kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) - if sys.version_info < (3, 2): # assume posix - self.kwargs.update(preexec_fn=os.setsid) else: # Python 3.2+ and Unix self.kwargs.update(start_new_session=True) diff --git a/tests/cross_fw/shared/helpers.py b/tests/cross_fw/shared/helpers.py index da827dd5867..b873fd52321 100644 --- a/tests/cross_fw/shared/helpers.py +++ b/tests/cross_fw/shared/helpers.py @@ -28,7 +28,7 @@ def is_linux() -> bool: def get_cli_dict_args(args): cli_args = {} for key, val in args.items(): - cli_key = "--{}".format(str(key)) + cli_key = f"--{str(key)}" cli_args[cli_key] = None if val is not None: cli_args[cli_key] = str(val) @@ -48,7 +48,8 @@ def find_file_by_extension(directory: Path, extension: str) -> str: file_path_str = str(file_path) if file_path_str.endswith(extension): return file_path_str - raise FileNotFoundError("NNCF package not found") + msg = "NNCF package not found" + raise FileNotFoundError(msg) def create_venv_with_nncf(tmp_path: Path, package_type: str, venv_type: str, backends: Set[str] = None): @@ -94,7 +95,8 @@ def create_venv_with_nncf(tmp_path: Path, package_type: str, venv_type: str, bac elif package_type == "build_w": run_cmd_line = f"{python_executable_with_venv} -m build -w --outdir {dist_path}" else: - raise ValueError(f"Invalid package type: {package_type}") + msg = f"Invalid package type: {package_type}" + raise ValueError(msg) subprocess.run(run_cmd_line, check=True, shell=True, cwd=PROJECT_ROOT) diff --git a/tests/cross_fw/shared/json.py b/tests/cross_fw/shared/json.py index 16c36745025..2a9fc5d7899 100644 --- a/tests/cross_fw/shared/json.py +++ b/tests/cross_fw/shared/json.py @@ -24,7 +24,7 @@ def load_json(stats_path: Path): - with open(stats_path, "r", encoding="utf8") as json_file: + with open(stats_path, encoding="utf8") as json_file: return json.load(json_file) diff --git a/tests/cross_fw/shared/nx_graph.py b/tests/cross_fw/shared/nx_graph.py index eb0116c4113..0305ecda92c 100644 --- a/tests/cross_fw/shared/nx_graph.py +++ b/tests/cross_fw/shared/nx_graph.py @@ -23,7 +23,7 @@ def sort_dot(path): - with open(path, "r", encoding="utf8") as f: + with open(path, encoding="utf8") as f: content = f.readlines() start_line = "strict digraph {\n" end_line = "}\n" @@ -41,16 +41,18 @@ def __init__( ): if node_id is not None: if edge_start_id is not None or edge_end_id is not None: - raise nncf.ValidationError( + msg = ( "Invalid node order parsed from graph line - " "must specify either `node_id` or a 
pair of `edge_start_id`/`edge_end_id`!" ) + raise nncf.ValidationError(msg) else: if edge_start_id is None or edge_end_id is None: - raise nncf.ValidationError( + msg = ( "Invalid node order - must specify both `edge_start_id` and `edge_end_id` " "if node_id is None!" ) + raise nncf.ValidationError(msg) self.node_id = node_id self.edge_start_id = edge_start_id self.edge_end_id = edge_end_id @@ -76,14 +78,16 @@ def graph_key(line: str) -> LineOrder: extract_ids_regex = r'^"(\d+) ' start_id_matches = re.search(extract_ids_regex, line) if start_id_matches is None: - raise nncf.InternalError(f"Could not parse first node ID in node name: {line}") + msg = f"Could not parse first node ID in node name: {line}" + raise nncf.InternalError(msg) start_id = int(start_id_matches.group(1)) edge_indicator = " -> " if edge_indicator in line: end_node_and_attrs_str = line.split(edge_indicator)[1] end_id_matches = re.search(extract_ids_regex, end_node_and_attrs_str) if end_id_matches is None: - raise nncf.InternalError(f"Could not parse end node ID in node name: {end_node_and_attrs_str}") + msg = f"Could not parse end node ID in node name: {end_node_and_attrs_str}" + raise nncf.InternalError(msg) end_id = int(end_id_matches.group(1)) return LineOrder(edge_start_id=start_id, edge_end_id=end_id) return LineOrder(node_id=int(start_id)) diff --git a/tests/cross_fw/test_templates/test_channel_alignment.py b/tests/cross_fw/test_templates/test_channel_alignment.py index 5ac1f8bba73..ed5d92fac88 100644 --- a/tests/cross_fw/test_templates/test_channel_alignment.py +++ b/tests/cross_fw/test_templates/test_channel_alignment.py @@ -476,7 +476,8 @@ def dims_iter(*args, **kwargs): _class = weights_update_cls _attr = "weight_value" else: - raise nncf.ValidationError(f"Wrong type of transformation: {type(transformation)}") + msg = f"Wrong type of transformation: {type(transformation)}" + raise nncf.ValidationError(msg) target_names[tp.target_node_name].append(_class) assert ref_values[tp.target_node_name][_attr] == getattr(transformation, _attr) diff --git a/tests/onnx/benchmarking/run_ptq.py b/tests/onnx/benchmarking/run_ptq.py index 000a921bbda..77eb47db712 100644 --- a/tests/onnx/benchmarking/run_ptq.py +++ b/tests/onnx/benchmarking/run_ptq.py @@ -40,7 +40,8 @@ def process_fn(data_item, model_evaluator: ModelEvaluator, has_batch_dim: Option if len(filled_inputs) == 1: return {k: np.squeeze(v, axis=0) if has_batch_dim else v for k, v in filled_inputs[0].items()} - raise Exception("len(filled_inputs) should be one.") + msg = "len(filled_inputs) should be one." + raise Exception(msg) def run( @@ -72,7 +73,7 @@ def run( ) # Save the quantized model. 
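The EM-rule rewrites above all follow the same mechanical recipe: bind the message to a local `msg` first, then raise it, so the raising line echoed in the traceback no longer duplicates the literal. A minimal standalone sketch of the pattern, with hypothetical names not taken from this patch:

```
# Flagged by flake8-errmsg (EM102): the f-string literal sits inside the raise.
#     raise ValueError(f"unsupported execution type: {execution_type}")
def check_execution_type(execution_type: str) -> None:
    # Compliant form: assign the message first, then raise the bound variable.
    if execution_type not in ("cpu", "gpu"):
        msg = f"unsupported execution type: {execution_type}"
        raise ValueError(msg)
```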
onnx.save(quantized_model, output_model_path) - print("The quantized model is saved to: {}".format(output_model_path)) + print(f"The quantized model is saved to: {output_model_path}") if __name__ == "__main__": diff --git a/tests/onnx/models.py b/tests/onnx/models.py index 6920895d976..29c3f8b9662 100644 --- a/tests/onnx/models.py +++ b/tests/onnx/models.py @@ -975,7 +975,7 @@ def __init__(self): to=onnx.TensorProto.FLOAT, ) - tensor = np.array((1)).astype(np.float32) + tensor = np.array(1).astype(np.float32) tensor_name = "Tensor" initializer_tensor = create_initializer_tensor( name=tensor_name, tensor_array=tensor, data_type=onnx.TensorProto.FLOAT diff --git a/tests/onnx/quantization/test_classification_models_graph.py b/tests/onnx/quantization/test_classification_models_graph.py index 12ed1639d68..735ab7958fd 100644 --- a/tests/onnx/quantization/test_classification_models_graph.py +++ b/tests/onnx/quantization/test_classification_models_graph.py @@ -46,7 +46,8 @@ def model_builder(model_name): return models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1) if model_name == "mnasnet0_5": return models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1) - raise ValueError(f"Unknown model name {model_name}") + msg = f"Unknown model name {model_name}" + raise ValueError(msg) TORCHVISION_TEST_DATA = [ diff --git a/tests/onnx/quantization/test_fast_bias_correction.py b/tests/onnx/quantization/test_fast_bias_correction.py index f7b3492a88f..4f1f6e97e93 100644 --- a/tests/onnx/quantization/test_fast_bias_correction.py +++ b/tests/onnx/quantization/test_fast_bias_correction.py @@ -68,4 +68,5 @@ def check_bias(model: onnx.ModelProto, ref_bias: list): # TODO(AlexanderDokuchaev): return atol=0.0001 after fix 109189 assert np.all(np.isclose(bias_value, ref_bias, atol=0.01)), f"{bias_value} != {ref_bias}" return - raise ValueError("Not found node with bias") + msg = "Not found node with bias" + raise ValueError(msg) diff --git a/tests/onnx/test_e2e_ptq.py b/tests/onnx/test_e2e_ptq.py index 82c4a2cb59d..2275350b656 100644 --- a/tests/onnx/test_e2e_ptq.py +++ b/tests/onnx/test_e2e_ptq.py @@ -197,7 +197,7 @@ def _read_accuracy_checker_result(root_dir: Path, key: str) -> pd.DataFrame: def _read_reference_json(fpath: Path) -> pd.DataFrame: fpath = str(fpath) - with open(fpath, "r", encoding="utf-8") as fp: + with open(fpath, encoding="utf-8") as fp: d0 = json.load(fp) rows = [] diff --git a/tests/onnx/test_model_transformer.py b/tests/onnx/test_model_transformer.py index 53df1b61f7b..4870597ebb3 100644 --- a/tests/onnx/test_model_transformer.py +++ b/tests/onnx/test_model_transformer.py @@ -79,7 +79,7 @@ def test_quantizer_insertion(target_layers, should_raise, quantizer_number): TARGET_LAYERS = ["Conv1", "BN1", "ReLU1"] -QUANTIZER_SCALES = [np.array(3.0), 13.2 * np.ones((32)), np.array(17.1)] +QUANTIZER_SCALES = [np.array(3.0), 13.2 * np.ones(32), np.array(17.1)] QUANTIZER_ZERO_POINT = [np.array(1, dtype=np.int32), 2 * np.ones((32), dtype=np.int32), np.array(0, dtype=np.int32)] QUANTIZER_ONNX_DTYPE = [np.dtype(np.int8), np.dtype(np.int8), np.dtype(np.uint8)] QUANTIZER_ONNX_ATTRIBUTES = [{"axis": 0}, {"axis": 0}, {"axis": 0}] diff --git a/tests/onnx/test_statistics_caching.py b/tests/onnx/test_statistics_caching.py index 2094fa1d086..aa0b85a8e7b 100644 --- a/tests/onnx/test_statistics_caching.py +++ b/tests/onnx/test_statistics_caching.py @@ -25,4 +25,4 @@ def get_statistics_aggregator(self): return ONNXStatisticsAggregator(None) def _create_dummy_min_max_tensor(self) -> Tensor: - 
return Tensor(np.zeros((3))), Tensor(np.ones((3))) + return Tensor(np.zeros(3)), Tensor(np.ones(3)) diff --git a/tests/openvino/native/common.py b/tests/openvino/native/common.py index 09a30873eb0..d85f12a2627 100644 --- a/tests/openvino/native/common.py +++ b/tests/openvino/native/common.py @@ -56,7 +56,7 @@ def get_dataset_for_test(model): def load_json(stats_path): - with open(stats_path, "r", encoding="utf8") as json_file: + with open(stats_path, encoding="utf8") as json_file: return json.load(json_file) diff --git a/tests/openvino/native/models.py b/tests/openvino/native/models.py index e5efdaf8235..d35ec98ed59 100644 --- a/tests/openvino/native/models.py +++ b/tests/openvino/native/models.py @@ -482,9 +482,9 @@ def _create_ov_model(self): initial_cell_state = opset.parameter([1, 1, 128], name="initial_cell_state") seq_len = opset.constant(np.array([2]), dtype=np.int32) - W = opset.constant(np.zeros(([1, 512, 16])), dtype=np.float32) - R = opset.constant(np.zeros(([1, 512, 128])), dtype=np.float32) - B = opset.constant(np.zeros(([1, 512])), dtype=np.float32) + W = opset.constant(np.zeros([1, 512, 16]), dtype=np.float32) + R = opset.constant(np.zeros([1, 512, 128]), dtype=np.float32) + B = opset.constant(np.zeros([1, 512]), dtype=np.float32) lstm = opset.lstm_sequence( x, initial_hidden_state, initial_cell_state, seq_len, W, R, B, 128, "FORWARD", name="LSTMSequence" @@ -507,9 +507,9 @@ def _create_ov_model(self, linear_before_reset=True): seq_len = opset.constant(np.array([1, 2, 3]), dtype=np.int32) scale_factor = 4 if linear_before_reset else 3 - W = opset.constant(np.zeros(([1, 3 * hidden_size, 16])), dtype=np.float32) - R = opset.constant(np.zeros(([1, 3 * hidden_size, hidden_size])), dtype=np.float32) - B = opset.constant(np.zeros(([1, scale_factor * hidden_size])), dtype=np.float32) + W = opset.constant(np.zeros([1, 3 * hidden_size, 16]), dtype=np.float32) + R = opset.constant(np.zeros([1, 3 * hidden_size, hidden_size]), dtype=np.float32) + B = opset.constant(np.zeros([1, scale_factor * hidden_size]), dtype=np.float32) gru = opset.gru_sequence( x, diff --git a/tests/openvino/native/quantization/test_gptq.py b/tests/openvino/native/quantization/test_gptq.py index ea6c75546e2..a141d7c99fe 100644 --- a/tests/openvino/native/quantization/test_gptq.py +++ b/tests/openvino/native/quantization/test_gptq.py @@ -36,7 +36,7 @@ def quantize(x, scale, zero, minq, maxq): class GPTQQuantizer(torch.nn.Module): def __init__(self, shape=1): - super(GPTQQuantizer, self).__init__() + super().__init__() self.register_buffer("maxq", torch.tensor(0)) self.register_buffer("scale", torch.zeros(shape)) self.register_buffer("zero", torch.zeros(shape)) diff --git a/tests/openvino/native/test_fast_bias_correction.py b/tests/openvino/native/test_fast_bias_correction.py index 82ce89e2835..ea5b5ccf133 100644 --- a/tests/openvino/native/test_fast_bias_correction.py +++ b/tests/openvino/native/test_fast_bias_correction.py @@ -66,4 +66,5 @@ def check_bias(model: ov.Model, ref_bias: list): assert np.all(np.isclose(bias_value, ref_bias, atol=atol)), f"{bias_value} != {ref_bias}" return - raise ValueError("Not found node with bias") + msg = "Not found node with bias" + raise ValueError(msg) diff --git a/tests/openvino/native/test_layer_attributes.py b/tests/openvino/native/test_layer_attributes.py index c5056d236be..66c942eaed0 100644 --- a/tests/openvino/native/test_layer_attributes.py +++ b/tests/openvino/native/test_layer_attributes.py @@ -129,7 +129,7 @@ def get_lstm(input_1, node_name, input_shape): 
num_directions = 1 hs = opset.constant(np.ones((batch_size, num_directions, hidden_size)), dtype=np.float32, name="hs") cs = opset.constant(np.ones((batch_size, num_directions, hidden_size)), dtype=np.float32, name="cs") - seq_len_const = opset.constant(np.ones((batch_size)), dtype=np.int32, name="seq_len_const") + seq_len_const = opset.constant(np.ones(batch_size), dtype=np.int32, name="seq_len_const") w = opset.constant(np.ones((num_directions, 4 * hidden_size, input_size)), dtype=np.float32, name="w") r = opset.constant(np.ones((num_directions, 4 * hidden_size, hidden_size)), dtype=np.float32, name="r") b = opset.constant(np.ones((num_directions, 4 * hidden_size)), dtype=np.float32, name="b") diff --git a/tests/openvino/native/test_model_transformer.py b/tests/openvino/native/test_model_transformer.py index 75abe8f2465..a09b6050e38 100644 --- a/tests/openvino/native/test_model_transformer.py +++ b/tests/openvino/native/test_model_transformer.py @@ -116,8 +116,8 @@ def create_fake_quantize_params() -> FakeQuantizeParameters: def create_fake_convert_params(destination_type: FP8Type) -> FakeConvertParameters: - scale = Tensor(np.ones((1)).astype(np.float32)) - shift = Tensor(np.zeros((1)).astype(np.float32)) + scale = Tensor(np.ones(1).astype(np.float32)) + shift = Tensor(np.zeros(1).astype(np.float32)) return FakeConvertParameters(scale, shift, destination_type) diff --git a/tests/openvino/native/test_statistics_caching.py b/tests/openvino/native/test_statistics_caching.py index cc2d27442e7..2f57e96d834 100644 --- a/tests/openvino/native/test_statistics_caching.py +++ b/tests/openvino/native/test_statistics_caching.py @@ -25,4 +25,4 @@ def get_statistics_aggregator(self): return OVStatisticsAggregator(None) def _create_dummy_min_max_tensor(self) -> Tensor: - return Tensor(np.zeros((3))), Tensor(np.ones((3))) + return Tensor(np.zeros(3)), Tensor(np.ones(3)) diff --git a/tests/openvino/native/test_tensor.py b/tests/openvino/native/test_tensor.py index f105c011f59..58df3d19204 100644 --- a/tests/openvino/native/test_tensor.py +++ b/tests/openvino/native/test_tensor.py @@ -34,10 +34,12 @@ def to_tensor(x, backend=TensorBackend.ov, dtype=TensorDataType.float32): return ov.Tensor(np.array(x, dtype=DTYPE_MAP_NP[dtype])) elif backend == TensorBackend.numpy: if dtype in [TensorDataType.bfloat16, TensorDataType.uint4, TensorDataType.int4]: - raise ValueError(f"Can't create NumPY tensor in dtype {dtype}") + msg = f"Can't create NumPY tensor in dtype {dtype}" + raise ValueError(msg) return np.array(x, dtype=DTYPE_MAP_NP[dtype]) else: - raise ValueError("Unsupported backend") + msg = "Unsupported backend" + raise ValueError(msg) @staticmethod def backend() -> TensorBackend: diff --git a/tests/openvino/tools/calibrate.py b/tests/openvino/tools/calibrate.py index a2240453674..b12573c74c0 100644 --- a/tests/openvino/tools/calibrate.py +++ b/tests/openvino/tools/calibrate.py @@ -131,7 +131,8 @@ def default(self, o): return o.value if isinstance(o, (IgnoredScope, AdvancedQuantizationParameters, AdvancedAccuracyRestorerParameters)): return asdict(o) - raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable") + msg = f"Object of type {o.__class__.__name__} is not JSON serializable" + raise TypeError(msg) class ACValidationFunction: @@ -237,9 +238,8 @@ def __call__(self, compiled_model: ov.CompiledModel, indices: Optional[Iterable[ def _output_callback(self, raw_predictions, **kwargs): if not ("metrics_result" in kwargs and "dataset_indices" in kwargs): - raise nncf.ValidationError( - 
"Expected `metrics_result`, `dataset_indices` be passed to output_callback inside accuracy checker" - ) + msg = "Expected `metrics_result`, `dataset_indices` be passed to output_callback inside accuracy checker" + raise nncf.ValidationError(msg) metrics_result = kwargs["metrics_result"] if metrics_result is None: @@ -324,34 +324,39 @@ def set_algorithm_parameters_context(ctx): def map_target_device(target_device): target_device = target_device.upper() if target_device not in [t.value for t in TargetDevice]: - raise ValueError(f"{target_device} target device is not supported") + msg = f"{target_device} target device is not supported" + raise ValueError(msg) return {"target_device": TargetDevice(target_device)} def map_model_type(model_type): model_type = model_type.lower() if model_type not in [m.value for m in ModelType]: - raise ValueError(f"{model_type} model type is not supported") + msg = f"{model_type} model type is not supported" + raise ValueError(msg) return {"model_type": ModelType(model_type)} def map_drop_type(drop_type): drop_type = drop_type.lower() if drop_type not in [m.value for m in DropType]: - raise ValueError(f"{drop_type} drop type is not supported") + msg = f"{drop_type} drop type is not supported" + raise ValueError(msg) return {"drop_type": DropType(drop_type)} def map_ignored_scope(ignored): if ignored.get("skip_model") is not None: - raise ValueError("skip_model attribute in the ignored tag is not supported") + msg = "skip_model attribute in the ignored tag is not supported" + raise ValueError(msg) operations = ignored.get("operations") ignored_operations = [] if operations is not None: for op in operations: if op.get("attributes") is not None: - raise ValueError('"attributes" in the ignored operations ' "are not supported") + msg = '"attributes" in the ignored operations ' "are not supported" + raise ValueError(msg) ignored_operations.append(op["type"]) return {"ignored_scope": IgnoredScope(names=ignored.get("scope", []), types=ignored_operations)} @@ -359,7 +364,8 @@ def map_ignored_scope(ignored): def map_preset(preset): preset = preset.lower() if preset not in [p.value for p in QuantizationPreset]: - raise ValueError(f"{preset} preset is not supported") + msg = f"{preset} preset is not supported" + raise ValueError(msg) return {"preset": QuantizationPreset(preset)} @@ -376,7 +382,8 @@ def update_statistics_collector_parameters( ): granularity = pot_config.get("granularity") if granularity is not None: - raise ValueError('"granularity" parameter in the range estimator is not supported') + msg = '"granularity" parameter in the range estimator is not supported' + raise ValueError(msg) stat_collector_names = ["statistics_type", "aggregator_type", "clipping_value", "quantile_outlier_prob"] stat_collector_types = [StatisticsType, AggregatorType, float, float] @@ -400,7 +407,8 @@ def update_range_estimator_parameters( ): preset = pot_config.get("preset") if preset is not None: - raise ValueError('"preset" parameter in the range estimator is not supported') + msg = '"preset" parameter in the range estimator is not supported' + raise ValueError(msg) min_config = pot_config.get("min") max_config = pot_config.get("max") @@ -442,10 +450,12 @@ def map_range_estmator(range_estimator): def update_quantization_parameters(quantization_params, pot_config): level_low = pot_config.get("level_low") if level_low is not None: - raise ValueError('"level_low" parameter is not supported') + msg = '"level_low" parameter is not supported' + raise ValueError(msg) level_high = 
pot_config.get("level_high") if level_high is not None: - raise ValueError('"level_high" parameter is not supported') + msg = '"level_high" parameter is not supported' + raise ValueError(msg) num_bits = pot_config.get("bits") if num_bits is not None: quantization_params.num_bits = num_bits @@ -456,7 +466,8 @@ def update_quantization_parameters(quantization_params, pot_config): elif mode == "asymmetric": quantization_params.mode = QuantizationScheme.ASYMMETRIC else: - raise ValueError(f"mode = {mode} is not supported") + msg = f"mode = {mode} is not supported" + raise ValueError(msg) granularity = pot_config.get("granularity") if granularity is not None: if granularity == "perchannel": @@ -464,7 +475,8 @@ def update_quantization_parameters(quantization_params, pot_config): elif mode == "pertensor": quantization_params.per_channel = False else: - raise ValueError(f"granularity = {granularity} is not supported") + msg = f"granularity = {granularity} is not supported" + raise ValueError(msg) def map_weights(weights): @@ -548,7 +560,8 @@ def map_smooth_quant_alpha(smooth_quant_alpha): def map_mode(mode): if not hasattr(QuantizationMode, mode): - raise ValueError(f"{mode} mode is not supported") + msg = f"{mode} mode is not supported" + raise ValueError(msg) return {"mode": getattr(QuantizationMode, mode)} @@ -610,7 +623,8 @@ def create_parameters_for_algorithm( if kwarg is not None: ctx.params.update(kwarg) else: - raise ValueError(f"{name} parameter is not supported") + msg = f"{name} parameter is not supported" + raise ValueError(msg) return ctx.params @@ -714,18 +728,21 @@ def map_paramaters(pot_algo_name, nncf_algo_name, pot_parameters, output_dir): return map_quantization_parameters(pot_parameters) if nncf_algo_name == "quantize_with_accuracy_control": return map_quantize_with_accuracy_control_parameters(pot_parameters, output_dir) - raise ValueError(f"Mapping POT {pot_algo_name} parameters to NNCF {nncf_algo_name} parameters is not supported") + msg = f"Mapping POT {pot_algo_name} parameters to NNCF {nncf_algo_name} parameters is not supported" + raise ValueError(msg) def get_model_paths(model_config): if model_config.cascade: - raise ValueError("Cascade models are not supported yet.") + msg = "Cascade models are not supported yet." + raise ValueError(msg) return model_config.model, model_config.weights def get_accuracy_checker_config(engine_config): if engine_config.type != "accuracy_checker": - raise ValueError(f"Engine type {engine_config.type} is not supported.") + msg = f"Engine type {engine_config.type} is not supported." + raise ValueError(msg) return engine_config @@ -735,7 +752,8 @@ def get_nncf_algorithms_config(compression_config, output_dir): for pot_algo in compression_config.algorithms: pot_algo_name = pot_algo.name if pot_algo_name not in MAP_POT_NNCF_ALGORITHMS: - raise ValueError(f"Algorithm {pot_algo_name} is not supported.") + msg = f"Algorithm {pot_algo_name} is not supported." 
+ raise ValueError(msg) nncf_algo_name = MAP_POT_NNCF_ALGORITHMS[pot_algo_name]["method"] advanced_parameters = MAP_POT_NNCF_ALGORITHMS[pot_algo_name].get("advanced_parameters", None) @@ -788,23 +806,24 @@ def maybe_reshape_model(model, dataset, subset_size, input_to_tensor_name): model_inputs_shapes[input_to_tensor_name[input_node.friendly_name]] = tuple(partial_shape) if len(dataset_inputs_shapes) != len(model_inputs_shapes): - raise nncf.InternalError( + msg = ( f"Model inputs: {list(model_inputs_shapes.keys())}" f" and dataset inputs {list(dataset_inputs_shapes.keys())} are not compatible" ) + raise nncf.InternalError(msg) for name in model_inputs_shapes: if name not in dataset_inputs_shapes: - raise nncf.ValidationError( - f"Model input {name} is not present in dataset inputs: {list(dataset_inputs_shapes.keys())}" - ) + msg = f"Model input {name} is not present in dataset inputs: {list(dataset_inputs_shapes.keys())}" + raise nncf.ValidationError(msg) dynamic_dims = defaultdict(list) reshaped_static_dims = defaultdict(list) for name, shapes in dataset_inputs_shapes.items(): shapes = list(shapes) if len(set(len(shape) for shape in shapes)) != 1 or len(model_inputs_shapes[name]) != len(shapes[0]): - raise nncf.InternalError("calibrate.py does not support dataset with dynamic ranks") + msg = "calibrate.py does not support dataset with dynamic ranks" + raise nncf.InternalError(msg) for idx in range(len(shapes[0])): if len(shapes) == 1: @@ -1182,7 +1201,8 @@ def main(): keys = ["xml_path", "quantization_parameters"] dump_to_json(path, quantize_model_arguments, keys) else: - raise nncf.InternalError(f"Support for {algo_name} is not implemented in the optimize tool.") + msg = f"Support for {algo_name} is not implemented in the optimize tool." + raise nncf.InternalError(msg) model_name = config.model.model_name output_model_path = os.path.join(output_dir, f"{model_name}.xml") diff --git a/tests/openvino/tools/config.py b/tests/openvino/tools/config.py index 2a538014368..ad31028b403 100644 --- a/tests/openvino/tools/config.py +++ b/tests/openvino/tools/config.py @@ -79,15 +79,19 @@ def _validate_model_conf(self): nncf_logger.warning("Cascade is defined with single model") if not models: - raise nncf.ValidationError("Path to input model xml and bin is required.") + msg = "Path to input model xml and bin is required." + raise nncf.ValidationError(msg) for model in models: if len(models) > 1 and not model.name: - raise nncf.ValidationError("Name of input model is required.") + msg = "Name of input model is required." + raise nncf.ValidationError(msg) if not model.model: - raise nncf.ValidationError("Path to input model xml is required.") + msg = "Path to input model xml is required." + raise nncf.ValidationError(msg) if not model.weights: - raise nncf.ValidationError("Path to input model bin is required.") + msg = "Path to input model bin is required." 
+ raise nncf.ValidationError(msg) def validate_algo_config(self): """ @@ -252,11 +256,13 @@ def _configure_engine_params(self): self.engine.type = "accuracy_checker" elif engine.type == "simplified": if engine.data_source is None: - raise KeyError("Missed data dir for simplified engine") + msg = "Missed data dir for simplified engine" + raise KeyError(msg) self.engine.device = engine.device if engine.device else "CPU" engine.data_source = Path(engine.data_source) else: - raise KeyError("Unsupported engine type") + msg = "Unsupported engine type" + raise KeyError(msg) def _configure_ac_params(self): """Converts engine config into accuracy checker config""" @@ -276,9 +282,7 @@ def _configure_ac_params(self): if not dataset.preprocessing: dataset["preprocessing"] = preprocessing_config else: - nncf_logger.debug( - "Local preprocessing configuration is used for {} dataset".format(dataset_name) - ) + nncf_logger.debug(f"Local preprocessing configuration is used for {dataset_name} dataset") ConfigReader.check_local_config(ac_conf) ac_conf = ConfigReader.convert_paths(ac_conf) ConfigReader._filter_launchers(ac_conf, filtering_params, mode=mode) @@ -320,7 +324,7 @@ def _configure_logger_params(self): if "optimizer" in self: log_algo_name = "{}_{}".format(log_algo_name, self["optimizer"]["name"]) for algo in self["compression"]["algorithms"]: - log_algo_name = ("{}_{}".format(log_algo_name, algo.name)) if log_algo_name else algo.name + log_algo_name = (f"{log_algo_name}_{algo.name}") if log_algo_name else algo.name self.model.log_algo_name = log_algo_name def get_model_paths(self): @@ -343,7 +347,8 @@ def read_config_from_file(path): return yaml.load(f, Loader=yaml.SafeLoader) if extension in (".json",): return json.load(f) - raise nncf.InternalError('Unknown file extension for the file "{}"'.format(path)) + msg = f'Unknown file extension for the file "{path}"' + raise nncf.InternalError(msg) def check_params(algo_name, config, supported_params): @@ -354,9 +359,11 @@ def check_params(algo_name, config, supported_params): """ for key, value in config.items(): if key not in supported_params: - raise nncf.InternalError("Algorithm {}. Unknown parameter: {}".format(algo_name, key)) + msg = f"Algorithm {algo_name}. Unknown parameter: {key}" + raise nncf.InternalError(msg) if isinstance(value, dict): if isinstance(supported_params[key], dict): check_params(algo_name, value, supported_params[key]) else: - raise nncf.InternalError("Algorithm {}. Wrong structure for parameter: {}".format(algo_name, key)) + msg = f"Algorithm {algo_name}. 
Wrong structure for parameter: {key}" + raise nncf.InternalError(msg) diff --git a/tests/post_training/experimental/sparsify_activations/model_scope.py b/tests/post_training/experimental/sparsify_activations/model_scope.py index 431fa3563a5..c721683f6a5 100644 --- a/tests/post_training/experimental/sparsify_activations/model_scope.py +++ b/tests/post_training/experimental/sparsify_activations/model_scope.py @@ -104,10 +104,12 @@ def generate_tests_scope(models_list: List[Dict]) -> Dict[str, Dict]: model_param.pop("backends") if backend == BackendType.FP32: if model_id in fp32_models: - raise nncf.ValidationError(f"Duplicate test case for {model_id} with FP32 backend") + msg = f"Duplicate test case for {model_id} with FP32 backend" + raise nncf.ValidationError(msg) fp32_models.add(model_id) if test_case_name in tests_scope: - raise nncf.ValidationError(f"{test_case_name} already in tests_scope") + msg = f"{test_case_name} already in tests_scope" + raise nncf.ValidationError(msg) tests_scope[test_case_name] = model_param return tests_scope diff --git a/tests/post_training/experimental/sparsify_activations/pipelines.py b/tests/post_training/experimental/sparsify_activations/pipelines.py index b04512f22cb..2c63283e224 100644 --- a/tests/post_training/experimental/sparsify_activations/pipelines.py +++ b/tests/post_training/experimental/sparsify_activations/pipelines.py @@ -190,7 +190,8 @@ def prepare_model(self): if self.backend in PT_BACKENDS: if is_stateful: - raise RuntimeError(f"is_stateful={is_stateful} is not supported for PyTorch backend.") + msg = f"is_stateful={is_stateful} is not supported for PyTorch backend." + raise RuntimeError(msg) self.model_hf = AutoModelForCausalLM.from_pretrained( self.model_id, @@ -219,7 +220,8 @@ def prepare_model(self): ) self.model = self.model_hf.model else: - raise RuntimeError(f"backend={self.backend.value} is not supported.") + msg = f"backend={self.backend.value} is not supported." 
+ raise RuntimeError(msg) if not (self.fp32_model_dir / self.OV_MODEL_NAME).exists(): self._dump_model_fp32() @@ -244,9 +246,8 @@ def transform_fn(chunk: List[Dict]): shape[0] = len(samples) inputs[input_name] = ov.Tensor(sample_value.get_element_type(), shape) else: - raise RuntimeError( - f"Failed to generate calibration set for {input_name} in type {type(sample_value)}" - ) + msg = f"Failed to generate calibration set for {input_name} in type {type(sample_value)}" + raise RuntimeError(msg) if self.backend == BackendType.CUDA_TORCH: for input_name in inputs: inputs[input_name] = torch.from_numpy(inputs[input_name]).cuda() diff --git a/tests/post_training/experimental/sparsify_activations/test_sparsify_activations_conformance.py b/tests/post_training/experimental/sparsify_activations/test_sparsify_activations_conformance.py index a642ee13c92..c0d37e6b527 100644 --- a/tests/post_training/experimental/sparsify_activations/test_sparsify_activations_conformance.py +++ b/tests/post_training/experimental/sparsify_activations/test_sparsify_activations_conformance.py @@ -114,7 +114,8 @@ def test_sparsify_activations( start_time = time.perf_counter() try: if test_case_name not in sparsify_activations_reference_data: - raise RuntimeError(f"{test_case_name} is not defined in `sparsify_activations_reference_data` fixture") + msg = f"{test_case_name} is not defined in `sparsify_activations_reference_data` fixture" + raise RuntimeError(msg) test_model_param = SPARSIFY_ACTIVATIONS_TEST_CASES[test_case_name] maybe_skip_test_case(test_model_param, run_fp32_backend, run_torch_cuda_backend, batch_size) fp32_model_params = { diff --git a/tests/post_training/model_scope.py b/tests/post_training/model_scope.py index 3a70f09c8fe..24e363a141d 100644 --- a/tests/post_training/model_scope.py +++ b/tests/post_training/model_scope.py @@ -560,7 +560,8 @@ def generate_tests_scope(models_list: List[Dict]) -> Dict[str, dict]: model_param["backend"] = backend model_param.pop("backends") if test_case_name in tests_scope: - raise nncf.ValidationError(f"{test_case_name} already in tests_scope") + msg = f"{test_case_name} already in tests_scope" + raise nncf.ValidationError(msg) tests_scope[test_case_name] = model_param return tests_scope diff --git a/tests/post_training/pipelines/base.py b/tests/post_training/pipelines/base.py index 9d2f50f7141..1f1de0dc258 100644 --- a/tests/post_training/pipelines/base.py +++ b/tests/post_training/pipelines/base.py @@ -322,7 +322,8 @@ def prepare(self): print("Preparing...") self.prepare_model() if self.model is None: - raise nncf.ValidationError("self.model is None") + msg = "self.model is None" + raise nncf.ValidationError(msg) self.prepare_preprocessor() self.prepare_calibration_dataset() diff --git a/tests/post_training/pipelines/image_classification_base.py b/tests/post_training/pipelines/image_classification_base.py index ee3a0311e6e..3d5b44549d1 100644 --- a/tests/post_training/pipelines/image_classification_base.py +++ b/tests/post_training/pipelines/image_classification_base.py @@ -42,8 +42,8 @@ def _validate(self) -> List[ErrorReport]: dataset_size = len(val_loader) # Initialize result tensors for async inference support. 
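The `image_classification_base.py` hunk just below drops redundant parentheses around scalar shape arguments. A standalone sketch (not from the patch) of why both spellings build the same array:

```
import numpy as np

# (3) is just the integer 3, not a one-element tuple, so the inner
# parentheses never formed a tuple shape; only a trailing comma would.
assert np.zeros((3)).shape == np.zeros(3).shape == (3,)
assert np.zeros((3,)).shape == (3,)
```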
- predictions = np.zeros((dataset_size)) - references = -1 * np.ones((dataset_size)) + predictions = np.zeros(dataset_size) + references = -1 * np.ones(dataset_size) core = ov.Core() diff --git a/tests/post_training/pipelines/lm_weight_compression.py b/tests/post_training/pipelines/lm_weight_compression.py index 270cba09c9f..a2153149e66 100644 --- a/tests/post_training/pipelines/lm_weight_compression.py +++ b/tests/post_training/pipelines/lm_weight_compression.py @@ -63,7 +63,7 @@ def fill(self, stdout: str) -> None: time_regex = r".*•\s(.*)\s•.*" for line in stdout.splitlines(): for attr_name, prefix_regex in zip(self.VAR_NAMES, self.REGEX_PREFIX): - match = re.search(r"{}{}".format(prefix_regex, time_regex), line) + match = re.search(f"{prefix_regex}{time_regex}", line) if match: setattr(self, attr_name, match.group(1)) continue @@ -84,7 +84,8 @@ def prepare_model(self) -> None: # load model if self.backend == BackendType.TORCH: if is_stateful: - raise RuntimeError(f"is_stateful={is_stateful} is not supported for PyTorch backend.") + msg = f"is_stateful={is_stateful} is not supported for PyTorch backend." + raise RuntimeError(msg) self.model_hf = AutoModelForCausalLM.from_pretrained( self.model_id, @@ -107,7 +108,8 @@ def prepare_model(self) -> None: ) self.model = self.model_hf.model else: - raise RuntimeError(f"backend={self.backend.value} is not supported.") + msg = f"backend={self.backend.value} is not supported." + raise RuntimeError(msg) # dump FP32 model if not (self.fp32_model_dir / self.OV_MODEL_NAME).exists(): diff --git a/tests/post_training/test_quantize_conformance.py b/tests/post_training/test_quantize_conformance.py index 618c0defdf2..df8810dafca 100644 --- a/tests/post_training/test_quantize_conformance.py +++ b/tests/post_training/test_quantize_conformance.py @@ -50,7 +50,8 @@ def fixture_use_avx2(): @pytest.fixture(scope="session", name="data_dir") def fixture_data(pytestconfig): if pytestconfig.getoption("data") is None: - raise ValueError("This test requires the --data argument to be specified.") + msg = "This test requires the --data argument to be specified." + raise ValueError(msg) return Path(pytestconfig.getoption("data")) @@ -290,7 +291,8 @@ def test_ptq_quantization( start_time = time.perf_counter() try: if test_case_name not in ptq_reference_data: - raise nncf.ValidationError(f"{test_case_name} does not exist in 'reference_data.yaml'") + msg = f"{test_case_name} does not exist in 'reference_data.yaml'" + raise nncf.ValidationError(msg) test_model_param = PTQ_TEST_CASES[test_case_name] maybe_skip_test_case(test_model_param, run_fp32_backend, run_torch_cuda_backend, batch_size) pipeline_cls = test_model_param["pipeline_cls"] diff --git a/tests/tensorflow/experimental/test_models/resnet.py b/tests/tensorflow/experimental/test_models/resnet.py index ec5bb897895..e78beeb727f 100644 --- a/tests/tensorflow/experimental/test_models/resnet.py +++ b/tests/tensorflow/experimental/test_models/resnet.py @@ -212,7 +212,8 @@ def get_stochastic_depth_rate(init_rate, i, n): """ if init_rate is not None: if init_rate < 0 or init_rate > 1: - raise ValueError("Initial drop rate must be within 0 and 1.") + msg = "Initial drop rate must be within 0 and 1."
+ raise ValueError(msg) rate = init_rate * float(i) / n else: rate = None @@ -909,7 +910,8 @@ def __init__( x = self._norm(axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon, trainable=bn_trainable)(x) x = get_activation(activation, use_keras_layer=True)(x) else: - raise ValueError("Stem type {} not supported.".format(stem_type)) + msg = f"Stem type {stem_type} not supported." + raise ValueError(msg) if replace_stem_max_pool: x = tf.keras.layers.Conv2D( @@ -934,7 +936,8 @@ def __init__( elif spec[0] == "bottleneck": block_fn = BottleneckBlock else: - raise ValueError("Block fn `{}` is not supported.".format(spec[0])) + msg = f"Block fn `{spec[0]}` is not supported." + raise ValueError(msg) x = self._block_group( inputs=x, filters=int(spec[1] * self._depth_multiplier), @@ -942,7 +945,7 @@ def __init__( block_fn=block_fn, block_repeats=spec[2], stochastic_depth_drop_rate=get_stochastic_depth_rate(self._init_stochastic_depth_rate, i + 2, 5), - name="block_group_l{}".format(i + 2), + name=f"block_group_l{i + 2}", ) endpoints[str(i + 2)] = x diff --git a/tests/tensorflow/helpers.py b/tests/tensorflow/helpers.py index f1d78074cb9..073151e9b68 100644 --- a/tests/tensorflow/helpers.py +++ b/tests/tensorflow/helpers.py @@ -124,7 +124,8 @@ def _to_numpy(cls, tensor: TensorType) -> Union[np.ndarray, numbers.Number]: return tensor.numpy() if isinstance(tensor, (np.ndarray, numbers.Number)): return tensor - raise Exception(f"Tensor must be numbers.Number, np.ndarray, tf.Tensor or tf.Variable, not {type(tensor)}") + msg = f"Tensor must be numbers.Number, np.ndarray, tf.Tensor or tf.Variable, not {type(tensor)}" + raise Exception(msg) class MockCOCODatasetBuilder(COCODatasetBuilder): diff --git a/tests/tensorflow/quantization/test_algorithm_quantization.py b/tests/tensorflow/quantization/test_algorithm_quantization.py index b1e3716937d..906bdd272a9 100644 --- a/tests/tensorflow/quantization/test_algorithm_quantization.py +++ b/tests/tensorflow/quantization/test_algorithm_quantization.py @@ -250,7 +250,7 @@ def get_quantize_inputs_test_model(input_shapes): inputs = [] for i, input_shape in enumerate(input_shapes): - inputs.append(tf.keras.Input(shape=input_shape[1:], name="input_{}".format(i + 1))) + inputs.append(tf.keras.Input(shape=input_shape[1:], name=f"input_{i + 1}")) input_1, input_2, input_3, input_4, input_5 = inputs diff --git a/tests/tensorflow/quantization/test_unified_scales.py b/tests/tensorflow/quantization/test_unified_scales.py index 006afaacc4d..88c2a5535e9 100644 --- a/tests/tensorflow/quantization/test_unified_scales.py +++ b/tests/tensorflow/quantization/test_unified_scales.py @@ -24,7 +24,7 @@ def get_single_concat_test_model(input_shapes): inputs = [] for i, input_shape in enumerate(input_shapes): - inputs.append(tf.keras.Input(shape=input_shape[1:], name="input_{}".format(i + 1))) + inputs.append(tf.keras.Input(shape=input_shape[1:], name=f"input_{i + 1}")) input_1, input_2 = inputs @@ -39,7 +39,7 @@ def get_single_concat_test_model(input_shapes): def get_double_concat_test_model(input_shapes): inputs = [] for i, input_shape in enumerate(input_shapes): - inputs.append(tf.keras.Input(shape=input_shape[1:], name="input_{}".format(i + 1))) + inputs.append(tf.keras.Input(shape=input_shape[1:], name=f"input_{i + 1}")) input_1, input_2 = inputs @@ -55,7 +55,7 @@ def get_double_concat_test_model(input_shapes): def get_unet_like_test_model(input_shapes): inputs = [] for i, input_shape in enumerate(input_shapes): - inputs.append(tf.keras.Input(shape=input_shape[1:], 
name="input_{}".format(i + 1))) + inputs.append(tf.keras.Input(shape=input_shape[1:], name=f"input_{i + 1}")) input_1, _ = inputs @@ -166,7 +166,7 @@ def test_shared_op_unified_scales(target_device): def get_eltwise_quantizer_linking_test_model(input_shapes): inputs = [] for i, input_shape in enumerate(input_shapes): - inputs.append(tf.keras.Input(shape=input_shape[1:], name="input_{}".format(i + 1))) + inputs.append(tf.keras.Input(shape=input_shape[1:], name=f"input_{i + 1}")) input_1, input_2 = inputs diff --git a/tests/tensorflow/sparsity/rb/test_components.py b/tests/tensorflow/sparsity/rb/test_components.py index 7f57a2a388e..24b2427ec7d 100644 --- a/tests/tensorflow/sparsity/rb/test_components.py +++ b/tests/tensorflow/sparsity/rb/test_components.py @@ -119,7 +119,7 @@ def test_distributed_masks_are_equal(quantization, mirrored_strategy): model.fit(dataset, epochs=1, validation_split=0, callbacks=[compression_callbacks]) # Check seeds in file - with open(MASKS_SEEDS_PATH, "r", encoding="utf8") as f: + with open(MASKS_SEEDS_PATH, encoding="utf8") as f: seeds = f.readlines() seeds_per_replica = defaultdict(list) for row in seeds: diff --git a/tests/tensorflow/test_models/mobilenet.py b/tests/tensorflow/test_models/mobilenet.py index e7ea1df4610..1be1576a8c5 100644 --- a/tests/tensorflow/test_models/mobilenet.py +++ b/tests/tensorflow/test_models/mobilenet.py @@ -75,7 +75,7 @@ def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3): x = layers.Activation(activation="softmax", name="predictions")(x) # Create model. - model = tf.keras.Model(img_input, x, name="mobilenet_%0.2f_%s" % (alpha, rows)) + model = tf.keras.Model(img_input, x, name=f"mobilenet_{alpha:0.2f}_{rows}") return model @@ -95,20 +95,20 @@ def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplie if strides == (1, 1): x = inputs else: - x = layers.ZeroPadding2D(((0, 1), (0, 1)), name="conv_pad_%d" % block_id)(inputs) + x = layers.ZeroPadding2D(((0, 1), (0, 1)), name=f"conv_pad_{block_id}")(inputs) x = layers.DepthwiseConv2D( (3, 3), padding="same" if strides == (1, 1) else "valid", depth_multiplier=depth_multiplier, strides=strides, use_bias=False, - name="conv_dw_%d" % block_id, + name=f"conv_dw_{block_id}", )(x) - x = layers.BatchNormalization(axis=channel_axis, name="conv_dw_%d_bn" % block_id)(x) - x = layers.ReLU(6.0, name="conv_dw_%d_relu" % block_id)(x) + x = layers.BatchNormalization(axis=channel_axis, name=f"conv_dw_{block_id}_bn")(x) + x = layers.ReLU(6.0, name=f"conv_dw_{block_id}_relu")(x) x = layers.Conv2D( - pointwise_conv_filters, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_%d" % block_id + pointwise_conv_filters, (1, 1), padding="same", use_bias=False, strides=(1, 1), name=f"conv_pw_{block_id}" )(x) - x = layers.BatchNormalization(axis=channel_axis, name="conv_pw_%d_bn" % block_id)(x) - return layers.ReLU(6.0, name="conv_pw_%d_relu" % block_id)(x) + x = layers.BatchNormalization(axis=channel_axis, name=f"conv_pw_{block_id}_bn")(x) + return layers.ReLU(6.0, name=f"conv_pw_{block_id}_relu")(x) diff --git a/tests/tensorflow/test_models/mobilenet_v2.py b/tests/tensorflow/test_models/mobilenet_v2.py index 26f5c02e478..fbf77814cb1 100644 --- a/tests/tensorflow/test_models/mobilenet_v2.py +++ b/tests/tensorflow/test_models/mobilenet_v2.py @@ -96,7 +96,7 @@ def MobileNetV2(input_shape=None, alpha=1.0): x = layers.Dense(NUM_CLASSES, activation="softmax", name="predictions")(x) # Create model. 
- model = tf.keras.Model(img_input, x, name="mobilenetv2_%0.2f_%s" % (alpha, rows)) + model = tf.keras.Model(img_input, x, name=f"mobilenetv2_{alpha:0.2f}_{rows}") return model @@ -109,7 +109,7 @@ def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): pointwise_conv_filters = int(filters * alpha) pointwise_filters = _make_divisible(pointwise_conv_filters, 8) x = inputs - prefix = "block_{}_".format(block_id) + prefix = f"block_{block_id}_" if block_id: # Expand diff --git a/tests/tensorflow/test_models/nasnet.py b/tests/tensorflow/test_models/nasnet.py index 7aeb1e27c9a..90bcd4b6093 100644 --- a/tests/tensorflow/test_models/nasnet.py +++ b/tests/tensorflow/test_models/nasnet.py @@ -28,7 +28,8 @@ def NASNet( default_size=None, ): if backend.image_data_format() != "channels_last": - raise AttributeError('The input data format "channels_last" is only supported.') + msg = 'The input data format "channels_last" is only supported.' + raise AttributeError(msg) if default_size is None: default_size = 331 @@ -44,10 +45,11 @@ def NASNet( img_input = layers.Input(shape=input_shape) if penultimate_filters % (24 * (filter_multiplier**2)) != 0: - raise ValueError( + msg = ( "For NASNet-A models, the `penultimate_filters` must be a multiple " - "of 24 * (`filter_multiplier` ** 2). Current value: %d" % penultimate_filters + f"of 24 * (`filter_multiplier` ** 2). Current value: {penultimate_filters}" ) + raise ValueError(msg) channel_dim = -1 filters = penultimate_filters // 24 @@ -69,21 +71,21 @@ def NASNet( x, p = _reduction_a_cell(x, p, filters // filter_multiplier, block_id="stem_2") for i in range(num_blocks): - x, p = _normal_a_cell(x, p, filters, block_id="%d" % (i)) + x, p = _normal_a_cell(x, p, filters, block_id=f"{i}") - x, p0 = _reduction_a_cell(x, p, filters * filter_multiplier, block_id="reduce_%d" % (num_blocks)) + x, p0 = _reduction_a_cell(x, p, filters * filter_multiplier, block_id=f"reduce_{num_blocks}") p = p0 if not skip_reduction else p for i in range(num_blocks): - x, p = _normal_a_cell(x, p, filters * filter_multiplier, block_id="%d" % (num_blocks + i + 1)) + x, p = _normal_a_cell(x, p, filters * filter_multiplier, block_id=f"{num_blocks + i + 1}") - x, p0 = _reduction_a_cell(x, p, filters * filter_multiplier**2, block_id="reduce_%d" % (2 * num_blocks)) + x, p0 = _reduction_a_cell(x, p, filters * filter_multiplier**2, block_id=f"reduce_{2 * num_blocks}") p = p0 if not skip_reduction else p for i in range(num_blocks): - x, p = _normal_a_cell(x, p, filters * filter_multiplier**2, block_id="%d" % (2 * num_blocks + i + 1)) + x, p = _normal_a_cell(x, p, filters * filter_multiplier**2, block_id=f"{2 * num_blocks + i + 1}") x = layers.Activation("relu")(x) @@ -123,11 +125,11 @@ def NASNetLarge(input_shape=None): def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), block_id=None): channel_dim = -1 - with backend.name_scope("separable_conv_block_%s" % block_id): + with backend.name_scope(f"separable_conv_block_{block_id}"): x = layers.Activation("relu")(ip) if strides == (2, 2): x = layers.ZeroPadding2D( - padding=imagenet_utils.correct_pad(x, kernel_size), name="separable_conv_1_pad_%s" % block_id + padding=imagenet_utils.correct_pad(x, kernel_size), name=f"separable_conv_1_pad_{block_id}" )(x) conv_pad = "valid" else: @@ -136,25 +138,25 @@ def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), block filters, kernel_size, strides=strides, - name="separable_conv_1_%s" % block_id, + name=f"separable_conv_1_{block_id}", 
             padding=conv_pad,
             use_bias=False,
             kernel_initializer="he_normal",
         )(x)
         x = layers.BatchNormalization(
-            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="separable_conv_1_bn_%s" % (block_id)
+            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"separable_conv_1_bn_{block_id}"
         )(x)
         x = layers.Activation("relu")(x)
         x = layers.SeparableConv2D(
             filters,
             kernel_size,
-            name="separable_conv_2_%s" % block_id,
+            name=f"separable_conv_2_{block_id}",
             padding="same",
             use_bias=False,
             kernel_initializer="he_normal",
         )(x)
         x = layers.BatchNormalization(
-            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="separable_conv_2_bn_%s" % (block_id)
+            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"separable_conv_2_bn_{block_id}"
         )(x)
 
     return x
 
@@ -173,53 +175,53 @@ def _adjust_block(p, ip, filters, block_id=None):
         p = ip
 
     elif p_shape[img_dim] != ip_shape[img_dim]:
-        with backend.name_scope("adjust_reduction_block_%s" % block_id):
-            p = layers.Activation("relu", name="adjust_relu_1_%s" % block_id)(p)
+        with backend.name_scope(f"adjust_reduction_block_{block_id}"):
+            p = layers.Activation("relu", name=f"adjust_relu_1_{block_id}")(p)
             p1 = layers.AveragePooling2D(
-                (1, 1), strides=(2, 2), padding="valid", name="adjust_avg_pool_1_%s" % block_id
+                (1, 1), strides=(2, 2), padding="valid", name=f"adjust_avg_pool_1_{block_id}"
             )(p)
             p1 = layers.Conv2D(
                 filters // 2,
                 (1, 1),
                 padding="same",
                 use_bias=False,
-                name="adjust_conv_1_%s" % block_id,
+                name=f"adjust_conv_1_{block_id}",
                 kernel_initializer="he_normal",
             )(p1)
 
             p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
             p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
             p2 = layers.AveragePooling2D(
-                (1, 1), strides=(2, 2), padding="valid", name="adjust_avg_pool_2_%s" % block_id
+                (1, 1), strides=(2, 2), padding="valid", name=f"adjust_avg_pool_2_{block_id}"
             )(p2)
             p2 = layers.Conv2D(
                 filters // 2,
                 (1, 1),
                 padding="same",
                 use_bias=False,
-                name="adjust_conv_2_%s" % block_id,
+                name=f"adjust_conv_2_{block_id}",
                 kernel_initializer="he_normal",
             )(p2)
 
             p = layers.concatenate([p1, p2], axis=channel_dim)
             p = layers.BatchNormalization(
-                axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="adjust_bn_%s" % block_id
+                axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"adjust_bn_{block_id}"
             )(p)
 
     elif p_shape[channel_dim] != filters:
-        with backend.name_scope("adjust_projection_block_%s" % block_id):
+        with backend.name_scope(f"adjust_projection_block_{block_id}"):
             p = layers.Activation("relu")(p)
             p = layers.Conv2D(
                 filters,
                 (1, 1),
                 strides=(1, 1),
                 padding="same",
-                name="adjust_conv_projection_%s" % block_id,
+                name=f"adjust_conv_projection_{block_id}",
                 use_bias=False,
                 kernel_initializer="he_normal",
             )(p)
             p = layers.BatchNormalization(
-                axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="adjust_bn_%s" % block_id
+                axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"adjust_bn_{block_id}"
             )(p)
     return p
 
@@ -227,7 +229,7 @@ def _adjust_block(p, ip, filters, block_id=None):
 def _normal_a_cell(ip, p, filters, block_id=None):
     channel_dim = -1
 
-    with backend.name_scope("normal_A_block_%s" % block_id):
+    with backend.name_scope(f"normal_A_block_{block_id}"):
         p = _adjust_block(p, ip, filters, block_id)
 
         h = layers.Activation("relu")(ip)
@@ -236,49 +238,45 @@ def _normal_a_cell(ip, p, filters, block_id=None):
             (1, 1),
             strides=(1, 1),
             padding="same",
-            name="normal_conv_1_%s" % block_id,
+            name=f"normal_conv_1_{block_id}",
             use_bias=False,
             kernel_initializer="he_normal",
         )(h)
-        h = layers.BatchNormalization(
-            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="normal_bn_1_%s" % block_id
-        )(h)
+        h = layers.BatchNormalization(axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"normal_bn_1_{block_id}")(
+            h
+        )
 
         with backend.name_scope("block_1"):
-            x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), block_id="normal_left1_%s" % block_id)
-            x1_2 = _separable_conv_block(p, filters, block_id="normal_right1_%s" % block_id)
-            x1 = layers.add([x1_1, x1_2], name="normal_add_1_%s" % block_id)
+            x1_1 = _separable_conv_block(h, filters, kernel_size=(5, 5), block_id=f"normal_left1_{block_id}")
+            x1_2 = _separable_conv_block(p, filters, block_id=f"normal_right1_{block_id}")
+            x1 = layers.add([x1_1, x1_2], name=f"normal_add_1_{block_id}")
 
         with backend.name_scope("block_2"):
-            x2_1 = _separable_conv_block(p, filters, (5, 5), block_id="normal_left2_%s" % block_id)
-            x2_2 = _separable_conv_block(p, filters, (3, 3), block_id="normal_right2_%s" % block_id)
-            x2 = layers.add([x2_1, x2_2], name="normal_add_2_%s" % block_id)
+            x2_1 = _separable_conv_block(p, filters, (5, 5), block_id=f"normal_left2_{block_id}")
+            x2_2 = _separable_conv_block(p, filters, (3, 3), block_id=f"normal_right2_{block_id}")
+            x2 = layers.add([x2_1, x2_2], name=f"normal_add_2_{block_id}")
 
         with backend.name_scope("block_3"):
-            x3 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name="normal_left3_%s" % (block_id))(h)
-            x3 = layers.add([x3, p], name="normal_add_3_%s" % block_id)
+            x3 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name=f"normal_left3_{block_id}")(h)
+            x3 = layers.add([x3, p], name=f"normal_add_3_{block_id}")
 
         with backend.name_scope("block_4"):
-            x4_1 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name="normal_left4_%s" % (block_id))(
-                p
-            )
-            x4_2 = layers.AveragePooling2D(
-                (3, 3), strides=(1, 1), padding="same", name="normal_right4_%s" % (block_id)
-            )(p)
-            x4 = layers.add([x4_1, x4_2], name="normal_add_4_%s" % block_id)
+            x4_1 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name=f"normal_left4_{block_id}")(p)
+            x4_2 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name=f"normal_right4_{block_id}")(p)
+            x4 = layers.add([x4_1, x4_2], name=f"normal_add_4_{block_id}")
 
         with backend.name_scope("block_5"):
-            x5 = _separable_conv_block(h, filters, block_id="normal_left5_%s" % block_id)
-            x5 = layers.add([x5, h], name="normal_add_5_%s" % block_id)
+            x5 = _separable_conv_block(h, filters, block_id=f"normal_left5_{block_id}")
+            x5 = layers.add([x5, h], name=f"normal_add_5_{block_id}")
 
-        x = layers.concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name="normal_concat_%s" % block_id)
+        x = layers.concatenate([p, x1, x2, x3, x4, x5], axis=channel_dim, name=f"normal_concat_{block_id}")
     return x, ip
 
 
 def _reduction_a_cell(ip, p, filters, block_id=None):
     channel_dim = -1
 
-    with backend.name_scope("reduction_A_block_%s" % block_id):
+    with backend.name_scope(f"reduction_A_block_{block_id}"):
         p = _adjust_block(p, ip, filters, block_id)
 
         h = layers.Activation("relu")(ip)
@@ -287,46 +285,40 @@ def _reduction_a_cell(ip, p, filters, block_id=None):
             (1, 1),
             strides=(1, 1),
             padding="same",
-            name="reduction_conv_1_%s" % block_id,
+            name=f"reduction_conv_1_{block_id}",
             use_bias=False,
             kernel_initializer="he_normal",
         )(h)
         h = layers.BatchNormalization(
-            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name="reduction_bn_1_%s" % block_id
+            axis=channel_dim, momentum=0.9997, epsilon=1e-3, name=f"reduction_bn_1_{block_id}"
         )(h)
-        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3), name="reduction_pad_1_%s" % block_id)(h)
+        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3), name=f"reduction_pad_1_{block_id}")(h)
 
         with backend.name_scope("block_1"):
-            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), block_id="reduction_left1_%s" % block_id)
-            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id="reduction_right1_%s" % block_id)
-            x1 = layers.add([x1_1, x1_2], name="reduction_add_1_%s" % block_id)
+            x1_1 = _separable_conv_block(h, filters, (5, 5), strides=(2, 2), block_id=f"reduction_left1_{block_id}")
+            x1_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f"reduction_right1_{block_id}")
+            x1 = layers.add([x1_1, x1_2], name=f"reduction_add_1_{block_id}")
 
         with backend.name_scope("block_2"):
-            x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid", name="reduction_left2_%s" % block_id)(
-                h3
-            )
-            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id="reduction_right2_%s" % block_id)
-            x2 = layers.add([x2_1, x2_2], name="reduction_add_2_%s" % block_id)
+            x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid", name=f"reduction_left2_{block_id}")(h3)
+            x2_2 = _separable_conv_block(p, filters, (7, 7), strides=(2, 2), block_id=f"reduction_right2_{block_id}")
+            x2 = layers.add([x2_1, x2_2], name=f"reduction_add_2_{block_id}")
 
         with backend.name_scope("block_3"):
-            x3_1 = layers.AveragePooling2D(
-                (3, 3), strides=(2, 2), padding="valid", name="reduction_left3_%s" % block_id
-            )(h3)
-            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), block_id="reduction_right3_%s" % block_id)
-            x3 = layers.add([x3_1, x3_2], name="reduction_add3_%s" % block_id)
+            x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding="valid", name=f"reduction_left3_{block_id}")(
+                h3
+            )
+            x3_2 = _separable_conv_block(p, filters, (5, 5), strides=(2, 2), block_id=f"reduction_right3_{block_id}")
+            x3 = layers.add([x3_1, x3_2], name=f"reduction_add3_{block_id}")
 
         with backend.name_scope("block_4"):
-            x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name="reduction_left4_%s" % block_id)(
-                x1
-            )
+            x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same", name=f"reduction_left4_{block_id}")(x1)
             x4 = layers.add([x2, x4])
 
         with backend.name_scope("block_5"):
-            x5_1 = _separable_conv_block(x1, filters, (3, 3), block_id="reduction_left4_%s" % block_id)
-            x5_2 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid", name="reduction_right5_%s" % block_id)(
-                h3
-            )
-            x5 = layers.add([x5_1, x5_2], name="reduction_add4_%s" % block_id)
+            x5_1 = _separable_conv_block(x1, filters, (3, 3), block_id=f"reduction_left4_{block_id}")
+            x5_2 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid", name=f"reduction_right5_{block_id}")(h3)
+            x5 = layers.add([x5_1, x5_2], name=f"reduction_add4_{block_id}")
 
-        x = layers.concatenate([x2, x3, x4, x5], axis=channel_dim, name="reduction_concat_%s" % block_id)
+        x = layers.concatenate([x2, x3, x4, x5], axis=channel_dim, name=f"reduction_concat_{block_id}")
         return x, ip
diff --git a/tests/tensorflow/test_sanity_sample.py b/tests/tensorflow/test_sanity_sample.py
index 61a6f94568a..b9fc0c6c7e6 100644
--- a/tests/tensorflow/test_sanity_sample.py
+++ b/tests/tensorflow/test_sanity_sample.py
@@ -57,7 +57,7 @@ def run_around_tests():
 
 
 def convert_to_argv(args):
-    return " ".join(key if val is None else "{} {}".format(key, val) for key, val in args.items()).split()
+    return " ".join(key if val is None else f"{key} {val}" for key, val in args.items()).split()
 
 
 SAMPLE_TYPES = [
@@ -141,7 +141,8 @@ def get_sample_fn(sample_type, modes):
             variants.append(key)
 
     if len(variants) != 1:
-        raise Exception("Can not choose a function for given arguments")
+        msg = "Can not choose a function for given arguments"
+        raise Exception(msg)
 
     return SAMPLES[sample_type][variants[0]]
 
@@ -153,7 +154,7 @@ def generate_config_params():
         dataset_names, dataset_types = zip(*DATASETS[sample_type])
 
         for params_id, params in enumerate(zip(config_paths, dataset_names, dataset_types, batch_sizes)):
-            config_params.append((sample_type, *params, "{}_{}".format(sample_id, params_id)))
+            config_params.append((sample_type, *params, f"{sample_id}_{params_id}"))
     return config_params
 
 
@@ -352,7 +353,7 @@ def test_export_with_resume(_config, tmp_path, export_format, _case_common_dirs)
         "--config": config_factory.serialize(),
         "--log-dir": tmp_path,
         "--resume": ckpt_path,
-        "--to-{}".format(export_format): export_path,
+        f"--to-{export_format}": export_path,
     }
 
     main = get_sample_fn(_config["sample_type"], modes=["export"])
diff --git a/tests/tensorflow/test_sota_checkpoints.py b/tests/tensorflow/test_sota_checkpoints.py
index 0531bfb65ca..b0972123fab 100644
--- a/tests/tensorflow/test_sota_checkpoints.py
+++ b/tests/tensorflow/test_sota_checkpoints.py
@@ -140,7 +140,8 @@ def read_reference_file(ref_path: Path) -> List[EvalRunParamsStruct]:
             model_dict = datasets[dataset_name]
             for model_name, sample_dict in model_dict["topologies"].items():
                 if model_name in model_names:
-                    raise RuntimeError(f"Model name {model_name} is not unique.")
+                    msg = f"Model name {model_name} is not unique."
+                    raise RuntimeError(msg)
                 model_names.append(model_name)
                 batch = sample_dict.get("batch_per_gpu")
                 resume = sample_dict.get("resume")
@@ -359,7 +360,8 @@ def get_weight_params(
            return {"weights": sota_checkpoints_dir / eval_test_struct.weights}
         elif PRETRAINED_PARAM_AVAILABILITY[eval_test_struct.sample_type]:
             return {"pretrained": True}
-        raise RuntimeError("Incorrect config")
+        msg = "Incorrect config"
+        raise RuntimeError(msg)
 
     @staticmethod
     def get_env():
diff --git a/tests/tensorflow/test_weekly.py b/tests/tensorflow/test_weekly.py
index 27aa7d31049..33177bc4713 100644
--- a/tests/tensorflow/test_weekly.py
+++ b/tests/tensorflow/test_weekly.py
@@ -244,7 +244,8 @@ def _params(request, tmp_path_factory, dataset_dir, models_dir, weekly_tests):
                 if models_dir:
                     args["weights"] = os.path.join(models_dir, args["weights"])
                     if not os.path.exists(args["weights"]):
-                        raise FileExistsError("Weights file does not exist: {}".format(args["weights"]))
+                        msg = "Weights file does not exist: {}".format(args["weights"])
+                        raise FileExistsError(msg)
                 else:
                     del args["weights"]
             if execution_arg:
@@ -276,7 +277,7 @@ def run_sample(tc, args):
         actual_acc = get_actual_acc(args["metrics-dump"])
         ref_acc = tc["expected_accuracy"]
         assert actual_acc == approx(
-            ref_acc, abs=tc["absolute_tolerance_{}".format(mode)]
+            ref_acc, abs=tc[f"absolute_tolerance_{mode}"]
         ), "Test accuracy doesn't meet the expected accuracy within threshold."
diff --git a/tests/torch/composite/test_sparsity_quantization.py b/tests/torch/composite/test_sparsity_quantization.py
index 749702978bb..dba6e512ace 100644
--- a/tests/torch/composite/test_sparsity_quantization.py
+++ b/tests/torch/composite/test_sparsity_quantization.py
@@ -60,7 +60,7 @@ def test_can_quantize_inputs_for_sparsity_plus_quantization():
     assert isinstance(nncf_module.pre_ops["1"].op, SymmetricQuantizer)
 
     input_quantizer = get_all_modules(sparse_quantized_model)[
-        f"BasicConvTestModel/" f"NNCFNetworkInterface[_nncf]/ModuleDict[{EXTERNAL_QUANTIZERS_STORAGE_NAME}]"
+        f"BasicConvTestModel/NNCFNetworkInterface[_nncf]/ModuleDict[{EXTERNAL_QUANTIZERS_STORAGE_NAME}]"
     ]
 
     assert len(input_quantizer) == 1
diff --git a/tests/torch/extensions_build_checks.py b/tests/torch/extensions_build_checks.py
index eecee4dfe52..0851a1a06d2 100644
--- a/tests/torch/extensions_build_checks.py
+++ b/tests/torch/extensions_build_checks.py
@@ -16,7 +16,8 @@
 
 if __name__ == "__main__":
     if len(sys.argv) != 2:
-        raise nncf.ValidationError("Must be run with target extensions build mode")
+        msg = "Must be run with target extensions build mode"
+        raise nncf.ValidationError(msg)
     mode = sys.argv[1]
     if mode == "cpu":
         # Do not remove - the import here is for testing purposes.
@@ -32,4 +33,5 @@
         os.environ["TORCH_CUDA_ARCH_LIST"] = "7.5+PTX"
         force_build_cuda_extensions()
     else:
-        raise nncf.ValidationError("Invalid mode type!")
+        msg = "Invalid mode type!"
+        raise nncf.ValidationError(msg)
diff --git a/tests/torch/fx/helpers.py b/tests/torch/fx/helpers.py
index b51f6da16f4..245bd7cc8c5 100644
--- a/tests/torch/fx/helpers.py
+++ b/tests/torch/fx/helpers.py
@@ -56,7 +56,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path):
         return
 
     val_annotations_file = val_data_dir / "val_annotations.txt"
-    with open(val_annotations_file, "r") as f:
+    with open(val_annotations_file) as f:
         val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines())
         for image_filename, image_label in val_annotation_data:
             from_image_filepath = val_images_dir / image_filename
diff --git a/tests/torch/fx/test_fast_bias_correction.py b/tests/torch/fx/test_fast_bias_correction.py
index cf932638b64..af63667a337 100644
--- a/tests/torch/fx/test_fast_bias_correction.py
+++ b/tests/torch/fx/test_fast_bias_correction.py
@@ -60,7 +60,8 @@ def check_bias(model: torch.fx.GraphModule, ref_bias: list):
            # TODO(AlexanderDokuchaev): return atol=0.0001 after fix 109189
            assert torch.all(torch.isclose(bias_value, ref_bias, atol=0.02)), f"{bias_value} != {ref_bias}"
            return
-    raise ValueError("Not found node with bias")
+    msg = "Not found node with bias"
+    raise ValueError(msg)
 
 
 @pytest.mark.cuda
diff --git a/tests/torch/fx/test_quantizer.py b/tests/torch/fx/test_quantizer.py
index 7015182fe4e..dc9655e1eeb 100644
--- a/tests/torch/fx/test_quantizer.py
+++ b/tests/torch/fx/test_quantizer.py
@@ -229,7 +229,8 @@ def test_OVQuantizer_TorchAOSharedQuantizationSpec_handling(
             assert isinstance(actual_annotation[annotation.edge_or_node], TorchAOQuantizationSpec)
             break
     else:
-        raise RuntimeError(f"Node {unified_scale_node_names[1]} should be annotated as quantizable")
+        msg = f"Node {unified_scale_node_names[1]} should be annotated as quantizable"
+        raise RuntimeError(msg)
 
     prepared_model(example_input)
     ao_quantized_model = convert_pt2e(prepared_model)
@@ -244,4 +245,5 @@ def test_OVQuantizer_TorchAOSharedQuantizationSpec_handling(
             if nodes_visited == 2:
                 break
     else:
-        raise RuntimeError(f"Quantizers was not found for the unified scales pair {unified_scale_node_names}")
+        msg = f"Quantizers was not found for the unified scales pair {unified_scale_node_names}"
+        raise RuntimeError(msg)
diff --git a/tests/torch/fx/test_statistics_caching.py b/tests/torch/fx/test_statistics_caching.py
index 46712cf2869..70dff57db8c 100644
--- a/tests/torch/fx/test_statistics_caching.py
+++ b/tests/torch/fx/test_statistics_caching.py
@@ -25,4 +25,4 @@ def get_statistics_aggregator(self):
         return FXStatisticsAggregator(None)
 
     def _create_dummy_min_max_tensor(self) -> Tensor:
-        return Tensor(torch.zeros((3))), Tensor(torch.ones((3)))
+        return Tensor(torch.zeros(3)), Tensor(torch.ones(3))
diff --git a/tests/torch/helpers.py b/tests/torch/helpers.py
index 94f655df649..815a026f73c 100644
--- a/tests/torch/helpers.py
+++ b/tests/torch/helpers.py
@@ -108,9 +108,8 @@ def create_grouped_conv(
     in_channels, out_channels, kernel_size, groups, weight_init=1, bias_init=0, padding=0, stride=1
 ):
     if in_channels % groups != 0 or out_channels % groups != 0:
-        raise nncf.ValidationError(
-            "Cannot create grouped convolution. Either `in_channels` or `out_channels` are not divisible by `groups`"
-        )
+        msg = "Cannot create grouped convolution. Either `in_channels` or `out_channels` are not divisible by `groups`"
+        raise nncf.ValidationError(msg)
     conv = nn.Conv2d(in_channels, out_channels, kernel_size, groups=groups, padding=padding, stride=stride)
     fill_conv_weight(conv, weight_init)
     fill_bias(conv, bias_init)
@@ -398,7 +397,8 @@ def _to_numpy(cls, tensor: TensorType) -> Union[np.ndarray, numbers.Number]:
             return tensor.cpu().detach().numpy()
         if isinstance(tensor, (np.ndarray, numbers.Number)):
             return tensor
-        raise Exception(f"Tensor must be np.ndarray or torch.Tensor, not {type(tensor)}")
+        msg = f"Tensor must be np.ndarray or torch.Tensor, not {type(tensor)}"
+        raise Exception(msg)
 
 
 def create_compressed_model_and_algo_for_test(
diff --git a/tests/torch/models_hub_test/common.py b/tests/torch/models_hub_test/common.py
index eb9a08f2224..a6ac06b62a5 100644
--- a/tests/torch/models_hub_test/common.py
+++ b/tests/torch/models_hub_test/common.py
@@ -116,11 +116,10 @@ def get_models_list(path: Path) -> List[ModelInfo]:
                 model_name, model_link, mark, reason = splitted
                 if model_link == "none":
                     model_link = None
-                assert mark in ["skip", "xfail"], "Incorrect failure mark for model info {}".format(model_info)
+                assert mark in ["skip", "xfail"], f"Incorrect failure mark for model info {model_info}"
             else:
-                raise nncf.ValidationError(
-                    f"Incorrect model info `{model_info}`. It must contain either 1, 2 or 3 fields."
-                )
+                msg = f"Incorrect model info `{model_info}`. It must contain either 1, 2 or 3 fields."
+                raise nncf.ValidationError(msg)
             models.append(ModelInfo(model_name, model_link, mark, reason))
     return models
diff --git a/tests/torch/modules/test_rnn.py b/tests/torch/modules/test_rnn.py
index 315a44de116..75f7edb21e7 100644
--- a/tests/torch/modules/test_rnn.py
+++ b/tests/torch/modules/test_rnn.py
@@ -67,7 +67,7 @@ def get_param_names(bias: bool) -> List[str]:
     for d in range(custom_lstm.num_directions):
         for name in get_param_names(custom_lstm.bias):
             suffix = "_reverse" if d == 1 else ""
-            param_name = name + "_l{}{}".format(layer_idx, suffix)
+            param_name = name + f"_l{layer_idx}{suffix}"
             param = getattr(module_, param_name)
             getattr(custom_lstm, param_name).data.copy_(param.data)
     custom_lstm.to(device)
@@ -418,7 +418,7 @@ def set_ref_lstm_weights(
             for name in cls.get_param_names(bias):
                 suffix = "_reverse" if d == 1 else ""
                 param = getattr(data, name)
-                param_name = name + "_l{}{}".format(layer_idx, suffix)
+                param_name = name + f"_l{layer_idx}{suffix}"
                 getattr(nn_lstm, param_name).data.copy_(param[i].data)
 
     @classmethod
diff --git a/tests/torch/nas/test_elastic_depth.py b/tests/torch/nas/test_elastic_depth.py
index 1d9a26ac023..f71e6b09751 100644
--- a/tests/torch/nas/test_elastic_depth.py
+++ b/tests/torch/nas/test_elastic_depth.py
@@ -67,7 +67,7 @@ def __init__(self, depth=3):
         self.branch_with_blocks = nn.Sequential()
         for idx in range(depth):
             conv = create_conv(3, 3, 5, weight_init=idx + 1, bias_init=idx + 1, padding=2)
-            self.branch_with_blocks.add_module("conv{}".format(idx), conv)
+            self.branch_with_blocks.add_module(f"conv{idx}", conv)
         self.last_conv = create_conv(3, 1, 1)
 
     def forward(self, x):
diff --git a/tests/torch/nncf_network/test_transformation_layout.py b/tests/torch/nncf_network/test_transformation_layout.py
index d2881063368..2e989daa63d 100644
--- a/tests/torch/nncf_network/test_transformation_layout.py
+++ b/tests/torch/nncf_network/test_transformation_layout.py
@@ -162,7 +162,8 @@ def test_all_possible_combinations_of_commands_for_get_applied_commands(
             for applied_command in applied_commands.transformations
         )
         if sum(map(int, eq_commands)) != 1:
-            raise RuntimeError(f"Command {command} has no pair in recovered commands")
+            msg = f"Command {command} has no pair in recovered commands"
+            raise RuntimeError(msg)
 
 
 @pytest.mark.parametrize("target_type", (TargetType.OPERATION_WITH_WEIGHTS, TargetType.OPERATOR_PRE_HOOK))
diff --git a/tests/torch/pruning/filter_pruning/test_legr.py b/tests/torch/pruning/filter_pruning/test_legr.py
index d861d6e0a51..ebdf612de6c 100644
--- a/tests/torch/pruning/filter_pruning/test_legr.py
+++ b/tests/torch/pruning/filter_pruning/test_legr.py
@@ -58,7 +58,7 @@ def test_legr_coeffs_saving(tmp_path):
     assert compression_ctrl.ranking_coeffs == ref_ranking_coeffs
 
     # check that in specified file some coeffs are saved (1, 0 in case of not-legr)
-    with open(file_name, "r", encoding="utf8") as f:
+    with open(file_name, encoding="utf8") as f:
         saved_coeffs_in_file = json.load(f)
     assert all(ref_ranking_coeffs[key] == tuple(saved_coeffs_in_file[key]) for key in saved_coeffs_in_file)
 
@@ -80,7 +80,7 @@ def test_legr_coeffs_save_and_load(tmp_path):
     _, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
     assert compression_ctrl.ranking_coeffs == ref_ranking_coeffs
 
-    with open(file_name_save, "r", encoding="utf8") as f:
+    with open(file_name_save, encoding="utf8") as f:
         saved_coeffs_in_file = json.load(f)
     assert all(ref_ranking_coeffs[key] == tuple(saved_coeffs_in_file[key]) for key in saved_coeffs_in_file)
diff --git a/tests/torch/pruning/test_onnx_export.py b/tests/torch/pruning/test_onnx_export.py
index 12b311a462b..d6fc541043e 100644
--- a/tests/torch/pruning/test_onnx_export.py
+++ b/tests/torch/pruning/test_onnx_export.py
@@ -71,7 +71,7 @@ def test_pruning_export_concat_model(tmp_path, prune_first, ref_shapes):
 
     onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
     for i in range(1, 5):
-        conv_name = "nncf_module.conv{}".format(i)
+        conv_name = f"nncf_module.conv{i}"
         check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
 
 
@@ -91,7 +91,7 @@ def test_pruning_export_eltwise_model(tmp_path, prune_first, ref_shapes):
     nncf_config["compression"]["pruning_init"] = 0.5
     onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
     for i in range(1, 5):
-        conv_name = "nncf_module.conv{}".format(i)
+        conv_name = f"nncf_module.conv{i}"
         check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
 
 
@@ -111,7 +111,7 @@ def test_pruning_export_diffconvs_model(tmp_path, prune_first, ref_shapes):
     nncf_config["compression"]["pruning_init"] = 0.5
     onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
     for i in range(1, 5):
-        conv_name = "nncf_module.conv{}".format(i)
+        conv_name = f"nncf_module.conv{i}"
         check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
diff --git a/tests/torch/ptq/test_fast_bias_correction.py b/tests/torch/ptq/test_fast_bias_correction.py
index 0f7a8e14aaa..1fa76053605 100644
--- a/tests/torch/ptq/test_fast_bias_correction.py
+++ b/tests/torch/ptq/test_fast_bias_correction.py
@@ -59,7 +59,8 @@ def check_bias(model: NNCFNetwork, ref_bias: list):
            # TODO(AlexanderDokuchaev): return atol=0.0001 after fix 109189
            assert torch.all(torch.isclose(bias_value, ref_bias, atol=0.02)), f"{bias_value} != {ref_bias}"
            return
-    raise ValueError("Not found node with bias")
+    msg = "Not found node with bias"
+    raise ValueError(msg)
 
 
 @pytest.mark.cuda
@@ -88,4 +89,5 @@ def check_bias(model: NNCFNetwork, ref_bias: list):
            # TODO(AlexanderDokuchaev): return atol=0.0001 after fix 109189
            assert torch.all(torch.isclose(bias_value, ref_bias, atol=0.02)), f"{bias_value} != {ref_bias}"
            return
-    raise ValueError("Not found node with bias")
+    msg = "Not found node with bias"
+    raise ValueError(msg)
diff --git a/tests/torch/ptq/test_reducers_and_aggregators.py b/tests/torch/ptq/test_reducers_and_aggregators.py
index 62126a8b9aa..1db544381b8 100644
--- a/tests/torch/ptq/test_reducers_and_aggregators.py
+++ b/tests/torch/ptq/test_reducers_and_aggregators.py
@@ -72,7 +72,8 @@ def cast_tensor(self, tensor, dtype: Dtype):
            return tensor.float()
         if dtype == Dtype.INTEGER:
            return tensor.int()
-        raise nncf.ValidationError(f"Invalid dtype: {dtype}. Supported dtypes are {Dtype.FLOAT} and {Dtype.INTEGER}")
+        msg = f"Invalid dtype: {dtype}. Supported dtypes are {Dtype.FLOAT} and {Dtype.INTEGER}"
+        raise nncf.ValidationError(msg)
 
 
 class TestCPUReducersAggregators(BaseTestReducersAggregators):
diff --git a/tests/torch/ptq/test_statistics_caching.py b/tests/torch/ptq/test_statistics_caching.py
index b534e6644b9..a1f9c2a9386 100644
--- a/tests/torch/ptq/test_statistics_caching.py
+++ b/tests/torch/ptq/test_statistics_caching.py
@@ -25,4 +25,4 @@ def get_statistics_aggregator(self):
         return PTStatisticsAggregator(None)
 
     def _create_dummy_min_max_tensor(self) -> Tensor:
-        return Tensor(torch.zeros((3))), Tensor(torch.ones((3)))
+        return Tensor(torch.zeros(3)), Tensor(torch.ones(3))
diff --git a/tests/torch/ptq/test_weights_compression.py b/tests/torch/ptq/test_weights_compression.py
index 0889284b453..8ec06913ca8 100644
--- a/tests/torch/ptq/test_weights_compression.py
+++ b/tests/torch/ptq/test_weights_compression.py
@@ -47,7 +47,7 @@
 
 class SequentialMatmulModel(nn.Module):
     def __init__(self):
-        super(SequentialMatmulModel, self).__init__()
+        super().__init__()
         self.main_values = [10000, 1000, 1, 10, 10000]
         self.layers = nn.ModuleList()
 
diff --git a/tests/torch/qat/helpers.py b/tests/torch/qat/helpers.py
index a8c5149bdab..7311cb9969b 100644
--- a/tests/torch/qat/helpers.py
+++ b/tests/torch/qat/helpers.py
@@ -36,7 +36,8 @@ def convert_quantization_mode(mode: Optional[str]) -> QuantizationScheme:
         return QuantizationScheme.SYMMETRIC
     if mode == "asymmetric":
         return QuantizationScheme.ASYMMETRIC
-    raise RuntimeError(f"Unknown quantization mode: {mode}")
+    msg = f"Unknown quantization mode: {mode}"
+    raise RuntimeError(msg)
 
 
 def convert_quantization_params(conf: Optional[Dict[str, Any]]) -> QuantizationParameters:
@@ -63,7 +64,8 @@ def convert_overflow_fix_param(param: Optional[str]) -> OverflowFix:
         return OverflowFix.DISABLE
     if param == "first_layer_only":
         return OverflowFix.FIRST_LAYER
-    raise RuntimeError(f"Overflow fix param {param} is unknown.")
+    msg = f"Overflow fix param {param} is unknown."
+    raise RuntimeError(msg)
 
 
 def convert_quantization_preset(preset: str) -> QuantizationPreset:
@@ -71,7 +73,8 @@ def convert_quantization_preset(preset: str) -> QuantizationPreset:
         return QuantizationPreset.PERFORMANCE
     if preset == "mixed":
         return QuantizationPreset.MIXED
-    raise RuntimeError(f"Preset {preset} is unknown.")
+    msg = f"Preset {preset} is unknown."
+    raise RuntimeError(msg)
 
 
 def get_range_init_type(config_quantization_params: Dict[str, Any]) -> RangeEstimatorParameters:
diff --git a/tests/torch/qat/test_qat_object_detection.py b/tests/torch/qat/test_qat_object_detection.py
index b84c814eb67..55ab3510e11 100644
--- a/tests/torch/qat/test_qat_object_detection.py
+++ b/tests/torch/qat/test_qat_object_detection.py
@@ -120,7 +120,7 @@ def get_datasets(config: SampleConfig) -> DatasetSet:
     test_data_loader, train_data_loader, _ = create_dataloaders(config)
 
     test_dataset = get_testing_dataset(config.dataset, config.test_anno, config.test_imgs, config)
-    logger.info("Loaded {} testing images".format(len(test_dataset)))
+    logger.info(f"Loaded {len(test_dataset)} testing images")
     if config.distributed:
         test_sampler = torch.utils.data.DistributedSampler(test_dataset, config.rank, config.world_size)
     else:
@@ -198,7 +198,7 @@ def train(
             acc_drop = original_metric - current_metric
             logger.info(f"Metric: {current_metric}, FP32 diff: {acc_drop}")
             if accuracy_drop_is_acceptable(acc_drop):
-                logger.info(f"Accuracy is within 1 percent drop," f" pipeline is making early exit on epoch {epoch - 1}")
+                logger.info(f"Accuracy is within 1 percent drop, pipeline is making early exit on epoch {epoch - 1}")
                 logger.info(
                     f"Epochs in config: {config.epochs}, epochs trained: {epoch}, epochs saved: {config.epochs - epoch}"
                 )
diff --git a/tests/torch/qat/test_qat_segmentation.py b/tests/torch/qat/test_qat_segmentation.py
index 3a21cc06499..519b9f8bf61 100644
--- a/tests/torch/qat/test_qat_segmentation.py
+++ b/tests/torch/qat/test_qat_segmentation.py
@@ -91,7 +91,8 @@ def get_sample_config(quantization_config_path: Path, data_dir: Path, weights_di
             meta = datset_meta
             break
     else:
-        raise RuntimeError(f"Dataset for the config {str(quantization_config_path)} is unknown.")
+        msg = f"Dataset for the config {str(quantization_config_path)} is unknown."
+        raise RuntimeError(msg)
 
     weights_path = (
         weights_dir / "segmentation" / meta["name"] / (quantization_config_path.stem.split("_int8")[0] + ".pth")
@@ -208,14 +209,14 @@ def train(
         if config.distributed:
             datasets.train_data_loader.sampler.set_epoch(epoch)
 
-        logger.info(">>>> [Epoch: {0:d}] Validation".format(epoch))
+        logger.info(f">>>> [Epoch: {epoch:d}] Validation")
         _, (_, current_miou) = val_obj.run_epoch(config.print_step)
         # best_metric = max(current_miou, best_metric)
         acc_drop = original_metric - current_miou
         best_miou = max(current_miou, best_miou)
         logger.info(f"Metric: {current_miou}, FP32 diff: {acc_drop}")
         if accuracy_drop_is_acceptable(acc_drop):
-            logger.info(f"Accuracy is within 1 percent drop," f" pipeline is making early exit on epoch {epoch - 1}")
+            logger.info(f"Accuracy is within 1 percent drop, pipeline is making early exit on epoch {epoch - 1}")
             logger.info(
                 f"Epochs in config: {config.epochs}, epochs trained: {epoch}, epochs saved: {config.epochs - epoch}"
             )
@@ -224,10 +225,10 @@ def train(
             logger.info("Training pipeline is finished, accuracy was not recovered.")
             return acc_drop
 
-        logger.info(">>>> [Epoch: {0:d}] Training".format(epoch))
+        logger.info(f">>>> [Epoch: {epoch:d}] Training")
         epoch_loss, (_, miou) = train_obj.run_epoch(config.print_step)
 
-        logger.info(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".format(epoch, epoch_loss, miou))
+        logger.info(f">>>> [Epoch: {epoch:d}] Avg. loss: {epoch_loss:.4f} | Mean IoU: {miou:.4f}")
 
         lr_scheduler.step(epoch if not isinstance(lr_scheduler, ReduceLROnPlateau) else best_miou)
diff --git a/tests/torch/quantization/test_algo_quantization.py b/tests/torch/quantization/test_algo_quantization.py
index 0cd1933fbe6..16f43f8f924 100644
--- a/tests/torch/quantization/test_algo_quantization.py
+++ b/tests/torch/quantization/test_algo_quantization.py
@@ -219,7 +219,7 @@ def test_can_create_quant_loss_and_scheduler():
 
 
 def get_path_to_keys(tmp_path, rank):
-    return "{}_{}".format(tmp_path, str(rank))
+    return f"{tmp_path}_{str(rank)}"
 
 
 def activation_quantizers_dumping_worker(current_gpu, config, tmp_path):
@@ -229,7 +229,7 @@ def activation_quantizers_dumping_worker(current_gpu, config, tmp_path):
     print(path)
     with open(path, "w", encoding="utf8") as f:
         for aq_id in qctrl.non_weight_quantizers:
-            f.writelines("%s\n" % str(aq_id))
+            f.writelines(f"{str(aq_id)}\n")
 
 
 @pytest.mark.cuda
@@ -245,10 +245,10 @@ def test_activation_quantizers_order_is_the_same__for_resnet50(tmp_path, runs_su
         activation_quantizers_dumping_worker, nprocs=ngpus_per_node, args=(config, tmp_path), join=True
     )
 
-    with open(get_path_to_keys(tmp_path, 0), "r", encoding="utf8") as f:
+    with open(get_path_to_keys(tmp_path, 0), encoding="utf8") as f:
         ref_list = f.readlines()
     for i in range(1, ngpus_per_node):
-        with open(get_path_to_keys(tmp_path, i), "r", encoding="utf8") as f:
+        with open(get_path_to_keys(tmp_path, i), encoding="utf8") as f:
             curr_list = f.readlines()
             assert curr_list == ref_list
diff --git a/tests/torch/quantization/test_autoq_precision_init.py b/tests/torch/quantization/test_autoq_precision_init.py
index 3af30c37d82..44615efa947 100644
--- a/tests/torch/quantization/test_autoq_precision_init.py
+++ b/tests/torch/quantization/test_autoq_precision_init.py
@@ -179,7 +179,7 @@ def test_autoq_precision_init(_seed, dataset_dir, tmp_path, mocker, params):
     final_num_of_adjust_pad_ops = len(get_all_modules_by_type(model, "UpdatePaddingValue"))
     assert adjust_pad_creation_spy.call_count == final_num_of_adjust_pad_ops
 
-    path_to_dot = "{}_{}.dot".format(params.model_creator.__name__, params.config_builder.filename_suffix())
+    path_to_dot = f"{params.model_creator.__name__}_{params.config_builder.filename_suffix()}.dot"
     graph_dir = os.path.join("quantized", "autoq")
     check_bitwidth_graph(algo_ctrl, model, path_to_dot, graph_dir)
 
diff --git a/tests/torch/quantization/test_hawq_precision_init.py b/tests/torch/quantization/test_hawq_precision_init.py
index 1c2cab50ff7..ef89abc66e2 100644
--- a/tests/torch/quantization/test_hawq_precision_init.py
+++ b/tests/torch/quantization/test_hawq_precision_init.py
@@ -113,7 +113,7 @@ def get_bitwidth_per_scope(model, all_quantizations=None):
 
 def compare_with_ref_if_exists(actual_state, path_to_ref):
     if os.path.exists(path_to_ref):
-        with open(path_to_ref, "r", encoding="utf8") as f:
+        with open(path_to_ref, encoding="utf8") as f:
             assert json.load(f) == actual_state
     else:
         with open(path_to_ref, "w", encoding="utf8") as f:
@@ -388,7 +388,7 @@ def side_effect_fn(self, max_iter=500, tolerance=1e-5):
     mocked_trace.side_effect = side_effect_fn
     model, ctrl = create_compressed_model_and_algo_for_test(model, config)
 
-    path_to_dot = "{}_{}.dot".format(params.model_creator.__name__, config_builder.filename_suffix())
+    path_to_dot = f"{params.model_creator.__name__}_{config_builder.filename_suffix()}.dot"
     graph_dir = os.path.join("quantized", "hawq")
     check_bitwidth_graph(ctrl, model, path_to_dot, graph_dir, add_flops=config_builder.should_add_flops)
 
     if config_builder.compression_ratio:
@@ -589,7 +589,7 @@ def disable_quantizer_gradients():
 
 
 def get_path_to_bitwidth_dump(tmp_path, rank):
-    out_file_path = tmp_path / "bitwidth_per_scope_gpu{}.pt".format(rank)
+    out_file_path = tmp_path / f"bitwidth_per_scope_gpu{rank}.pt"
     return out_file_path
 
 
diff --git a/tests/torch/quantization/test_logarithm_scale.py b/tests/torch/quantization/test_logarithm_scale.py
index 9770ce1ae7d..5ce396d9a23 100644
--- a/tests/torch/quantization/test_logarithm_scale.py
+++ b/tests/torch/quantization/test_logarithm_scale.py
@@ -96,6 +96,8 @@ def test_logarithm_scale_parameter(logarithm_scale_setting_1, logarithm_scale_se
     for k, v0 in sd0.items():
         v1 = sd1[k]
         diff = (v1 - v0).abs().sum().item() / v1.numel()
-        assert diff < 1e-6, "symmetric {} logarithm_scales {} param {} is corrupted mean({}-{})={}".format(
-            symmetric, logarithm_scales, k, v0, v1, diff
+        err_msg = (
+            f"symmetric {symmetric} logarithm_scales {logarithm_scales} param {k}"
+            f" is corrupted mean({v0}-{v1})={diff}"
         )
+        assert diff < 1e-6, err_msg
diff --git a/tests/torch/quantization/test_manual_precision_init.py b/tests/torch/quantization/test_manual_precision_init.py
index 01871d7a990..3f99ed3fb48 100644
--- a/tests/torch/quantization/test_manual_precision_init.py
+++ b/tests/torch/quantization/test_manual_precision_init.py
@@ -178,7 +178,7 @@ def test_manual_single_conv(params):
         create_compressed_model_and_algo_for_test(model, config)
     else:
         model, ctrl = create_compressed_model_and_algo_for_test(model, config)
-        path_to_dot = "{}.dot".format(params.name)
+        path_to_dot = f"{params.name}.dot"
         graph_dir = os.path.join("quantized", "hawq")
         check_bitwidth_graph(ctrl, model, path_to_dot, graph_dir)
 
@@ -245,7 +245,7 @@ def setup_init_spies(mocker):
     def check_precision_init(self, compression_ctrl: QuantizationController):
         for qid, quantizer in compression_ctrl.all_quantizations.items():
             expected_bit = [ref_bit for (ref_qid, ref_bit) in self.ref_bits if ref_qid == qid][0]
-            assert quantizer.num_bits == expected_bit, "Unexpected number of bits for {}".format(str(qid))
+            assert quantizer.num_bits == expected_bit, f"Unexpected number of bits for {str(qid)}"
 
         nncf_stats = compression_ctrl.statistics()
         actual_stats = nncf_stats.quantization
diff --git a/tests/torch/quantization/test_range_init.py b/tests/torch/quantization/test_range_init.py
index 8c27c47abed..57c5f55ab4a 100644
--- a/tests/torch/quantization/test_range_init.py
+++ b/tests/torch/quantization/test_range_init.py
@@ -88,9 +88,9 @@ def scale_signed_dumping_worker(gpu, ngpus_per_node, config, tmp_path):
             for layer in get_all_modules_by_type(quant_model, "SymmetricQuantizer").values():
                 act_sum += layer.scale.sum()
             ref_sum = 3720.864
-            assert act_sum.item() == approx(ref_sum, 0.01), "sum of scales is not expected {} vs {} rank {}".format(
-                act_sum.item(), ref_sum, config.rank
-            )
+            assert act_sum.item() == approx(
+                ref_sum, 0.01
+            ), f"sum of scales is not expected {act_sum.item()} vs {ref_sum} rank {config.rank}"
 
             out_file_path = get_path_after_broadcast(tmp_path, config.rank)
             save_params(quant_model, out_file_path)
@@ -112,12 +112,12 @@ def scale_signed_dumping_worker(gpu, ngpus_per_node, config, tmp_path):
 
 
 def get_path_path_after_train_iters(tmp_path, rank):
-    out_file_path = tmp_path / "scale_signed_after_1_train_iter_gpu{}.pt".format(rank)
+    out_file_path = tmp_path / f"scale_signed_after_1_train_iter_gpu{rank}.pt"
     return out_file_path
 
 
 def get_path_after_broadcast(tmp_path, rank):
-    out_file_path = tmp_path / "scale_signed_after_broadcast_gpu{}.pt".format(rank)
+    out_file_path = tmp_path / f"scale_signed_after_broadcast_gpu{rank}.pt"
     return out_file_path
 
 
@@ -173,11 +173,12 @@ def generate_qp(
     elif target is QuantizerGroup.ACTIVATIONS:
         qip = ActivationQuantizationInsertionPoint(target_node_name=node_name, input_port_id=input_port_id)
     else:
-        raise nncf.InvalidQuantizerGroupError(
+        msg = (
             f"Invalid quantizer group: {target}. "
             f"Supported groups are {QuantizerGroup.WEIGHTS}"
             f"and {QuantizerGroup.ACTIVATIONS}."
         )
+        raise nncf.InvalidQuantizerGroupError(msg)
     return SingleConfigQuantizationPoint(qip, QuantizerConfig(), [node_name])
 
 
@@ -204,8 +205,8 @@ def check_sign_and_scale(model, ref_table):
             match = re.search(pattern, str(scope))
             if match:
                 assert isinstance(module, SymmetricQuantizer)
-                assert module.signed == ref_values[0], "sign is not matched for {}".format(str(scope))
-                assert all(module.scale == ref_values[1]), "scale is not matched for {}".format(str(scope))
+                assert module.signed == ref_values[0], f"sign is not matched for {str(scope)}"
+                assert all(module.scale == ref_values[1]), f"scale is not matched for {str(scope)}"
 
     @pytest.mark.parametrize("config_creator", (create_config, create_empty_config_without_init_section))
     def test_scale_and_sign_init_for_quant_algo__without_init_section(self, wrap_dataloader, config_creator):
@@ -559,27 +560,27 @@ class RangeInitTestCase:
             collector_name="min_max",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=30000.0),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((20000.0, 40000.0, 60000.0)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((20000.0, 40000.0, 60000.0)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=60000.0),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.0, -40000.0, 3.0)).view(((1, 3, 1, 1))),
-                    input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view(((1, 3, 1, 1))),
+                    input_low=torch.tensor((1.0, -40000.0, 3.0)).view((1, 3, 1, 1)),
+                    input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view((1, 3, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-40000.0, input_range=100000.0),
             ),
@@ -588,27 +589,27 @@ class RangeInitTestCase:
             collector_name="mixed_min_max",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=30000.0),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((20000.0, 40000.0, 60000.0)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((20000.0, 40000.0, 60000.0)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=45000.0),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.0, -40000.0, 3.0)).view(((1, 3, 1, 1))),
-                    input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view(((1, 3, 1, 1))),
+                    input_low=torch.tensor((1.0, -40000.0, 3.0)).view((1, 3, 1, 1)),
+                    input_range=torch.tensor((19999.0, 39998.0, 59997.0)).view((1, 3, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-30000.0, input_range=75000.0),
             ),
@@ -617,27 +618,27 @@ class RangeInitTestCase:
             collector_name="mean_min_max",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((10000.0, 20000.0, 30000.0)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=30000.0),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((1.0, -20000.0, 3.0)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((9999.0, 19998.0, 29997.0)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-20000.0, input_range=50000.0),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((15000.0, 30000.0, 45000.0)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((15000.0, 30000.0, 45000.0)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=45000.0),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((1.5, -30000.0, 4.5)).view(((1, 3, 1, 1))),
-                    input_range=torch.tensor((14998.5000, 29997.0000, 44995.5000)).view(((1, 3, 1, 1))),
+                    input_low=torch.tensor((1.5, -30000.0, 4.5)).view((1, 3, 1, 1)),
+                    input_range=torch.tensor((14998.5000, 29997.0000, 44995.5000)).view((1, 3, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-30000.0, input_range=75000.0),
             ),
@@ -646,27 +647,27 @@ class RangeInitTestCase:
             collector_name="threesigma",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((16120.1719, 32240.3438, 48360.5156)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((16120.1719, 32240.3438, 48360.5156)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=33780.2891),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((-6119.1719, -32240.3438, -18357.5156)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((22239.3438, 44478.6875, 66718.0312)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((-6119.1719, -32240.3438, -18357.5156)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((22239.3438, 44478.6875, 66718.0312)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-26279.2871, input_range=60059.5781),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((21494.4707, 42988.9414, 64483.4141)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((21494.4707, 42988.9414, 64483.4141)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=52662.1367),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((-8159.4707, -42988.9414, -24478.4141)).view(((1, 3, 1, 1))),
-                    input_range=torch.tensor((29653.9414, 59307.8828, 88961.8281)).view(((1, 3, 1, 1))),
+                    input_low=torch.tensor((-8159.4707, -42988.9414, -24478.4141)).view((1, 3, 1, 1)),
+                    input_range=torch.tensor((29653.9414, 59307.8828, 88961.8281)).view((1, 3, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-42660.1367, input_range=95322.2734),
             ),
@@ -675,27 +676,27 @@ class RangeInitTestCase:
             collector_name="percentile",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((6789.3213, 13580.6416, 20367.9629)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((6789.3213, 13580.6416, 20367.9629)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=7776.0),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((3210.6790, -13580.6416, 9632.0371)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((3578.6423, 7157.2837, 10735.9258)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((3210.6790, -13580.6416, 9632.0371)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((3578.6423, 7157.2837, 10735.9258)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-740.6420, input_range=8516.6416),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((9052.3213, 18108.0000, 27156.9629)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((9052.3213, 18108.0000, 27156.9629)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=10734.6426),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((4280.6792, -18108.0000, 12842.0371)).view(((1, 3, 1, 1))),
-                    input_range=torch.tensor((4771.6421, 9544.0000, 14314.9258)).view(((1, 3, 1, 1))),
+                    input_low=torch.tensor((4280.6792, -18108.0000, 12842.0371)).view((1, 3, 1, 1)),
+                    input_range=torch.tensor((4771.6421, 9544.0000, 14314.9258)).view((1, 3, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-988.0, input_range=11722.6426),
             ),
@@ -704,27 +705,27 @@ class RangeInitTestCase:
             collector_name="mean_percentile",
             weights_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((9990.0010, 19980.0020, 29970.0039)).view(((3, 1, 1, 1)))
+                    scale=torch.tensor((9990.0010, 19980.0020, 29970.0039)).view((3, 1, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=29910.0039),
             ),
             weights_refs_assymetric=GranularityQuantizerRefs(
                 per_channel=AsymQuantizerScaleRef(
-                    input_low=torch.tensor((10.999, -19980.0, 32.997)).view(((3, 1, 1, 1))),
-                    input_range=torch.tensor((9979.0020, 19958.0039, 29937.0078)).view(((3, 1, 1, 1))),
+                    input_low=torch.tensor((10.999, -19980.0, 32.997)).view((3, 1, 1, 1)),
+                    input_range=torch.tensor((9979.0020, 19958.0039, 29937.0078)).view((3, 1, 1, 1)),
                 ),
                 per_tensor=AsymQuantizerScaleRef(input_low=-19940.0020, input_range=49850.0078),
             ),
             activations_refs_symmetric=GranularityQuantizerRefs(
                 per_channel=SymQuantizerScaleRef(
-                    scale=torch.tensor((14985.0020, 29970.0039, 44955.0078)).view(((1, 3, 1, 1)))
+                    scale=torch.tensor((14985.0020, 29970.0039, 44955.0078)).view((1, 3, 1, 1))
                 ),
                 per_tensor=SymQuantizerScaleRef(scale=44865.0078),
             ),
             activations_refs_assymetric=GranularityQuantizerRefs(
per_channel=AsymQuantizerScaleRef( - input_low=torch.tensor((16.498, -2.9970e04, 49.496)).view(((1, 3, 1, 1))), - input_range=torch.tensor((14968.5039, 29937.0078, 44905.5117)).view(((1, 3, 1, 1))), + input_low=torch.tensor((16.498, -2.9970e04, 49.496)).view((1, 3, 1, 1)), + input_range=torch.tensor((14968.5039, 29937.0078, 44905.5117)).view((1, 3, 1, 1)), ), per_tensor=AsymQuantizerScaleRef(input_low=-29910.0039, input_range=74775.0156), ), diff --git a/tests/torch/quantization/test_strip.py b/tests/torch/quantization/test_strip.py index c27409d38b2..8344781c3f8 100644 --- a/tests/torch/quantization/test_strip.py +++ b/tests/torch/quantization/test_strip.py @@ -110,7 +110,8 @@ def range_mode_to_args(range_mode: str) -> Tuple[bool, bool]: return True, False if range_mode == "narrow_range": return False, True - raise ValueError(f"{range_mode} is not supported.") + msg = f"{range_mode} is not supported." + raise ValueError(msg) @pytest.mark.parametrize("input_size", INPUT_TEST_SCALES, ids=_idfn) diff --git a/tests/torch/sample_test_validator.py b/tests/torch/sample_test_validator.py index 8bf0ab3a861..41076f6e5ba 100644 --- a/tests/torch/sample_test_validator.py +++ b/tests/torch/sample_test_validator.py @@ -32,7 +32,7 @@ def create_command_line(args: Dict[str, Any], sample_type: str, main_filename: str = "main.py") -> str: executable = EXAMPLES_DIR.joinpath("torch", sample_type, main_filename).as_posix() - cli_args = " ".join(key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items()) + cli_args = " ".join(key if (val is None or val is True) else f"{key} {val}" for key, val in args.items()) return f"{sys.executable} {executable} {cli_args}" @@ -103,7 +103,7 @@ def get_metric_value_from_checkpoint( self, checkpoint_save_dir: str, checkpoint_name: Optional[str] = None, config_path: Optional[Path] = None ): checkpoint_path = self.get_checkpoint_path(checkpoint_save_dir, checkpoint_name, config_path) - assert os.path.exists(checkpoint_path), "Path to checkpoint {} does not exist".format(checkpoint_path) + assert os.path.exists(checkpoint_path), f"Path to checkpoint {checkpoint_path} does not exist" accuracy = torch.load(checkpoint_path)["best_acc1"] return accuracy diff --git a/tests/torch/sparsity/movement/test_training.py b/tests/torch/sparsity/movement/test_training.py index 9e25ce40496..16f09d20499 100644 --- a/tests/torch/sparsity/movement/test_training.py +++ b/tests/torch/sparsity/movement/test_training.py @@ -46,7 +46,7 @@ def _get_main_filename(self) -> str: def get_metric_value_from_checkpoint(self, checkpoint_save_dir: str) -> Dict[str, Union[float, int]]: checkpoint_path = self.get_checkpoint_path(checkpoint_save_dir) result_path = checkpoint_path / "all_results.json" - with open(result_path, "r", encoding="utf-8") as f: + with open(result_path, encoding="utf-8") as f: result = json.load(f) return result diff --git a/tests/torch/sparsity/movement/training_scripts/run_glue.py b/tests/torch/sparsity/movement/training_scripts/run_glue.py index 79c6cce221c..ec1d76ca9a8 100644 --- a/tests/torch/sparsity/movement/training_scripts/run_glue.py +++ b/tests/torch/sparsity/movement/training_scripts/run_glue.py @@ -66,7 +66,7 @@ def parse_args() -> Tuple[argparse.Namespace, TrainingArguments]: parser.add_argument( "--quick_check", action="store_true", - help="If set True, will train the model without pretrained weights on only " f"{quick_check_num} samples.", + help=f"If set True, will train the model without pretrained weights on only {quick_check_num} 
samples.", ) args, other_args = parser.parse_known_args() diff --git a/tests/torch/test_compressed_graph.py b/tests/torch/test_compressed_graph.py index 36c14e7e70f..259ce8dea32 100644 --- a/tests/torch/test_compressed_graph.py +++ b/tests/torch/test_compressed_graph.py @@ -158,7 +158,7 @@ def forward_fn(model, seq_len_, batch_size_, vocab_size_, batch_first_): def gen_packed_sequence(): seq_list = [] - seq_lens = torch.LongTensor((batch_size_)).random_(1, seq_len_ + 1).type(torch.int32).to(device) + seq_lens = torch.LongTensor(batch_size_).random_(1, seq_len_ + 1).type(torch.int32).to(device) seq_lens = torch.sort(seq_lens, descending=True).values for seq_size in seq_lens: seq_list.append(torch.LongTensor(seq_size.item()).random_(1, vocab_size_).to(device)) @@ -919,7 +919,7 @@ def prepare_potential_quantizer_graph(graph: PTNNCFGraph, quantizer_setup: Singl node_name = nncf_node.node_name if node_name in pre_hooked_quantizers_activations_attr: input_port_id, qconf_str = pre_hooked_quantizers_activations_attr[node_name] - label = "Quantizer: {}".format(qconf_str) + label = f"Quantizer: {qconf_str}" additional_node_attrs = dict(label=label, color="purple", id=nncf_node.node_id) pre_hook_quantizer_node_key = node_name + "|IN" + str(input_port_id) @@ -943,7 +943,7 @@ def prepare_potential_quantizer_graph(graph: PTNNCFGraph, quantizer_setup: Singl if node_name in post_hooked_quantizers_activations_attr: qconf_str = post_hooked_quantizers_activations_attr[node_name] - label = "Quantizer: {}".format(qconf_str) + label = f"Quantizer: {qconf_str}" additional_node_attrs = dict(label=label, color="purple", id=nncf_node.node_id) post_hook_quantizer_node_key = node_name + "|OUT" @@ -957,7 +957,7 @@ def prepare_potential_quantizer_graph(graph: PTNNCFGraph, quantizer_setup: Singl nx_graph.add_edge(node_key, post_hook_quantizer_node_key) if node_name in quantizers_weights_attr: - label = "Quantizer: {}".format(quantizers_weights_attr[node_name]) + label = f"Quantizer: {quantizers_weights_attr[node_name]}" weight_quantizer_node_key = node_name + "|WEIGHT" nx_graph.add_node(weight_quantizer_node_key, label=label, color="purple", id=nncf_node.node_id) nx_graph.add_edge(weight_quantizer_node_key, node_key) diff --git a/tests/torch/test_compression_lr_multiplier.py b/tests/torch/test_compression_lr_multiplier.py index 65ddff0d33f..92431e77cb4 100644 --- a/tests/torch/test_compression_lr_multiplier.py +++ b/tests/torch/test_compression_lr_multiplier.py @@ -101,7 +101,8 @@ def merge_configs(configs: List[NNCFConfig], use_algo_list: bool = True) -> NNCF if not use_algo_list: if len(algorithms) > 1: - raise Exception("If there is more than one algorithm you could use only use_algo_list=True") + msg = "If there is more than one algorithm you could use only use_algo_list=True" + raise Exception(msg) res_config["compression"] = algorithms[0] else: res_config["compression"] = algorithms @@ -275,7 +276,8 @@ def create_initialized_one_parameter_model_and_dataloader( elif parameter_cls is CompressionParameter: param = parameter_cls(data, requires_grad=init_requires_grad, compression_lr_multiplier=multiplier) else: - raise Exception(f"Unsupported parameter type: {parameter_cls}") + msg = f"Unsupported parameter type: {parameter_cls}" + raise Exception(msg) for setting_type, requires_grad in requires_grad_settings: if setting_type == "attr": @@ -283,7 +285,8 @@ def create_initialized_one_parameter_model_and_dataloader( elif setting_type == "fn": param.requires_grad_(requires_grad) else: - raise Exception(f"Unsupported 
setting type: {setting_type}") + msg = f"Unsupported setting type: {setting_type}" + raise Exception(msg) model = OneParameterModel(param) train_loader = DataLoader( diff --git a/tests/torch/test_compression_training.py b/tests/torch/test_compression_training.py index 9d931660977..2cd123548aa 100644 --- a/tests/torch/test_compression_training.py +++ b/tests/torch/test_compression_training.py @@ -72,9 +72,7 @@ def get_default_args(self, tmp_path): def _create_command_line(self, args): executable = self._sample_handler.get_executable() - cli_args = " ".join( - key if (val is None or val is True) else "{} {}".format(key, val) for key, val in args.items() - ) + cli_args = " ".join(key if (val is None or val is True) else f"{key} {val}" for key, val in args.items()) return f"{sys.executable} {executable} {cli_args}" @@ -168,7 +166,7 @@ def finalize(self, dataset_dir, tmp_path_factory, weekly_models_path) -> "Compre ) self.weights_path = self._get_weight_path(weekly_models_path) if self.weights_path is not None: - assert os.path.exists(self.weights_path), "Weights file does not exist: {}".format(self.weights_path) + assert os.path.exists(self.weights_path), f"Weights file does not exist: {self.weights_path}" checkpoint_save_dir = str(tmp_path_factory.mktemp("models")) self.checkpoint_save_dir = os.path.join(checkpoint_save_dir, self.execution_arg.replace("-", "_")) return self diff --git a/tests/torch/test_distributed_data_parallel_mode.py b/tests/torch/test_distributed_data_parallel_mode.py index ade5eed2507..dd304da2d5b 100644 --- a/tests/torch/test_distributed_data_parallel_mode.py +++ b/tests/torch/test_distributed_data_parallel_mode.py @@ -96,4 +96,5 @@ def test_is_ddp_freezing(waiting_time: float) -> None: for process in ctx.processes: if process.is_alive(): process.terminate() - raise TimeoutError("DDP wrapper may be freezing") + msg = "DDP wrapper may be freezing" + raise TimeoutError(msg) diff --git a/tests/torch/test_extensions_build.py b/tests/torch/test_extensions_build.py index b2697e70d62..3c08ab102f2 100644 --- a/tests/torch/test_extensions_build.py +++ b/tests/torch/test_extensions_build.py @@ -64,14 +64,14 @@ def test_force_cuda_build(tmp_path): mode = "cpu" command = Command( - "{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode), + f"{python_executable_with_venv} {run_path}/extensions_build_checks.py {mode}", cwd=run_path, env=env_variables, ) command.run() version_command = Command( - '{} -c "import torch; print(torch.__version__)"'.format(python_executable_with_venv), + f'{python_executable_with_venv} -c "import torch; print(torch.__version__)"', cwd=run_path, env=env_variables, ) @@ -91,7 +91,7 @@ def test_force_cuda_build(tmp_path): mode = "cuda" command = Command( - "{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode), + f"{python_executable_with_venv} {run_path}/extensions_build_checks.py {mode}", cwd=run_path, env=env_variables, ) diff --git a/tests/torch/test_graph_building.py b/tests/torch/test_graph_building.py index 5040770e58b..ea28dda1d04 100644 --- a/tests/torch/test_graph_building.py +++ b/tests/torch/test_graph_building.py @@ -186,8 +186,8 @@ def test_activation_shape_tracing(input_shape: Tuple[int, ...]): ).output_edges input_tensor_shapes = [x.tensor_shape for x in input_edges] output_tensor_shapes = [x.tensor_shape for x in output_edges] - assert input_tensor_shapes == ref_input_shapes, "Failed for node ID: {}".format(node_id) - assert output_tensor_shapes == ref_output_shapes, "Failed 
for node ID: {}".format(node_id) + assert input_tensor_shapes == ref_input_shapes, f"Failed for node ID: {node_id}" + assert output_tensor_shapes == ref_output_shapes, f"Failed for node ID: {node_id}" class ParallelEdgesModel(nn.Module): diff --git a/tests/torch/test_knowledge_distillation.py b/tests/torch/test_knowledge_distillation.py index 1aae902fda7..e1dfea89798 100644 --- a/tests/torch/test_knowledge_distillation.py +++ b/tests/torch/test_knowledge_distillation.py @@ -42,7 +42,7 @@ def get_device_str(inference_type: str, gpu_id: int): if inference_type == "cpu": return "cpu" if gpu_id is not None: - return "cuda:{}".format(gpu_id) + return f"cuda:{gpu_id}" return "cuda" diff --git a/tests/torch/test_layer_attributes.py b/tests/torch/test_layer_attributes.py index e666f33df89..bdcb87f47b6 100644 --- a/tests/torch/test_layer_attributes.py +++ b/tests/torch/test_layer_attributes.py @@ -78,7 +78,7 @@ def __init__( def __eq__(self, other: "RefNodeDesc"): eq_metatype = self.metatype_cls == other.metatype_cls if not eq_metatype: - print("metatype classes are different: {} vs {}".format(self.metatype_cls, other.metatype_cls)) + print(f"metatype classes are different: {self.metatype_cls} vs {other.metatype_cls}") eq_layer_attributes = self.layer_attributes == other.layer_attributes if self.layer_attributes_comparator is not None: eq_layer_attributes = self.layer_attributes_comparator(self.layer_attributes, other.layer_attributes) diff --git a/tests/torch/test_model_transformer.py b/tests/torch/test_model_transformer.py index 6bb32e6bf6b..e07157a6631 100644 --- a/tests/torch/test_model_transformer.py +++ b/tests/torch/test_model_transformer.py @@ -185,7 +185,8 @@ def hook(x): module = model.nncf.get_module_by_scope(insertion_point.module_scope) assert module.post_ops["0"] is hook else: - raise Exception(f"Not check order for {insertion_point.insertion_type}") + msg = f"Not check order for {insertion_point.insertion_type}" + raise Exception(msg) assert len(model.nncf._groups_vs_hooks_handlers[test_hook_group]) == 1 @@ -251,7 +252,8 @@ def test_pt_insertion_command(self, target_point: PTTargetPoint, multidevice: bo module = model.nncf.get_module_by_scope(insertion_point.module_scope) assert module.post_ops["0"] is hook else: - raise Exception(f"Not check order for {insertion_point.insertion_type}") + msg = f"Not check order for {insertion_point.insertion_type}" + raise Exception(msg) if isinstance(hook, nn.Module) and not multidevice: assert hook.to_device == get_model_device(model) @@ -365,7 +367,8 @@ def test_priority(self, target_type, trace_parameters, priority_type): module = model.nncf.get_containing_module(point.target_node_name) self.check_order(list(module.post_ops.values()), hook_list, order) else: - raise Exception(f"Not check order for {target_type}") + msg = f"Not check order for {target_type}" + raise Exception(msg) MERGE_PATTERN_TEST_CASES = ( @@ -575,7 +578,7 @@ def test_get_ip_graph_with_merged_operations(self, mock_graph_factory, dot_file_ data_dir: Path = TEST_ROOT / "torch/data/reference_graphs/pattern_merging" - path_to_dot_file = data_dir / "{}.dot".format(dot_file_name) + path_to_dot_file = data_dir / f"{dot_file_name}.dot" if os.getenv("NNCF_TEST_REGEN_DOT") is not None: if not os.path.exists(str(data_dir)): diff --git a/tests/torch/test_models/preact_resnet.py b/tests/torch/test_models/preact_resnet.py index e03b7cdaa6f..42181311663 100644 --- a/tests/torch/test_models/preact_resnet.py +++ b/tests/torch/test_models/preact_resnet.py @@ -123,7 +123,7 @@ def 
 
 
 def test():
     net = PreActResNet18()
-    y = net((torch.randn(1, 3, 32, 32)))
+    y = net(torch.randn(1, 3, 32, 32))
     print(y.size())
diff --git a/tests/torch/test_models/squeezenet.py b/tests/torch/test_models/squeezenet.py
index 92a90ff0a2a..1bc2d12e859 100644
--- a/tests/torch/test_models/squeezenet.py
+++ b/tests/torch/test_models/squeezenet.py
@@ -38,7 +38,8 @@ class SqueezeNet(nn.Module):
     def __init__(self, version=1.0, num_classes=1000, dropout=0.5):
         super().__init__()
         if version not in [1.0, 1.1]:
-            raise ValueError("Unsupported SqueezeNet version {version}: 1.0 or 1.1 expected".format(version=version))
+            msg = f"Unsupported SqueezeNet version {version}: 1.0 or 1.1 expected"
+            raise ValueError(msg)
         self.num_classes = num_classes
         if version == 1.0:
             self.features = nn.Sequential(
diff --git a/tests/torch/test_models/ssd_mobilenet.py b/tests/torch/test_models/ssd_mobilenet.py
index baea3b77e65..e8151d0746f 100644
--- a/tests/torch/test_models/ssd_mobilenet.py
+++ b/tests/torch/test_models/ssd_mobilenet.py
@@ -97,7 +97,8 @@ def forward(self, x):
 
 def build_ssd_mobilenet(cfg, size, num_classes, config):
     if size != 300:
-        raise ValueError("Only Mobilenet-SSD with input size 300 is supported")
+        msg = "Only Mobilenet-SSD with input size 300 is supported"
+        raise ValueError(msg)
     mobilenet_ssd = MobileNetSSD(num_classes, cfg)
 
     if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):
diff --git a/tests/torch/test_models/swin.py b/tests/torch/test_models/swin.py
index 20623e9bd4d..5dea312184b 100644
--- a/tests/torch/test_models/swin.py
+++ b/tests/torch/test_models/swin.py
@@ -262,7 +262,7 @@ def __init__(
             mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
             mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
             attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
-            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+            attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
         else:
             attn_mask = None
 
diff --git a/tests/torch/test_sota_checkpoints.py b/tests/torch/test_sota_checkpoints.py
index 299e89ce50b..d0579ccb482 100644
--- a/tests/torch/test_sota_checkpoints.py
+++ b/tests/torch/test_sota_checkpoints.py
@@ -116,7 +116,8 @@ def read_reference_file(ref_path: Path) -> List[EvalRunParamsStruct]:
             model_dict = datasets[dataset_name]
             for model_name, sample_dict in model_dict.items():
                 if model_name in model_names:
-                    raise nncf.InternalError(f"Model name {model_name} is not unique.")
+                    msg = f"Model name {model_name} is not unique."
+                    raise nncf.InternalError(msg)
                 model_names.append(model_name)
                 param_list.append(
                     EvalRunParamsStruct(
diff --git a/tests/torch2/function_hook/graph/test_build_graph_mode.py b/tests/torch2/function_hook/graph/test_build_graph_mode.py
index d1f1dd4c022..91c67cac325 100644
--- a/tests/torch2/function_hook/graph/test_build_graph_mode.py
+++ b/tests/torch2/function_hook/graph/test_build_graph_mode.py
@@ -178,7 +178,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
             return x.T
         if self.attr == ".mT":
             return x.mT
-        raise ValueError(f"Unexpected attribute: {self.attr}")
+        msg = f"Unexpected attribute: {self.attr}"
+        raise ValueError(msg)
 
 
 @pytest.mark.parametrize("attr", [".T", ".mT"])
diff --git a/tools/add_new_quantization_parameters.py b/tools/add_new_quantization_parameters.py
index 7a21c8a3c3e..bbcb6b3496f 100644
--- a/tools/add_new_quantization_parameters.py
+++ b/tools/add_new_quantization_parameters.py
@@ -88,7 +88,7 @@ def main(argv):
             files_to_copy.append(pair)
 
     for src_file, dst_file in files_to_copy:
-        print("\nCopying {}".format(dst_file))
+        print(f"\nCopying {dst_file}")
         copyfile(src_file, dst_file)
 
 
diff --git a/tools/benchmark.py b/tools/benchmark.py
index 27949cee498..cb4ba599810 100644
--- a/tools/benchmark.py
+++ b/tools/benchmark.py
@@ -46,7 +46,7 @@ def run_wall(layer, input_size_, device, runs, is_print=True, dtype=torch.float)
     fbtime = elapsed / runs * scale
 
     if is_print:
-        print("Forward&Backward: {0:.3f} {1}".format(fbtime, ctime))
+        print(f"Forward&Backward: {fbtime:.3f} {ctime}")
 
     return {"forward + backward": fbtime}
 
@@ -87,9 +87,8 @@ def run_profile(layer, input_size_, device, runs, forward_only=False, dtype=torc
         backward_average = backward_time / runs * scale
 
         print(
-            "Forward: min {0:.3f}{4} / avg {1:.3f}{4} | Backward: min {2:.3f}{4} / avg {3:.3f}{4}".format(
-                forward_min, forward_average, backward_min, backward_average, ctime
-            )
+            f"Forward: min {forward_min:.3f}{ctime} / avg {forward_average:.3f}{ctime} |"
+            f" Backward: min {backward_min:.3f}{ctime} / avg {backward_average:.3f}{ctime}"
         )
 
     return {
@@ -103,7 +102,7 @@ def run_profile(layer, input_size_, device, runs, forward_only=False, dtype=torc
 
 def run_worker(gpu, world_size, layer, input_size_, runs, dtype=torch.float, output: List[Dict[str, int]] = None):
     dist.init_process_group(backend="nccl", init_method="tcp://127.0.0.1:8899", world_size=world_size, rank=gpu)
-    device = torch.device("cuda:%d" % gpu)
+    device = torch.device(f"cuda:{gpu}")
     torch.cuda.set_device(gpu)
 
     batch = (int)(input_size_[0] / world_size)
diff --git a/tools/clip_dot.py b/tools/clip_dot.py
index 68466f10f27..a62754c9796 100644
--- a/tools/clip_dot.py
+++ b/tools/clip_dot.py
@@ -43,7 +43,8 @@ def main(argv):
             break
 
     if start_key is None:
-        raise nncf.InternalError("Could not find the node with ID {} to start from!".format(args.start_id))
+        msg = f"Could not find the node with ID {args.start_id} to start from!"
+        raise nncf.InternalError(msg)
 
     for edge in nx.edge_bfs(graph, start_key, orientation="ignore"):
         from_key, to_key, _ = edge
diff --git a/tools/correct_checkpoint.py b/tools/correct_checkpoint.py
index eb96d402e02..6066578c2c5 100644
--- a/tools/correct_checkpoint.py
+++ b/tools/correct_checkpoint.py
@@ -65,7 +65,7 @@ def main(argv):
     for k, v in sd.items():
         new_k = replace_key_fn(k)
         if new_k != k:
-            print("{}\n{}\n\n".format(k, new_k))
+            print(f"{k}\n{new_k}\n\n")
         new_sd[replace_key_fn(k)] = v
 
     pth["state_dict"] = new_sd
diff --git a/tools/extract_ov_subgraph.py b/tools/extract_ov_subgraph.py
index 21d2959950d..8ebb24430fd 100644
--- a/tools/extract_ov_subgraph.py
+++ b/tools/extract_ov_subgraph.py
@@ -257,7 +257,8 @@ def take_model_subgraph(xml_dict: Dict, source_node_name: str, distance: int):
     output_path = Path(args.output_path) if args.output_path is not None else None
 
     if distance <= 0:
-        raise ValueError("Distance should be positive")
+        msg = "Distance should be positive"
+        raise ValueError(msg)
 
     if output_path is None or output_path.suffix == "":
         output_filename = f"{input_path.stem}_{Path(node_name).stem}_{distance}.xml"
@@ -271,7 +272,8 @@ def take_model_subgraph(xml_dict: Dict, source_node_name: str, distance: int):
         output_dir = output_path.parent
 
     if output_path.exists():
-        raise ValueError(f"There is already and IR at {output_path}. Exiting.")
+        msg = f"There is already an IR at {output_path}. Exiting."
+        raise ValueError(msg)
 
     # Read IR xml as dict
     tree = dET.parse(input_path)
diff --git a/tools/ir_utils.py b/tools/ir_utils.py
index eaaed3622cb..45cfab0225f 100644
--- a/tools/ir_utils.py
+++ b/tools/ir_utils.py
@@ -80,7 +80,7 @@ def extract_params(buffer, all_parameters, layer, get_weight_shape_fn):
         weight_size = int(weight.get("size"))
         weight_data = get_blob(buffer, weight_offset, weight_size, weight_shape, precision)
         param = Parameter(weight_data, weight_size, weight_offset, weight_shape)
-        all_parameters["{}.weight".format(layer_name)] = param
+        all_parameters[f"{layer_name}.weight"] = param
     if biases is not None:
         bias_shape = [output_shape[1]]
         bias_size = int(biases.get("size"))
@@ -88,7 +88,7 @@ def extract_params(buffer, all_parameters, layer, get_weight_shape_fn):
         bias_data = get_blob(buffer, bias_offset, bias_size, bias_shape, precision)
 
         bias_param = Parameter(bias_data, bias_size, bias_offset, bias_shape)
-        all_parameters["{}.bias".format(layer_name)] = bias_param
+        all_parameters[f"{layer_name}.bias"] = bias_param
 
 
 def get_blob(buffer, offset, size, shape, dtype=np.float32):
diff --git a/tools/memory_monitor.py b/tools/memory_monitor.py
index f230d95d03d..1a60b0152e4 100644
--- a/tools/memory_monitor.py
+++ b/tools/memory_monitor.py
@@ -97,7 +97,8 @@ def __init__(
             if include_child_processes is None:
                 include_child_processes = True
         else:
-            raise ValueError("Unknown memory type to log")
+            msg = "Unknown memory type to log"
+            raise ValueError(msg)
         self.memory_unit = memory_unit
         self.include_child_processes = include_child_processes
 
@@ -119,7 +120,8 @@ def start(self, at_exit_fn: Optional[Callable] = None) -> "MemoryMonitor":
         ```
         """
         if self._monitoring_in_progress:
-            raise Exception("Monitoring already in progress")
+            msg = "Monitoring already in progress"
+            raise Exception(msg)
 
         self._memory_values_queue = queue.Queue()
         self._monitoring_thread_should_stop = False
@@ -217,7 +219,7 @@ def save_memory_plot(self, log_filepath: Path, plot_title: Optional[str] = "", f
         :param plot_title: A title to give to a plot.
         :param filename_suffix: A string suffix to give to the saved figure.
""" - with open(log_filepath, "r") as f: + with open(log_filepath) as f: lines = f.readlines() time_values, memory_values = [], [] for line in lines[:-2]: @@ -252,7 +254,8 @@ def _monitor_memory(self): elif self.memory_type == MemoryType.SYSTEM: bytes_used = psutil.virtual_memory().total - psutil.virtual_memory().available else: - raise Exception("Unknown memory type to log") + msg = "Unknown memory type to log" + raise Exception(msg) if self._monitoring_thread_should_stop: break self._memory_values_queue.put((time.perf_counter(), bytes_used)) diff --git a/tools/visualize_compression_results.py b/tools/visualize_compression_results.py index fe7329c31c4..df7928c687d 100644 --- a/tools/visualize_compression_results.py +++ b/tools/visualize_compression_results.py @@ -57,13 +57,13 @@ def check_format(df): missing_columns = [col for col in EXPECTED_COLUMNS if col not in df.columns] if missing_columns: - raise RuntimeError(f"The following columns are missing: {missing_columns}") + msg = f"The following columns are missing: {missing_columns}" + raise RuntimeError(msg) missing_in_mode = [item for item in EXPECTED_IN_MODE_COLUMN if not any(df[MODE].str.contains(item))] if missing_in_mode: - raise RuntimeError( - f"The `{MODE}` column must have at least one entry that includes the following words: {missing_in_mode}" - ) + msg = f"The `{MODE}` column must have at least one entry that includes the following words: {missing_in_mode}" + raise RuntimeError(msg) def add_relative_metrics(df):