Enable pyupgrade and flake8-errmsg rules in ruff (#3221)
### Changes

- pyupgrade (UP): https://docs.astral.sh/ruff/rules/#pyupgrade-up
- flake8-errmsg (EM): https://docs.astral.sh/ruff/rules/#flake8-errmsg-em

Rules regarding annotations will be enabled in the next PR (see the sketch after this list):
```
    "UP006", # non-pep585-annotation
    "UP007", # non-pep604-annotation-union
    "UP035", # deprecated-import
    "UP038", # non-pep604-isinstance
    "UP045", # non-pep604-annotation-optional
```
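
For reference, a minimal sketch of the patterns these rule groups rewrite to; the `read_version` helper below is hypothetical and is not taken from this diff:

```python
# Illustrative only -- this helper is hypothetical and not part of the diff.


def read_version(path: str) -> str:
    # UP015 (pyupgrade): "r" is the default open() mode, so the explicit mode is dropped.
    with open(path, encoding="utf-8") as f:
        text = f.read()
    if not text:
        # EM101 (flake8-errmsg): assign the message to a variable instead of
        # passing a string literal directly to the exception constructor.
        msg = "Unable to find version string."
        raise RuntimeError(msg)
    # UP032 (pyupgrade): prefer f-strings over str.format().
    return f"version: {text.strip()}"


# The deferred annotation rules would additionally rewrite typing-module
# generics and unions, e.g.:
#   UP006: typing.List[int]       -> list[int]
#   UP007: typing.Union[int, str] -> int | str
#   UP045: typing.Optional[int]   -> int | None
```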
AlexanderDokuchaev authored Jan 31, 2025
1 parent 41a79c8 commit f21215b
Showing 411 changed files with 2,195 additions and 1,679 deletions.
3 changes: 2 additions & 1 deletion custom_version.py
@@ -60,7 +60,8 @@ def get_custom_version() -> str:
r"^__version__ = ['\"]((\d+\.\d+\.\d+)([^'\"]*))['\"]", Path(NNCF_VERSION_FILE).read_text(), re.M
)
if not version_match:
raise RuntimeError("Unable to find version string.")
msg = "Unable to find version string."
raise RuntimeError(msg)

version_full = version_match.group(1)
version_value = version_match.group(2)
2 changes: 1 addition & 1 deletion examples/common/paths.py
@@ -17,7 +17,7 @@
def configure_paths(config: SampleConfig, run_name: str):
config.name = run_name
d = datetime.datetime.now()
run_id = "{:%Y-%m-%d__%H-%M-%S}".format(d)
run_id = f"{d:%Y-%m-%d__%H-%M-%S}"
log_dir = Path(config.log_dir) / run_name / run_id
log_dir.mkdir(parents=True, exist_ok=True)
config.log_dir = str(log_dir)
20 changes: 6 additions & 14 deletions examples/experimental/torch/classification/bootstrap_nas.py
@@ -204,31 +204,23 @@ def validate_model_fn_top1(model_, loader_):
validate_model_fn_top1, val_loader, config.checkpoint_save_dir, tensorboard_writer=config.tb
)

logger.info("Best config: {best_config}".format(best_config=best_config))
logger.info("Performance metrics: {performance_metrics}".format(performance_metrics=performance_metrics))
logger.info(f"Best config: {best_config}")
logger.info(f"Performance metrics: {performance_metrics}")
search_algo.visualize_search_progression()

# Maximal subnet
elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet()
search_algo.bn_adaptation.run(nncf_network)
top1_acc = validate_model_fn_top1(nncf_network, val_loader)
logger.info(
"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
top1_acc=top1_acc,
macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
)
)
macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}")

# Best found subnet
elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config)
search_algo.bn_adaptation.run(nncf_network)
top1_acc = validate_model_fn_top1(nncf_network, val_loader)
logger.info(
"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
top1_acc=top1_acc,
macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
)
)
macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}")
elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx"))

if "test" in config.mode:
18 changes: 5 additions & 13 deletions examples/experimental/torch/classification/bootstrap_nas_search.py
@@ -147,7 +147,7 @@ def validate_model_fn_top1(model_, loader_):
load_state(model, model_weights, is_resume=True)

top1_acc = validate_model_fn_top1(model, val_loader)
logger.info("SuperNetwork Top 1: {top1_acc}".format(top1_acc=top1_acc))
logger.info(f"SuperNetwork Top 1: {top1_acc}")

search_algo = BaseSearchAlgorithm.from_config(model, elasticity_ctrl, nncf_config)

@@ -163,23 +163,15 @@ def validate_model_fn_top1(model_, loader_):
elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet()
search_algo.bn_adaptation.run(nncf_network)
top1_acc = validate_model_fn_top1(nncf_network, val_loader)
logger.info(
"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
top1_acc=top1_acc,
macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
)
)
macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}")

# Best found subnet
elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config)
search_algo.bn_adaptation.run(nncf_network)
top1_acc = validate_model_fn_top1(nncf_network, val_loader)
logger.info(
"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
top1_acc=top1_acc,
macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
)
)
macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}")
elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx"))

search_algo.search_progression_to_csv()
@@ -132,7 +132,7 @@ def run_example():
download_and_extract(MODEL_PATH, MODEL_INFO)
ov_model = ov.Core().read_model(MODEL_PATH / "stfpm_capsule.xml")

with open(MODEL_PATH / "meta_data_stfpm_capsule.json", "r", encoding="utf-8") as f:
with open(MODEL_PATH / "meta_data_stfpm_capsule.json", encoding="utf-8") as f:
validation_params = json.load(f)

###############################################################################
@@ -86,7 +86,7 @@ def __getitem__(self, item: int) -> Tuple[torch.Tensor, Dict]:
target = dict(image_id=[image_id], boxes=[], labels=[])
label_filepath = self.labels_path / f"{image_id:012d}.txt"
if label_filepath.exists():
with open(label_filepath, "r", encoding="utf-8") as f:
with open(label_filepath, encoding="utf-8") as f:
for box_descr in f.readlines():
category_id, rel_x, rel_y, rel_w, rel_h = tuple(map(float, box_descr.split(" ")))
box_x1, box_y1 = img_w * (rel_x - rel_w / 2), img_h * (rel_y - rel_h / 2)
@@ -153,7 +153,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path):
return

val_annotations_file = val_data_dir / "val_annotations.txt"
with open(val_annotations_file, "r") as f:
with open(val_annotations_file) as f:
val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines())
for image_filename, image_label in val_annotation_data:
from_image_filepath = val_images_dir / image_filename
@@ -195,7 +195,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path):
return

val_annotations_file = val_data_dir / "val_annotations.txt"
with open(val_annotations_file, "r") as f:
with open(val_annotations_file) as f:
val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines())
for image_filename, image_label in val_annotation_data:
from_image_filepath = val_images_dir / image_filename
3 changes: 2 additions & 1 deletion examples/tensorflow/classification/datasets/builder.py
@@ -65,7 +65,8 @@ def dtype(self):
}
dtype = dtype_map.get(self._dtype, None)
if dtype is None:
raise nncf.ValidationError("Invalid DType provided. Supported types: {}".format(dtype_map.keys()))
msg = f"Invalid DType provided. Supported types: {dtype_map.keys()}"
raise nncf.ValidationError(msg)

return dtype

@@ -41,10 +41,12 @@ def mean_image_subtraction(
:return: the centered image.
"""
if image.get_shape().ndims != 3:
raise nncf.ValidationError("Input must be of size [height, width, C>0]")
msg = "Input must be of size [height, width, C>0]"
raise nncf.ValidationError(msg)

if len(means) != num_channels:
raise nncf.ValidationError("len(means) must match the number of channels")
msg = "len(means) must match the number of channels"
raise nncf.ValidationError(msg)

means = tf.broadcast_to(means, tf.shape(image))
if dtype is not None:
@@ -66,10 +68,12 @@ def standardize_image(
:return: the centered image.
"""
if image.get_shape().ndims != 3:
raise nncf.ValidationError("Input must be of size [height, width, C>0]")
msg = "Input must be of size [height, width, C>0]"
raise nncf.ValidationError(msg)

if len(stddev) != num_channels:
raise nncf.ValidationError("len(stddev) must match the number of channels")
msg = "len(stddev) must match the number of channels"
raise nncf.ValidationError(msg)

stddev = tf.broadcast_to(stddev, tf.shape(image))
if dtype is not None:
@@ -30,9 +30,8 @@ def get_preprocessing(dataset_name, model_name, preset=None):
if not preset:
preset = dataset_name
if preset not in PREPROCESSING_FN_MAP:
raise nncf.ValidationError(
"Preprocessing for dataset {} and model {} was not recognized".format(dataset_name, model_name)
)
msg = f"Preprocessing for dataset {dataset_name} and model {model_name} was not recognized"
raise nncf.ValidationError(msg)

ext_kwargs = {}
if preset == "imagenet2012":
15 changes: 8 additions & 7 deletions examples/tensorflow/classification/main.py
@@ -96,25 +96,26 @@ def get_num_classes(dataset):
else:
num_classes = 1000

logger.info("The sample is started with {} classes".format(num_classes))
logger.info(f"The sample is started with {num_classes} classes")
return num_classes


def load_checkpoint(checkpoint, ckpt_path):
logger.info("Load from checkpoint is enabled.")
if tf.io.gfile.isdir(ckpt_path):
path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)
logger.info("Latest checkpoint: {}".format(path_to_checkpoint))
logger.info(f"Latest checkpoint: {path_to_checkpoint}")
else:
path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None
logger.info("Provided checkpoint: {}".format(path_to_checkpoint))
logger.info(f"Provided checkpoint: {path_to_checkpoint}")

if not path_to_checkpoint:
logger.info("No checkpoint detected.")
if ckpt_path:
raise nncf.ValidationError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}")
msg = f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}"
raise nncf.ValidationError(msg)

logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint))
logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint")

status = checkpoint.restore(path_to_checkpoint)
status.expect_partial()
@@ -284,7 +285,7 @@ def run(config):
if "export" in config.mode:
save_path, save_format = get_saving_parameters(config)
export_model(compression_ctrl.strip(), save_path, save_format)
logger.info("Saved to {}".format(save_path))
logger.info(f"Saved to {save_path}")


def export(config):
@@ -319,7 +320,7 @@ def export(config):

save_path, save_format = get_saving_parameters(config)
export_model(compression_ctrl.strip(), save_path, save_format)
logger.info("Saved to {}".format(save_path))
logger.info(f"Saved to {save_path}")


def main(argv):
10 changes: 6 additions & 4 deletions examples/tensorflow/common/dataset_builder.py
@@ -88,15 +88,16 @@ def build(self):

builder = dataset_builders.get(self._dataset_type, None)
if builder is None:
raise nncf.UnknownDatasetError("Unknown dataset type {}".format(self._dataset_type))
msg = f"Unknown dataset type {self._dataset_type}"
raise nncf.UnknownDatasetError(msg)

dataset = builder()
dataset = self._pipeline(dataset)

return dataset

def _load_tfds(self):
logger.info("Using TFDS to load {} data.".format(self._split))
logger.info(f"Using TFDS to load {self._split} data.")

set_hard_limit_num_open_files()

@@ -119,13 +120,14 @@ def _load_tfds(self):
return dataset

def _load_tfrecords(self):
logger.info("Using TFRecords to load {} data.".format(self._split))
logger.info(f"Using TFRecords to load {self._split} data.")

dataset_key = self._dataset_name.replace("/", "")
if dataset_key in self._tfrecord_datasets:
self._dataset_loader = self._tfrecord_datasets[dataset_key](config=self._config, is_train=self._is_train)
else:
raise nncf.UnknownDatasetError("Unknown dataset name: {}".format(self._dataset_name))
msg = f"Unknown dataset name: {self._dataset_name}"
raise nncf.UnknownDatasetError(msg)

dataset = self._dataset_loader.as_dataset()

7 changes: 4 additions & 3 deletions examples/tensorflow/common/distributed.py
@@ -28,12 +28,13 @@ def get_distribution_strategy(config):
if "CUDA_VISIBLE_DEVICES" not in os.environ or _gpu_id in os.environ["CUDA_VISIBLE_DEVICES"].split(","):
os.environ["CUDA_VISIBLE_DEVICES"] = _gpu_id
else:
raise nncf.ValidationError(
"GPU with id = {id} was not found in the specified "
msg = (
f"GPU with id = {_gpu_id} was not found in the specified "
"CUDA_VISIBLE_DEVICES environment variable. "
"Please do not export the CUDA_VISIBLE_DEVICES environment variable "
"or specify GPU with id = {id} in it".format(id=_gpu_id)
f"or specify GPU with id = {_gpu_id} in it"
)
raise nncf.ValidationError(msg)

gpus = tf.config.list_physical_devices("GPU")

3 changes: 2 additions & 1 deletion examples/tensorflow/common/model_loader.py
@@ -21,7 +21,8 @@ def get_model(model_name, input_shape=None, pretrained=True, num_classes=1000, w
if model_name in AVAILABLE_MODELS:
model = AVAILABLE_MODELS[model_name]
else:
raise Exception("Undefined model name: {}".format(model_name))
msg = f"Undefined model name: {model_name}"
raise Exception(msg)

model_params = {"classes": num_classes}
if weights is not None:
6 changes: 3 additions & 3 deletions examples/tensorflow/common/models.py
@@ -81,15 +81,15 @@ def MobileNetV3(stack_fn, last_point_ch, input_shape=None, model_type="large", *
x = tf.keras.layers.Activation(activation="softmax", name="Predictions")(x)

# Create model.
model = tf.keras.Model(img_input, x, name="MobilenetV3{}".format(model_type))
model = tf.keras.Model(img_input, x, name=f"MobilenetV3{model_type}")

BASE_WEIGHT_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/"
WEIGHTS_HASHES = {
"large": "59e551e166be033d707958cf9e29a6a7",
"small": "8768d4c2e7dee89b9d02b2d03d65d862",
}

file_name = "weights_mobilenet_v3_{}_224_1.0_float.h5".format(model_type)
file_name = f"weights_mobilenet_v3_{model_type}_224_1.0_float.h5"
file_hash = WEIGHTS_HASHES[model_type]

weights_path = tf.keras.utils.get_file(
@@ -185,7 +185,7 @@ def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio, ac
infilters = tf.keras.backend.int_shape(x)[channel_axis]
if block_id:
# Expand
prefix = "expanded_conv_{}/".format(block_id)
prefix = f"expanded_conv_{block_id}/"
x = tf.keras.layers.Conv2D(
_depth(infilters * expansion), kernel_size=1, padding="same", use_bias=False, name=prefix + "expand"
)(x)
@@ -37,14 +37,17 @@ def backbone_generator(params):
norm_activation=norm_activation_generator(params.model_params.norm_activation),
)
else:
raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model))
msg = f"Backbone {backbone_name} is not supported for {params.model} model."
raise ValueError(msg)
elif params.model == "YOLOv4":
if backbone_name == "darknet":
backbone_fn = darknet.CSPDarknet53()
else:
raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model))
msg = f"Backbone {backbone_name} is not supported for {params.model} model."
raise ValueError(msg)
else:
raise ValueError("Model {} is not supported.".format(params.model))
msg = f"Model {params.model} is not supported."
raise ValueError(msg)

return backbone_fn

15 changes: 8 additions & 7 deletions examples/tensorflow/common/object_detection/architecture/fpn.py
@@ -60,7 +60,8 @@ def __init__(
elif activation == "swish":
self._activation_op = tf.nn.swish
else:
raise ValueError("Unsupported activation `{}`.".format(activation))
msg = f"Unsupported activation `{activation}`."
raise ValueError(msg)

self._use_batch_norm = use_batch_norm
self._norm_activation = norm_activation
@@ -72,22 +73,22 @@ def __init__(

for level in range(self._min_level, self._max_level + 1):
if self._use_batch_norm:
self._norm_activations[level] = norm_activation(use_activation=False, name="p%d-bn" % level)
self._norm_activations[level] = norm_activation(use_activation=False, name=f"p{level}-bn")

self._lateral_conv2d_op[level] = self._conv2d_op(
filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name="l%d" % level
filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name=f"l{level}"
)

self._post_hoc_conv2d_op[level] = self._conv2d_op(
filters=self._fpn_feat_dims,
strides=(1, 1),
kernel_size=(3, 3),
padding="same",
name="post_hoc_d%d" % level,
name=f"post_hoc_d{level}",
)

self._coarse_conv2d_op[level] = self._conv2d_op(
filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name="p%d" % level
filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name=f"p{level}"
)

def __call__(self, multilevel_features, is_training=None):
@@ -108,8 +109,8 @@ def __call__(self, multilevel_features, is_training=None):
input_levels = list(multilevel_features.keys())
if min(input_levels) > self._min_level:
raise ValueError(
"The minimum backbone level {} should be ".format(min(input_levels))
+ "less or equal to FPN minimum level {}.".format(self._min_level)
f"The minimum backbone level {min(input_levels)} should be "
+ f"less or equal to FPN minimum level {self._min_level}."
)

backbone_max_level = min(max(input_levels), self._max_level)