
Commit 5e1098f

Merge pull request #624 from DeepRank/622_linting_dbodor
ci: update ruff and fix linting rules
2 parents c1986d8 + 5c3a192 commit 5e1098f


6 files changed: +17 -53 lines changed


deeprank2/tools/target.py

Lines changed: 1 addition & 4 deletions
@@ -35,12 +35,9 @@ def add_target( # noqa: C901
     1ATN_xxx-3 0
     1ATN_xxx-4 0
     """
-    target_dict = {}
-
     labels = np.loadtxt(target_list, delimiter=sep, usecols=[0], dtype=str)
     values = np.loadtxt(target_list, delimiter=sep, usecols=[1])
-    for label, value in zip(labels, values, strict=True):
-        target_dict[label] = value
+    target_dict = dict(zip(labels, values, strict=False))

     if os.path.isdir(graph_path):
         graphs = glob.glob(f"{graph_path}/*.hdf5")
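
In add_target, the explicit accumulation loop becomes a single dict(zip(...)) call. A minimal standalone sketch of the same idiom follows (Python 3.10+ for zip's strict keyword; the labels and values are invented for illustration). One behavioural nuance: strict=True raises a ValueError when the two sequences differ in length, while the strict=False used in the new code silently stops at the shorter one.

# Illustrative sketch of the dict(zip(...)) idiom; labels/values are made-up data.
labels = ["1ATN_xxx-1", "1ATN_xxx-2", "1ATN_xxx-3"]
values = [1.0, 0.0, 0.0]

# strict=True raises ValueError on a length mismatch; strict=False (as in the commit)
# would silently drop the unmatched tail instead.
target_dict = dict(zip(labels, values, strict=True))
assert target_dict == {"1ATN_xxx-1": 1.0, "1ATN_xxx-2": 0.0, "1ATN_xxx-3": 0.0}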

deeprank2/trainer.py

Lines changed: 9 additions & 18 deletions
@@ -348,8 +348,7 @@ def _precluster(self, dataset: GraphDataset) -> None:
         f5.close()

     def _put_model_to_device(self, dataset: GraphDataset | GridDataset) -> None:
-        """
-        Puts the model on the available device.
+        """Puts the model on the available device.

         Args:
             dataset (:class:`GraphDataset` | :class:`GridDataset`): GraphDataset object.
@@ -405,8 +404,7 @@ def configure_optimizers(
         lr: float = 0.001,
         weight_decay: float = 1e-05,
     ) -> None:
-        """
-        Configure optimizer and its main parameters.
+        """Configure optimizer and its main parameters.

         Args:
             optimizer (:class:`torch.optim`, optional): PyTorch optimizer object. If none, defaults to :class:`torch.optim.Adam`.
@@ -435,8 +433,7 @@ def set_lossfunction( # noqa: C901
         lossfunction: nn.modules.loss._Loss | None = None,
         override_invalid: bool = False,
     ) -> None:
-        """
-        Set the loss function.
+        """Set the loss function.

         Args:
             lossfunction (optional): Make sure to use a loss function that is appropriate for
@@ -524,8 +521,7 @@ def train( # noqa: PLR0915, C901
         best_model: bool = True,
         filename: str | None = "model.pth.tar",
     ) -> None:
-        """
-        Performs the training of the model.
+        """Performs the training of the model.

         Args:
             nepoch (int, optional): Maximum number of epochs to run.
@@ -685,8 +681,7 @@ def train( # noqa: PLR0915, C901
             self.model.load_state_dict(self.model_load_state_dict)

     def _epoch(self, epoch_number: int, pass_name: str) -> float | None:
-        """
-        Runs a single epoch.
+        """Runs a single epoch.

         Args:
             epoch_number (int): the current epoch number
@@ -751,8 +746,7 @@ def _eval(
         epoch_number: int,
         pass_name: str,
     ) -> float | None:
-        """
-        Evaluates the model.
+        """Evaluates the model.

         Args:
             loader (Dataloader): Data to evaluate on.
@@ -818,8 +812,7 @@ def _eval(

     @staticmethod
     def _log_epoch_data(stage: str, loss: float, time: float) -> None:
-        """
-        Prints the data of each epoch.
+        """Prints the data of each epoch.

         Args:
             stage (str): Train or valid.
@@ -863,8 +856,7 @@ def test(
         batch_size: int = 32,
         num_workers: int = 0,
     ) -> None:
-        """
-        Performs the testing of the model.
+        """Performs the testing of the model.

         Args:
             batch_size (int, optional): Sets the size of the batch.
@@ -935,8 +927,7 @@ def _load_params(self) -> None:
         self.ngpu = state["ngpu"]

     def _save_model(self) -> dict[str, Any]:
-        """
-        Saves the model to a file.
+        """Saves the model to a file.

         Args:
             filename (str, optional): Name of the file. Defaults to None.
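
Every trainer.py hunk makes the same mechanical change: the docstring summary moves up onto the line with the opening triple quotes, which is the form rule D212 expects once the Google pydocstyle convention is enabled in pyproject.toml below. A minimal sketch of the full shape these docstrings now follow; scale_values is a hypothetical function, not part of the DeepRank2 codebase.

def scale_values(values: list[float], factor: float = 1.0) -> list[float]:
    """Scale each value by a constant factor.

    Args:
        values (list[float]): Values to scale.
        factor (float, optional): Multiplier applied to every value. Defaults to 1.0.

    Returns:
        list[float]: The scaled values.
    """
    return [v * factor for v in values]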

deeprank2/utils/parsing/residue.py

Lines changed: 1 addition & 7 deletions
@@ -10,9 +10,7 @@ def __init__(
         absent_atom_names: list[str],
     ):
         self.class_name = class_name
-
         self.amino_acid_names = amino_acid_names
-
         self.present_atom_names = present_atom_names
         self.absent_atom_names = absent_atom_names

@@ -26,11 +24,7 @@ def matches(self, amino_acid_name: str, atom_names: list[str]) -> bool:
             return False

         # check the atom names that should be present
-        if not all(atom_name in atom_names for atom_name in self.present_atom_names):
-            return False
-
-        # all checks passed
-        return True
+        return all(atom_name in atom_names for atom_name in self.present_atom_names)


 class ResidueClassParser: # noqa: D101
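
The matches change folds an if / return False / return True tail into a single return of the boolean expression, a standard Ruff-style simplification. Behaviour is unchanged, including the edge case: all() returns True for an empty iterable, so an empty present_atom_names still matches. A tiny illustrative sketch with made-up helper names:

# Before: explicit branches around a boolean check.
def has_required_atoms(required: list[str], available: list[str]) -> bool:
    if not all(name in available for name in required):
        return False
    return True

# After: return the expression directly; all([]) is True, so an empty `required` still passes.
def has_required_atoms_simplified(required: list[str], available: list[str]) -> bool:
    return all(name in available for name in required)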

pyproject.toml

Lines changed: 2 additions & 14 deletions
@@ -91,10 +91,12 @@ include = ["deeprank2*"]
 addopts = "-ra"

 [tool.ruff]
+output-format = "concise"
 line-length = 159

 [tool.ruff.lint]
 select = ["ALL"]
+pydocstyle.convention = "google" # docstring settings
 ignore = [
     # Unrealistic for this code base
     "PTH", # flake8-use-pathlib
@@ -117,20 +119,6 @@ ignore = [
     "D104", # Missing public package docstring
     "D105", # Missing docstring in magic method
     "D107", # Missing docstring in `__init__`
-    # Docstring rules irrelevant to the Google style
-    "D203", # 1 blank line required before class docstring
-    "D204", # 1 blank line required after class docstring
-    "D212", # Multi-line docstring summary should start at the first line
-    "D213", # Multi-line docstring summary should start at the second line
-    "D215", # Section underline is over-indented
-    "D400", # First line should end with a period (clashes with D415: First line should end with a period, question mark, or exclamation point)
-    "D401", # First line of docstring should be in imperative mood
-    "D404", # First word of the docstring should not be This
-    "D406", # Section name should end with a newline
-    "D407", # Missing dashed underline after section
-    "D408", # Section underline should be in the line following the section's name
-    "D409", # Section underline should match the length of its name
-    "D413", # Missing blank line after last section
 ]

 # Autofix settings
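
Two settings drive the rest of the commit. output-format = "concise" only changes how Ruff reports findings (one line per violation); it does not change which rules run. pydocstyle.convention = "google" lets Ruff resolve the intra-pydocstyle conflicts itself: under the Google convention the rules that clash with that style (the D203/D204/D213/... block deleted above) should be disabled automatically, so the hand-maintained ignore entries become redundant. D212 (multi-line summary on the first line) is not disabled by the convention, which is why the docstring hunks elsewhere in this commit move each summary onto the opening-quote line.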

tests/test_integration.py

Lines changed: 1 addition & 2 deletions
@@ -32,8 +32,7 @@


 def test_cnn() -> None:
-    """
-    Tests processing several PDB files into their features representation HDF5 file.
+    """Tests processing several PDB files into their features representation HDF5 file.

     Then uses HDF5 generated files to train and test a CnnRegression network.
     """

tests/test_querycollection.py

Lines changed: 3 additions & 8 deletions
@@ -22,8 +22,7 @@ def _querycollection_tester(
     cpu_count: int = 1,
     combine_output: bool = True,
 ) -> tuple[QueryCollection, str, list[str]]:
-    """
-    Generic function to test QueryCollection class.
+    """Generic function to test QueryCollection class.

     Args:
         query_type (str): query type to be generated. It accepts only 'ppi' (ProteinProteinInterface) or 'srv' (SingleResidueVariant).
@@ -203,15 +202,11 @@ def test_querycollection_process_combine_output_true() -> None:
     _, output_directory_f, output_paths_f = _querycollection_tester(query_type, feature_modules=modules, combine_output=False, cpu_count=2)
     assert len(output_paths_t) == 1

-    keys_t = {}
     with h5py.File(output_paths_t[0], "r") as file_t:
-        for key, value in file_t.items():
-            keys_t[key] = value
-    keys_f = {}
+        keys_t = dict(file_t.items())
     for output_path in output_paths_f:
         with h5py.File(output_path, "r") as file_f:
-            for key, value in file_f.items():
-                keys_f[key] = value
+            keys_f = dict(file_f.items())
     assert keys_t == keys_f

     rmtree(output_directory_t)
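
The test now collects its key maps with dict(file.items()) instead of a manual loop; h5py File and Group objects expose a dict-like .items() that yields (name, object) pairs. A small self-contained sketch of the idiom, using an in-memory HDF5 file so nothing touches disk (the dataset names are invented for illustration):

import h5py
import numpy as np

# In-memory HDF5 file (core driver with backing_store=False writes nothing to disk).
with h5py.File("example.hdf5", "w", driver="core", backing_store=False) as f:
    f.create_dataset("residue-ppi-1", data=np.zeros(3))
    f.create_dataset("residue-ppi-2", data=np.ones(3))

    # File.items() yields (name, object) pairs, so dict() collects them directly.
    entries = dict(f.items())
    assert sorted(entries) == ["residue-ppi-1", "residue-ppi-2"]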
