
Commit 111971b

igorsugak authored and facebook-github-bot committed
[CODEMOD][pytorch] replace uses of np.ndarray with npt.NDArray
Summary:
X-link: pytorch/opacus#681
X-link: pytorch/captum#1389
X-link: pytorch/botorch#2586
X-link: pytorch/audio#3846

This replaces uses of `numpy.ndarray` in type annotations with `numpy.typing.NDArray`. In NumPy 1.24.0+, `numpy.ndarray` is annotated as a generic type. Without template parameters it triggers static analysis errors:
```counterexample
Generic type `ndarray` expects 2 type parameters.
```
`numpy.typing.NDArray` is an alias that provides default template parameters.

Reviewed By: ryanthomasjohnson

Differential Revision: D64619891

fbshipit-source-id: dffc096b1ce90d11e73d475f0bbcb8867ed9ef01
1 parent 0854585 commit 111971b
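The change pattern is the same in every file below: add `import numpy.typing as npt` and swap the annotation. A minimal sketch of the before/after, assuming NumPy 1.24+ and a hypothetical `normalize` helper that is not part of this commit:

```python
import numpy as np
import numpy.typing as npt


# Before (flagged by strict checkers under NumPy 1.24+):
#     Generic type `ndarray` expects 2 type parameters.
# def normalize(x: np.ndarray) -> np.ndarray: ...

def normalize(x: npt.NDArray) -> npt.NDArray:
    """After: the bare alias behaves like ndarray[Any, dtype[Any]]."""
    return (x - x.mean()) / x.std()


def normalize_f32(x: npt.NDArray[np.float32]) -> npt.NDArray[np.float32]:
    """The alias also accepts an explicit scalar type when it is known."""
    return ((x - x.mean()) / x.std()).astype(np.float32)
```

`npt.NDArray` is an alias for `np.ndarray[Any, np.dtype[ScalarType]]` with the scalar type defaulting to `Any`, which is why the bare form satisfies checkers that demand both type parameters.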

File tree

15 files changed: +52 -37 lines changed

references/depth/stereo/train.py (+2 -1)

@@ -5,6 +5,7 @@
from typing import List, Union

import numpy as np
+import numpy.typing as npt
import torch
import torch.distributed as dist
import torchvision.models.optical_flow
@@ -33,7 +34,7 @@ def make_stereo_flow(flow: Union[torch.Tensor, List[torch.Tensor]], model_out_ch
    return flow


-def make_lr_schedule(args: argparse.Namespace, optimizer: torch.optim.Optimizer) -> np.ndarray:
+def make_lr_schedule(args: argparse.Namespace, optimizer: torch.optim.Optimizer) -> npt.NDArray:
    """Helper function to return a learning rate scheduler for CRE-stereo"""
    if args.decay_after_steps < args.warmup_steps:
        raise ValueError(f"decay_after_steps: {args.function} must be greater than warmup_steps: {args.warmup_steps}")

references/depth/stereo/visualization.py (+3 -2)

@@ -2,6 +2,7 @@
from typing import List

import numpy as np
+import numpy.typing as npt
import torch
from torch import Tensor
from torchvision.utils import make_grid
@@ -64,7 +65,7 @@ def make_training_sample_grid(
    disparities: Tensor,
    masks: Tensor,
    predictions: List[Tensor],
-) -> np.ndarray:
+) -> npt.NDArray:
    # detach images and renormalize to [0, 1]
    images_left = left_images.detach().cpu() * 0.5 + 0.5
    images_right = right_images.detach().cpu() * 0.5 + 0.5
@@ -84,7 +85,7 @@ def make_training_sample_grid(


@torch.no_grad()
-def make_disparity_sequence_grid(predictions: List[Tensor], disparities: Tensor) -> np.ndarray:
+def make_disparity_sequence_grid(predictions: List[Tensor], disparities: Tensor) -> npt.NDArray:
    # right most we will be adding the ground truth
    seq_len = len(predictions) + 1
    predictions = list(map(lambda x: x[:, :1, :, :].detach().cpu(), predictions + [disparities]))

test/datasets_utils.py (+3 -2)

@@ -18,6 +18,7 @@
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union

import numpy as np
+import numpy.typing as npt

import PIL
import PIL.Image
@@ -825,8 +826,8 @@ def size(idx: int) -> Tuple[int, int, int]:
def shape_test_for_stereo(
    left: PIL.Image.Image,
    right: PIL.Image.Image,
-    disparity: Optional[np.ndarray] = None,
-    valid_mask: Optional[np.ndarray] = None,
+    disparity: Optional[npt.NDArray] = None,
+    valid_mask: Optional[npt.NDArray] = None,
):
    left_dims = get_dimensions(left)
    right_dims = get_dimensions(right)

torchvision/datasets/_optical_flow.py (+8 -7)

@@ -6,6 +6,7 @@
from typing import Callable, List, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
import torch
from PIL import Image

@@ -164,7 +165,7 @@ def __getitem__(self, index: int) -> Union[T1, T2]:
        """
        return super().__getitem__(index)

-    def _read_flow(self, file_name: str) -> np.ndarray:
+    def _read_flow(self, file_name: str) -> npt.NDArray:
        return _read_flo(file_name)


@@ -225,7 +226,7 @@ def __getitem__(self, index: int) -> Union[T1, T2]:
        """
        return super().__getitem__(index)

-    def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
+    def _read_flow(self, file_name: str) -> Tuple[npt.NDArray, npt.NDArray]:
        return _read_16bits_png_with_flow_and_valid_mask(file_name)


@@ -293,7 +294,7 @@ def __getitem__(self, index: int) -> Union[T1, T2]:
        """
        return super().__getitem__(index)

-    def _read_flow(self, file_name: str) -> np.ndarray:
+    def _read_flow(self, file_name: str) -> npt.NDArray:
        return _read_flo(file_name)


@@ -391,7 +392,7 @@ def __getitem__(self, index: int) -> Union[T1, T2]:
        """
        return super().__getitem__(index)

-    def _read_flow(self, file_name: str) -> np.ndarray:
+    def _read_flow(self, file_name: str) -> npt.NDArray:
        return _read_pfm(file_name)


@@ -443,7 +444,7 @@ def __init__(self, root: Union[str, Path], split: str = "train", transforms: Opt
                "Could not find the HD1K images. Please make sure the directory structure is correct."
            )

-    def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
+    def _read_flow(self, file_name: str) -> Tuple[npt.NDArray, npt.NDArray]:
        return _read_16bits_png_with_flow_and_valid_mask(file_name)

    def __getitem__(self, index: int) -> Union[T1, T2]:
@@ -462,7 +463,7 @@ def __getitem__(self, index: int) -> Union[T1, T2]:
        return super().__getitem__(index)


-def _read_flo(file_name: str) -> np.ndarray:
+def _read_flo(file_name: str) -> npt.NDArray:
    """Read .flo file in Middlebury format"""
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
@@ -479,7 +480,7 @@ def _read_flo(file_name: str) -> np.ndarray:
    return data.reshape(h, w, 2).transpose(2, 0, 1)


-def _read_16bits_png_with_flow_and_valid_mask(file_name: str) -> Tuple[np.ndarray, np.ndarray]:
+def _read_16bits_png_with_flow_and_valid_mask(file_name: str) -> Tuple[npt.NDArray, npt.NDArray]:

    flow_and_valid = decode_png(read_file(file_name)).to(torch.float32)
    flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]

torchvision/datasets/_stereo_matching.py (+12 -11)

@@ -9,6 +9,7 @@
from typing import Callable, cast, List, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
from PIL import Image

from .utils import _read_pfm, download_and_extract_archive, verify_str_arg
@@ -92,7 +93,7 @@ def _scan_pairs(
        return paths

    @abstractmethod
-    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
+    def _read_disparity(self, file_path: str) -> Tuple[Optional[npt.NDArray], Optional[npt.NDArray]]:
        # function that returns a disparity map and an occlusion map
        pass

@@ -178,7 +179,7 @@ def __init__(self, root: Union[str, Path], transforms: Optional[Callable] = None
        disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
        self._disparities = disparities

-    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
+    def _read_disparity(self, file_path: str) -> Tuple[npt.NDArray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
@@ -257,7 +258,7 @@ def __init__(self, root: Union[str, Path], split: str = "train", transforms: Opt
        else:
            self._disparities = list((None, None) for _ in self._images)

-    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
+    def _read_disparity(self, file_path: str) -> Tuple[Optional[npt.NDArray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None
@@ -345,7 +346,7 @@ def __init__(self, root: Union[str, Path], split: str = "train", transforms: Opt
        else:
            self._disparities = list((None, None) for _ in self._images)

-    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
+    def _read_disparity(self, file_path: str) -> Tuple[Optional[npt.NDArray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None
@@ -565,7 +566,7 @@ def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
            file_path = random.choice(ambient_file_paths)  # type: ignore
        return super()._read_img(file_path)

-    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
+    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[npt.NDArray, npt.NDArray]]:
        # test split has not disparity maps
        if file_path is None:
            return None, None
@@ -695,7 +696,7 @@ def __init__(
            disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
            self._disparities += disparities

-    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
+    def _read_disparity(self, file_path: str) -> Tuple[npt.NDArray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze the disparity map into (C, H, W) format
        disparity_map = disparity_map[None, :, :] / 32.0
@@ -789,7 +790,7 @@ def __init__(self, root: Union[str, Path], variant: str = "single", transforms:
            right_disparity_pattern = str(root / s / split_prefix[s] / "*.right.depth.png")
            self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

-    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
+    def _read_disparity(self, file_path: str) -> Tuple[npt.NDArray, None]:
        # (H, W) image
        depth = np.asarray(Image.open(file_path))
        # as per https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
@@ -912,7 +913,7 @@ def __init__(
        right_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "right" / "*.pfm")
        self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

-    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
+    def _read_disparity(self, file_path: str) -> Tuple[npt.NDArray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
@@ -1021,7 +1022,7 @@ def _get_occlussion_mask_paths(self, file_path: str) -> Tuple[str, str]:

        return occlusion_path, outofframe_path

-    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
+    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[npt.NDArray, npt.NDArray]]:
        if file_path is None:
            return None, None

@@ -1102,7 +1103,7 @@ def __init__(self, root: Union[str, Path], split: str = "train", transforms: Opt
        right_disparity_pattern = str(root / "*" / "right_disp.png")
        self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

-    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
+    def _read_disparity(self, file_path: str) -> Tuple[npt.NDArray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze disparity to (C, H, W)
        disparity_map = disparity_map[None, :, :] / 1024.0
@@ -1196,7 +1197,7 @@ def __init__(self, root: Union[str, Path], split: str = "train", transforms: Opt
        disparity_pattern = str(root / anot_dir / "*" / "disp0GT.pfm")
        self._disparities = self._scan_pairs(disparity_pattern, None)

-    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
+    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[npt.NDArray, npt.NDArray]]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

torchvision/datasets/phototour.py (+2 -1)

@@ -3,6 +3,7 @@
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
import torch
from PIL import Image

@@ -187,7 +188,7 @@ def extra_repr(self) -> str:
def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor:
    """Return a Tensor containing the patches"""

-    def PIL2array(_img: Image.Image) -> np.ndarray:
+    def PIL2array(_img: Image.Image) -> npt.NDArray:
        """Convert PIL image type to numpy 2D array"""
        return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)


torchvision/datasets/sbd.py (+2 -1)

@@ -4,6 +4,7 @@
from typing import Any, Callable, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
from PIL import Image

from .utils import download_and_extract_archive, download_url, verify_str_arg
@@ -102,7 +103,7 @@ def _get_segmentation_target(self, filepath: str) -> Image.Image:
        mat = self._loadmat(filepath)
        return Image.fromarray(mat["GTcls"][0]["Segmentation"][0])

-    def _get_boundaries_target(self, filepath: str) -> np.ndarray:
+    def _get_boundaries_target(self, filepath: str) -> npt.NDArray:
        mat = self._loadmat(filepath)
        return np.concatenate(
            [np.expand_dims(mat["GTcls"][0]["Boundaries"][0][i][0].toarray(), axis=0) for i in range(self.num_classes)],

torchvision/datasets/stl10.py (+3 -2)

@@ -3,6 +3,7 @@
from typing import Any, Callable, cast, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
from PIL import Image

from .utils import check_integrity, download_and_extract_archive, verify_str_arg
@@ -63,7 +64,7 @@ def __init__(
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # now load the picked numpy arrays
-        self.labels: Optional[np.ndarray]
+        self.labels: Optional[npt.NDArray]
        if self.split == "train":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
@@ -129,7 +130,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]:
    def __len__(self) -> int:
        return self.data.shape[0]

-    def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
+    def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[npt.NDArray, Optional[npt.NDArray]]:
        labels = None
        if labels_file:
            path_to_labels = os.path.join(self.root, self.base_folder, labels_file)

torchvision/datasets/utils.py (+2 -1)

@@ -16,6 +16,7 @@
from urllib.parse import urlparse

import numpy as np
+import numpy.typing as npt
import torch
from torch.utils.model_zoo import tqdm

@@ -434,7 +435,7 @@ def verify_str_arg(
    return value


-def _read_pfm(file_name: Union[str, pathlib.Path], slice_channels: int = 2) -> np.ndarray:
+def _read_pfm(file_name: Union[str, pathlib.Path], slice_channels: int = 2) -> npt.NDArray:
    """Read file in .pfm format. Might contain either 1 or 3 channels of data.

    Args:

torchvision/prototype/datasets/_builtin/cifar.py (+3 -2)

@@ -5,6 +5,7 @@
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
@@ -24,7 +25,7 @@ def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -
        self.datapipe = datapipe
        self.labels_key = labels_key

-    def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
+    def __iter__(self) -> Iterator[Tuple[npt.NDArray, int]]:
        for mapping in self.datapipe:
            image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
            category_idcs = mapping[self.labels_key]
@@ -67,7 +68,7 @@ def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
        file.close()
        return content

-    def _prepare_sample(self, data: Tuple[np.ndarray, int]) -> Dict[str, Any]:
+    def _prepare_sample(self, data: Tuple[npt.NDArray, int]) -> Dict[str, Any]:
        image_array, category_idx = data
        return dict(
            image=Image(image_array),

torchvision/prototype/datasets/_builtin/svhn.py (+3 -2)

@@ -2,6 +2,7 @@
from typing import Any, BinaryIO, Dict, List, Tuple, Union

import numpy as np
+import numpy.typing as npt
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
@@ -50,7 +51,7 @@ def _resources(self) -> List[OnlineResource]:

        return [data]

-    def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
+    def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[npt.NDArray, npt.NDArray]]:
        _, buffer = data
        content = read_mat(buffer)
        return list(
@@ -60,7 +61,7 @@ def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.n
            )
        )

-    def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
+    def _prepare_sample(self, data: Tuple[npt.NDArray, npt.NDArray]) -> Dict[str, Any]:
        image_array, label_array = data

        return dict(

torchvision/transforms/functional.py (+2 -1)

@@ -6,6 +6,7 @@
from typing import Any, List, Optional, Tuple, Union

import numpy as np
+import numpy.typing as npt
import torch
from PIL import Image
from PIL.Image import Image as PILImage
@@ -124,7 +125,7 @@ def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


-def to_tensor(pic: Union[PILImage, np.ndarray]) -> Tensor:
+def to_tensor(pic: Union[PILImage, npt.NDArray]) -> Tensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

0 commit comments