
Commit 292af3c

Skylion007 authored and pytorchmergebot committed
[BE][Ez]: ISC001 Auto concatenate implicit one line strings (pytorch#146408)
Apply the ruff rule for implicit string concatenation, which autofixes adjacent string literals that are all of the same type and sit on the same line. These strings were most likely split up by autoformatters in the past. All fixes were generated automatically by the ISC001 autofix.

Pull Request resolved: pytorch#146408
Approved by: https://github.com/justinchuby, https://github.com/janeyx99
1 parent f38a2ea commit 292af3c

36 files changed, +56 -64 lines
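For readers unfamiliar with the rule: ISC001 flags implicit concatenation of adjacent string literals that sit on the same line, and its autofix merges them into a single literal. Below is a minimal sketch modeled on the first hunk in this commit; the `loss` variable and the exact ruff invocation are illustrative assumptions, not part of the commit.

# Before: two adjacent f-strings on one line; Python concatenates them implicitly.
# Note the missing space after "set:" -- the autofix preserves the value exactly.
loss = 0.123456
print(f"\tTest set:" f"Loss: {loss:.6f}")  # prints "\tTest set:Loss: 0.123456"

# After running something like `ruff check --select ISC001 --fix`, the two
# literals are merged into one:
print(f"\tTest set:Loss: {loss:.6f}")  # identical output, single literal

Because only literals of the same type on the same line are merged, the rewrite is purely syntactic: every string keeps its exact runtime value, including any missing spaces the original authors may not have intended.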

functorch/examples/dp_cifar10/cifar10_opacus.py (+1 -1)

@@ -119,7 +119,7 @@ def test(args, model, test_loader, device):

     top1_avg = np.mean(top1_acc)

-    print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
+    print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg :.6f} ")
     return np.mean(top1_acc)

functorch/examples/dp_cifar10/cifar10_transforms.py (+1 -1)

@@ -185,7 +185,7 @@ def test(args, model, test_loader, device):

     top1_avg = np.mean(top1_acc)

-    print(f"\tTest set:" f"Loss: {np.mean(losses):.6f} " f"Acc@1: {top1_avg :.6f} ")
+    print(f"\tTest set:Loss: {np.mean(losses):.6f} Acc@1: {top1_avg :.6f} ")
     return np.mean(top1_acc)

test/distributed/fsdp/test_fsdp_grad_acc.py (+1 -1)

@@ -61,7 +61,7 @@ class _GradAccConfig:
     def __repr__(self) -> str:
         # Override to remove any spaces in the string to appease the internal
         # build's test name parser
-        return f"(use_no_sync={self.use_no_sync}," f"num_iters={self.num_iters})"
+        return f"(use_no_sync={self.use_no_sync},num_iters={self.num_iters})"


 @dataclass

test/jit/test_exception.py (+1 -1)

@@ -36,7 +36,7 @@ def close_match(x):

         with self.assertRaisesRegex(
             RuntimeError,
-            "This op may not exist or may not be currently " "supported in TorchScript",
+            "This op may not exist or may not be currently supported in TorchScript",
         ):

             @torch.jit.script

test/jit/test_tracer.py (+1 -1)

@@ -1702,7 +1702,7 @@ def test_index_put(target, indices, rhs):
     def test_trace_checker_dot_data(self):
         with self.assertRaisesRegex(
             torch.jit.TracingCheckError,
-            r"Tensor-valued Constant nodes differed in value " r"across invocations",
+            r"Tensor-valued Constant nodes differed in value across invocations",
         ):

             @_trace(torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])

test/jit/test_union.py (+7 -7)

@@ -754,7 +754,7 @@ def fn():
             template,
             "Union[List[str], List[torch.Tensor]]",
             lhs["list_literal_of_mixed"],
-            "none of those types match the types of the" " given list elements",
+            "none of those types match the types of the given list elements",
         )

         self._assert_passes(
@@ -784,21 +784,21 @@ def fn():
             template,
             "Union[int, torch.Tensor]",
             lhs["list_literal_empty"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         self._assert_raises(
             template,
             "Union[int, torch.Tensor]",
             lhs["list_literal_of_tensor"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         self._assert_raises(
             template,
             "Union[int, torch.Tensor]",
             lhs["list_comprehension_of_tensor"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         """
@@ -890,7 +890,7 @@ def fn():
             template,
             "Union[List[str], List[torch.Tensor]]",
             lhs["dict_literal_empty"],
-            "Expected an Union type annotation with an " "inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         self._assert_passes(
@@ -974,14 +974,14 @@ def fn():
             template,
             "Union[int, torch.Tensor]",
             lhs["dict_literal_empty"],
-            "Expected an Union type annotation with " "an inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         self._assert_raises(
             template,
             "Union[int, torch.Tensor]",
             lhs["dict_literal_of_str_tensor"],
-            "Expected an Union type annotation with " "an inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         # See above--string frontend does not support tuple unpacking

test/jit/test_union_pep604.py (+7 -7)

@@ -762,7 +762,7 @@ def fn():
             template,
             "List[str] | List[torch.Tensor]",
             lhs["list_literal_of_mixed"],
-            "none of those types match the types of the" " given list elements",
+            "none of those types match the types of the given list elements",
         )

         self._assert_passes(
@@ -790,21 +790,21 @@ def fn():
             template,
             "int | torch.Tensor",
             lhs["list_literal_empty"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         self._assert_raises(
             template,
             "int | torch.Tensor",
             lhs["list_literal_of_tensor"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         self._assert_raises(
             template,
             "int | torch.Tensor",
             lhs["list_comprehension_of_tensor"],
-            "Expected an Union type annotation with an " "inner List type",
+            "Expected an Union type annotation with an inner List type",
         )

         """
@@ -894,7 +894,7 @@ def fn():
             template,
             "List[str] | List[torch.Tensor]",
             lhs["dict_literal_empty"],
-            "Expected an Union type annotation with an " "inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         self._assert_passes(
@@ -978,14 +978,14 @@ def fn():
             template,
             "int | torch.Tensor",
             lhs["dict_literal_empty"],
-            "Expected an Union type annotation with " "an inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         self._assert_raises(
             template,
             "int | torch.Tensor",
             lhs["dict_literal_of_str_tensor"],
-            "Expected an Union type annotation with " "an inner Dict type",
+            "Expected an Union type annotation with an inner Dict type",
         )

         # See above--string frontend does not support tuple unpacking

test/onnx/test_verification.py (+1 -1)

@@ -253,7 +253,7 @@ def test_preserve_mismatch_source_location(self):
         leaf_info.pretty_print_mismatch(graph=True)
         self.assertRegex(
             f.getvalue(),
-            r"(.|\n)*" r"aten::relu.*/torch/nn/functional.py:[0-9]+(.|\n)*",
+            r"(.|\n)*aten::relu.*/torch/nn/functional.py:[0-9]+(.|\n)*",
         )

     def test_find_all_mismatch_operators(self):

test/test_autograd.py (+1 -1)

@@ -8091,7 +8091,7 @@ def backward(ctx, grad):
         view_a = a.unbind()[0]
         with self.assertRaisesRegex(
             RuntimeError,
-            "This view is the output of a function that returns " "multiple views.",
+            "This view is the output of a function that returns multiple views.",
         ):
             view_a.copy_(b)

test/test_xnnpack_integration.py (+4 -4)

@@ -23,7 +23,7 @@

 @unittest.skipUnless(
     torch.backends.xnnpack.enabled,
-    " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.",
+    " XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.",
 )
 @unittest.skipIf(
     TEST_WITH_TSAN,
@@ -231,7 +231,7 @@ def test_conv2d_transpose(

 @unittest.skipUnless(
     torch.backends.xnnpack.enabled,
-    " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.",
+    " XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.",
 )
 @unittest.skipIf(
     TEST_WITH_TSAN,
@@ -753,7 +753,7 @@ def forward(self, x):

 @unittest.skipUnless(
     torch.backends.xnnpack.enabled,
-    " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.",
+    " XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.",
 )
 @unittest.skipIf(
     TEST_WITH_TSAN,
@@ -1241,7 +1241,7 @@ def forward(self, x):

 @unittest.skipUnless(
     torch.backends.xnnpack.enabled,
-    " XNNPACK must be enabled for these tests." " Please build with USE_XNNPACK=1.",
+    " XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.",
 )
 @unittest.skipIf(
     TEST_WITH_TSAN,

test/torch_np/numpy_tests/core/test_numeric.py (+1 -1)

@@ -3029,7 +3029,7 @@ def test_broadcast_error_kwargs(self):
     def test_shape_mismatch_error_message(self):
         with assert_raises(
             ValueError,
-            match=r"arg 0 with shape \(1, 3\) and " r"arg 2 with shape \(2,\)",
+            match=r"arg 0 with shape \(1, 3\) and arg 2 with shape \(2,\)",
         ):
             np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])

test/torch_np/numpy_tests/core/test_scalarmath.py (+1 -1)

@@ -250,7 +250,7 @@ def test_mixed_types(self):
                 a = t1(3)
                 b = t2(2)
                 result = a**b
-                msg = f"error with {t1!r} and {t2!r}:" f"got {result!r}, expected {9!r}"
+                msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}"
                 if np.issubdtype(np.dtype(result), np.integer):
                     assert_(result == 9, msg)
                 else:

tools/packaging/build_wheel.py (+1 -1)

@@ -105,7 +105,7 @@ def parse_args() -> argparse.Namespace:
         "--destination",
         default="dist/",
         type=str,
-        help=("Destination to put the compailed binaries" ""),
+        help=("Destination to put the compailed binaries"),
     )
     return parser.parse_args()

torch/_dynamo/variables/higher_order_ops.py (+1 -1)

@@ -1482,7 +1482,7 @@ def non_single_tensor_return_unsupported(api, ret):

     if not isinstance(ret, TensorVariable):
         raise Unsupported(
-            f"{api} over function that returns something " f"other than one Tensor"
+            f"{api} over function that returns something other than one Tensor"
         )

torch/_functorch/eager_transforms.py (+2 -4)

@@ -73,9 +73,7 @@ def create_differentiable(x):
         if isinstance(x, torch.Tensor):
             with enable_inplace_requires_grad(True):
                 return _set_tensor_requires_grad(x)
-        raise ValueError(
-            f"Thing passed to transform API must be Tensor, " f"got {type(x)}"
-        )
+        raise ValueError(f"Thing passed to transform API must be Tensor, got {type(x)}")

     return tree_map(create_differentiable, inps)

@@ -954,7 +952,7 @@ def assert_non_empty_list_of_tensors(
         if isinstance(out, torch.Tensor):
             continue
         raise RuntimeError(
-            f"{api}: Expected {argname} to only contain Tensors, got " f"{type(out)}"
+            f"{api}: Expected {argname} to only contain Tensors, got {type(out)}"
         )

torch/_functorch/partitioners.py (+1 -1)

@@ -885,7 +885,7 @@ def is_fusible(a, b):
         import networkx as nx
     except ImportError as e:
         raise RuntimeError(
-            "Need networkx installed to perform smart recomputation " "heuristics"
+            "Need networkx installed to perform smart recomputation heuristics"
         ) from e

     def is_materialized_backwards(node):

torch/_library/infer_schema.py (+1 -3)

@@ -66,9 +66,7 @@ def infer_schema(
     sig = inspect.signature(prototype_function)

     def error_fn(what):
-        raise ValueError(
-            f"infer_schema(func): {what} " f"Got func with signature {sig})"
-        )
+        raise ValueError(f"infer_schema(func): {what} Got func with signature {sig})")

     def convert_type_string(annotation_type: str):
         try:

torch/_tensor_str.py (+1 -3)

@@ -689,9 +689,7 @@ def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
             f")"
         )
     if torch._C._functorch.is_gradtrackingtensor(tensor):
-        return (
-            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
-        )
+        return f"GradTrackingTensor(lvl={level}, value=\n{indented_value_repr}\n)"
     if torch._C._functorch.is_functionaltensor(tensor):
         return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

torch/distributed/_shard/sharded_tensor/utils.py (+1 -1)

@@ -69,7 +69,7 @@ def _validate_output_tensor_for_gather(
         )
     elif dst_tensor:
         raise ValueError(
-            "Argument ``dst_tensor`` must NOT be specified " "on non-destination ranks."
+            "Argument ``dst_tensor`` must NOT be specified on non-destination ranks."
         )

torch/distributed/elastic/multiprocessing/errors/error_handler.py (+1 -1)

@@ -117,7 +117,7 @@ def dump_error_file(self, rootcause_error_file: str, error_code: int = 0):
             rootcause_error_file, rootcause_error, error_code
         )
         logger.debug(
-            "child error file (%s) contents:\n" "%s",
+            "child error file (%s) contents:\n%s",
             rootcause_error_file,
             json.dumps(rootcause_error, indent=2),
         )

torch/distributed/elastic/timer/api.py (+2 -2)

@@ -194,7 +194,7 @@ def _run_watchdog(self):
         reaped_worker_ids = set()
         for worker_id, expired_timers in self.get_expired_timers(now).items():
             logger.info(
-                "Reaping worker_id=[%s]." " Expired timers: %s",
+                "Reaping worker_id=[%s]. Expired timers: %s",
                 worker_id,
                 self._get_scopes(expired_timers),
             )
@@ -212,7 +212,7 @@ def _get_scopes(self, timer_requests):

     def start(self) -> None:
         logger.info(
-            "Starting %s..." " max_interval=%s," " daemon=%s",
+            "Starting %s... max_interval=%s, daemon=%s",
             type(self).__name__,
             self._max_interval,
             self._daemon,

torch/distributed/optim/utils.py (+1 -1)

@@ -50,7 +50,7 @@ def as_functional_optim(optim_cls: type, *args, **kwargs):
         functional_cls = functional_optim_map[optim_cls]
     except KeyError as e:
         raise ValueError(
-            f"Optimizer {optim_cls} does not have a functional " f"counterpart!"
+            f"Optimizer {optim_cls} does not have a functional counterpart!"
         ) from e

     return _create_functional_optim(functional_cls, *args, **kwargs)

torch/distributed/optim/zero_redundancy_optimizer.py (+2 -2)

@@ -1517,14 +1517,14 @@ def _init_local_optimizer(self) -> None:
                 self._bucket_assignments_per_rank[self.global_rank]
             )
             logger.info(
-                "rank %s with %s parameters " "across %s buckets",
+                "rank %s with %s parameters across %s buckets",
                 self.global_rank,
                 local_numel,
                 num_assigned_buckets,
             )
             if self.global_rank == 0:
                 logger.info(
-                    "%s DDP " "buckets and " "%s bucket " "assignments",
+                    "%s DDP buckets and %s bucket assignments",
                     len(self._overlap_info.params_per_bucket),
                     self._overlap_info.num_bucket_assignments,
                 )

torch/distributed/rpc/backend_registry.py (+1 -3)

@@ -188,9 +188,7 @@ def _validate_device_maps(
     for node in all_names:
         devices = all_devices[node]
         if len(set(devices)) != len(devices):
-            raise ValueError(
-                f"Node {node} has duplicated devices\n" f"devices = {devices}"
-            )
+            raise ValueError(f"Node {node} has duplicated devices\ndevices = {devices}")
         if not _tensorpipe_validate_devices(devices, all_device_counts[node]):
             raise ValueError(
                 f"Node {node} has devices with invalid indices\n"

torch/distributed/tensor/_sharding_prop.py (+1 -1)

@@ -421,7 +421,7 @@ def spec_to_strategy(spec: object) -> object:
             raise e
         except Exception as e:
             raise RuntimeError(
-                f"Sharding propagation failed on op {op_schema}.\n" f"Error: {e}"
+                f"Sharding propagation failed on op {op_schema}.\nError: {e}"
             ) from e

         # step 2. if can't get output_spec from sharding

torch/fx/experimental/symbolic_shapes.py (+1 -1)

@@ -6397,7 +6397,7 @@ def _get_stack_summary(
             maybe_extra_debug += "\nC++ stack trace:\n" + "".join(cpp_stack.format())
         elif is_debug:
             maybe_extra_debug += (
-                "\nFor C++ stack trace, run with " "TORCHDYNAMO_EXTENDED_DEBUG_CPP=1"
+                "\nFor C++ stack trace, run with TORCHDYNAMO_EXTENDED_DEBUG_CPP=1"
             )

         return SLoc(floc, maybe_user_loc), maybe_extra_debug
