
Commit 351ace7

STY Uses black with target_version >= 3.7 (scikit-learn#20294)
1 parent e8f58cd commit 351ace7


Showing 79 changed files with 164 additions and 164 deletions.
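
The diff is entirely mechanical: black emits its "magic trailing comma" after the last element of an exploded argument list only when every configured target version accepts that syntax, and a trailing comma after `*args`/`**kwargs` in a signature is a syntax error on older Pythons. With scikit-learn's floor raised to Python 3.7, black can add the comma everywhere, which is what produces the 164 one-line changes below. Here is a minimal sketch of that behavior using black's programmatic API; the `fit` snippet and the tiny `line_length` are illustrative assumptions, not code from this commit:

```python
import black

# Hypothetical input, not taken from scikit-learn.
src = "def fit(self, X, y=None, **kwargs): pass\n"

# line_length=20 just forces the signature onto multiple lines so the
# trailing-comma behavior becomes visible.
formatted = black.format_str(
    src,
    mode=black.Mode(
        target_versions={black.TargetVersion.PY37},
        line_length=20,
    ),
)
print(formatted)
# With a py37 target, black may emit:
# def fit(
#     self,
#     X,
#     y=None,
#     **kwargs,
# ):
#     pass
# Without a modern target version, the comma after **kwargs is omitted,
# since it would be invalid syntax on the oldest supported interpreter.
```

In the repository itself the same effect would normally come from running `black` with `target-version` pinned accordingly, typically via the `[tool.black]` table in `pyproject.toml`.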

setup.py (+1 -1)

```diff
@@ -280,7 +280,7 @@ def setup_package():
         python_requires=">=3.7",
         install_requires=min_deps.tag_to_packages["install"],
         package_data={"": ["*.pxd"]},
-        **extra_setuptools_args
+        **extra_setuptools_args,
     )

     commands = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
```

sklearn/cluster/_affinity_propagation.py (+2 -2)

```diff
@@ -42,7 +42,7 @@ def affinity_propagation(
     copy=True,
     verbose=False,
     return_n_iter=False,
-    random_state=None
+    random_state=None,
 ):
     """Perform Affinity Propagation Clustering of data.

@@ -385,7 +385,7 @@ def __init__(
         preference=None,
         affinity="euclidean",
         verbose=False,
-        random_state=None
+        random_state=None,
     ):

         self.damping = damping
```

sklearn/cluster/_agglomerative.py (+3 -3)

```diff
@@ -853,7 +853,7 @@ def __init__(
         compute_full_tree="auto",
         linkage="ward",
         distance_threshold=None,
-        compute_distances=False
+        compute_distances=False,
     ):
         self.n_clusters = n_clusters
         self.distance_threshold = distance_threshold
@@ -953,7 +953,7 @@ def fit(self, X, y=None):
             connectivity=connectivity,
             n_clusters=n_clusters,
             return_distance=return_distance,
-            **kwargs
+            **kwargs,
         )
         (self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
             :4
@@ -1141,7 +1141,7 @@ def __init__(
         linkage="ward",
         pooling_func=np.mean,
         distance_threshold=None,
-        compute_distances=False
+        compute_distances=False,
     ):
         super().__init__(
             n_clusters=n_clusters,
```

sklearn/cluster/_bicluster.py (+2 -2)

```diff
@@ -309,7 +309,7 @@ def __init__(
         mini_batch=False,
         init="k-means++",
         n_init=10,
-        random_state=None
+        random_state=None,
     ):
         super().__init__(
             n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
@@ -461,7 +461,7 @@ def __init__(
         mini_batch=False,
         init="k-means++",
         n_init=10,
-        random_state=None
+        random_state=None,
     ):
         super().__init__(
             n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
```

sklearn/cluster/_birch.py (+1 -1)

```diff
@@ -463,7 +463,7 @@ def __init__(
         branching_factor=50,
         n_clusters=3,
         compute_labels=True,
-        copy=True
+        copy=True,
     ):
         self.threshold = threshold
         self.branching_factor = branching_factor
```

sklearn/cluster/_dbscan.py (+2 -2)

```diff
@@ -31,7 +31,7 @@ def dbscan(
     leaf_size=30,
     p=2,
     sample_weight=None,
-    n_jobs=None
+    n_jobs=None,
 ):
     """Perform DBSCAN clustering from vector array or distance matrix.

@@ -301,7 +301,7 @@ def __init__(
         algorithm="auto",
         leaf_size=30,
         p=None,
-        n_jobs=None
+        n_jobs=None,
     ):
         self.eps = eps
         self.min_samples = min_samples
```

sklearn/cluster/_mean_shift.py (+2 -2)

```diff
@@ -116,7 +116,7 @@ def mean_shift(
     min_bin_freq=1,
     cluster_all=True,
     max_iter=300,
-    n_jobs=None
+    n_jobs=None,
 ):
     """Perform mean shift clustering of data using a flat kernel.

@@ -380,7 +380,7 @@ def __init__(
         min_bin_freq=1,
         cluster_all=True,
         n_jobs=None,
-        max_iter=300
+        max_iter=300,
     ):
         self.bandwidth = bandwidth
         self.seeds = seeds
```

sklearn/compose/_target.py (+1 -1)

```diff
@@ -122,7 +122,7 @@ def __init__(
         transformer=None,
         func=None,
         inverse_func=None,
-        check_inverse=True
+        check_inverse=True,
     ):
         self.regressor = regressor
         self.transformer = transformer
```

sklearn/covariance/_elliptic_envelope.py (+1 -1)

```diff
@@ -132,7 +132,7 @@ def __init__(
         assume_centered=False,
         support_fraction=None,
         contamination=0.1,
-        random_state=None
+        random_state=None,
     ):
         super().__init__(
             store_precision=store_precision,
```

sklearn/covariance/_graph_lasso.py (+3 -3)

```diff
@@ -87,7 +87,7 @@ def graphical_lasso(
     verbose=False,
     return_costs=False,
     eps=np.finfo(np.float64).eps,
-    return_n_iter=False
+    return_n_iter=False,
 ):
     """l1-penalized covariance estimator

@@ -408,7 +408,7 @@ def __init__(
         enet_tol=1e-4,
         max_iter=100,
         verbose=False,
-        assume_centered=False
+        assume_centered=False,
     ):
         super().__init__(assume_centered=assume_centered)
         self.alpha = alpha
@@ -758,7 +758,7 @@ def __init__(
         mode="cd",
         n_jobs=None,
         verbose=False,
-        assume_centered=False
+        assume_centered=False,
     ):
         super().__init__(
             mode=mode,
```

sklearn/covariance/_robust_covariance.py (+1 -1)

```diff
@@ -687,7 +687,7 @@ def __init__(
         store_precision=True,
         assume_centered=False,
         support_fraction=None,
-        random_state=None
+        random_state=None,
     ):
         self.store_precision = store_precision
         self.assume_centered = assume_centered
```

sklearn/datasets/_base.py (+1 -1)

```diff
@@ -93,7 +93,7 @@ def load_files(
     shuffle=True,
     encoding=None,
     decode_error="strict",
-    random_state=0
+    random_state=0,
 ):
     """Load text files with categories as subfolder names.

```

sklearn/datasets/_lfw.py (+2 -2)

```diff
@@ -232,7 +232,7 @@ def fetch_lfw_people(
     color=False,
     slice_=(slice(70, 195), slice(78, 172)),
     download_if_missing=True,
-    return_X_y=False
+    return_X_y=False,
 ):
     """Load the Labeled Faces in the Wild (LFW) people dataset \
     (classification).
@@ -413,7 +413,7 @@ def fetch_lfw_pairs(
     resize=0.5,
     color=False,
     slice_=(slice(70, 195), slice(78, 172)),
-    download_if_missing=True
+    download_if_missing=True,
 ):
     """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).

```

sklearn/datasets/_olivetti_faces.py (+1 -1)

```diff
@@ -41,7 +41,7 @@ def fetch_olivetti_faces(
     shuffle=False,
     random_state=0,
     download_if_missing=True,
-    return_X_y=False
+    return_X_y=False,
 ):
     """Load the Olivetti faces data-set from AT&T (classification).

```

sklearn/datasets/_openml.py (+2 -2)

```diff
@@ -551,7 +551,7 @@ def _download_data_to_bunch(
     data_columns: List[int],
     target_columns: List,
     shape: Optional[Tuple[int, int]],
-    md5_checksum: str
+    md5_checksum: str,
 ):
     """Download OpenML ARFF and convert to Bunch of data"""
     # NB: this function is long in order to handle retry for any failure
@@ -724,7 +724,7 @@ def fetch_openml(
     target_column: Optional[Union[str, List]] = "default-target",
     cache: bool = True,
     return_X_y: bool = False,
-    as_frame: Union[str, bool] = "auto"
+    as_frame: Union[str, bool] = "auto",
 ):
     """Fetch dataset from openml by name or dataset id.

```

sklearn/datasets/_rcv1.py (+1 -1)

```diff
@@ -92,7 +92,7 @@ def fetch_rcv1(
     download_if_missing=True,
     random_state=None,
     shuffle=False,
-    return_X_y=False
+    return_X_y=False,
 ):
     """Load the RCV1 multilabel dataset (classification).

```

sklearn/datasets/_svmlight_format_io.py (+2 -2)

```diff
@@ -48,7 +48,7 @@ def load_svmlight_file(
     zero_based="auto",
     query_id=False,
     offset=0,
-    length=-1
+    length=-1,
 ):
     """Load datasets in the svmlight / libsvm format into sparse CSR matrix

@@ -225,7 +225,7 @@ def load_svmlight_files(
     zero_based="auto",
     query_id=False,
     offset=0,
-    length=-1
+    length=-1,
 ):
     """Load dataset from multiple files in SVMlight format

```

sklearn/decomposition/_factor_analysis.py (+1 -1)

```diff
@@ -164,7 +164,7 @@ def __init__(
         svd_method="randomized",
         iterated_power=3,
         rotation=None,
-        random_state=0
+        random_state=0,
     ):
         self.n_components = n_components
         self.copy = copy
```

sklearn/decomposition/_fastica.py (+2 -2)

```diff
@@ -161,7 +161,7 @@ def fastica(
     random_state=None,
     return_X_mean=False,
     compute_sources=True,
-    return_n_iter=False
+    return_n_iter=False,
 ):
     """Perform Fast Independent Component Analysis.

@@ -426,7 +426,7 @@ def __init__(
         max_iter=200,
         tol=1e-4,
         w_init=None,
-        random_state=None
+        random_state=None,
     ):
         super().__init__()
         if max_iter < 1:
```

sklearn/decomposition/_lda.py (+1 -1)

```diff
@@ -325,7 +325,7 @@ def __init__(
         max_doc_update_iter=100,
         n_jobs=None,
         verbose=0,
-        random_state=None
+        random_state=None,
     ):
         self.n_components = n_components
         self.doc_topic_prior = doc_topic_prior
```

sklearn/decomposition/_nmf.py (+2 -2)

```diff
@@ -918,7 +918,7 @@ def non_negative_factorization(
     regularization=None,
     random_state=None,
     verbose=0,
-    shuffle=False
+    shuffle=False,
 ):
     """Compute Non-negative Matrix Factorization (NMF).

@@ -1292,7 +1292,7 @@ def __init__(
         l1_ratio=0.0,
         verbose=0,
         shuffle=False,
-        regularization="both"
+        regularization="both",
     ):
         self.n_components = n_components
         self.init = init
```

sklearn/decomposition/_pca.py (+1 -1)

```diff
@@ -345,7 +345,7 @@ def __init__(
         svd_solver="auto",
         tol=0.0,
         iterated_power="auto",
-        random_state=None
+        random_state=None,
     ):
         self.n_components = n_components
         self.copy = copy
```

sklearn/decomposition/_sparse_pca.py (+2 -2)

```diff
@@ -129,7 +129,7 @@ def __init__(
         U_init=None,
         V_init=None,
         verbose=False,
-        random_state=None
+        random_state=None,
     ):
         self.n_components = n_components
         self.alpha = alpha
@@ -342,7 +342,7 @@ def __init__(
         shuffle=True,
         n_jobs=None,
         method="lars",
-        random_state=None
+        random_state=None,
     ):
         super().__init__(
             n_components=n_components,
```

sklearn/decomposition/_truncated_svd.py (+1 -1)

```diff
@@ -134,7 +134,7 @@ def __init__(
         algorithm="randomized",
         n_iter=5,
         random_state=None,
-        tol=0.0
+        tol=0.0,
     ):
         self.algorithm = algorithm
         self.n_components = n_components
```

sklearn/ensemble/_bagging.py (+3 -3)

```diff
@@ -220,7 +220,7 @@ def __init__(
         warm_start=False,
         n_jobs=None,
         random_state=None,
-        verbose=0
+        verbose=0,
     ):
         super().__init__(base_estimator=base_estimator, n_estimators=n_estimators)

@@ -648,7 +648,7 @@ def __init__(
         warm_start=False,
         n_jobs=None,
         random_state=None,
-        verbose=0
+        verbose=0,
     ):

         super().__init__(
@@ -1062,7 +1062,7 @@ def __init__(
         warm_start=False,
         n_jobs=None,
         random_state=None,
-        verbose=0
+        verbose=0,
     ):
         super().__init__(
             base_estimator,
```

sklearn/ensemble/_gb.py (+3 -3)

```diff
@@ -162,7 +162,7 @@ def __init__(
         warm_start=False,
         validation_fraction=0.1,
         n_iter_no_change=None,
-        tol=1e-4
+        tol=1e-4,
     ):

         self.n_estimators = n_estimators
@@ -1227,7 +1227,7 @@ def __init__(
         validation_fraction=0.1,
         n_iter_no_change=None,
         tol=1e-4,
-        ccp_alpha=0.0
+        ccp_alpha=0.0,
     ):

         super().__init__(
@@ -1800,7 +1800,7 @@ def __init__(
         validation_fraction=0.1,
         n_iter_no_change=None,
         tol=1e-4,
-        ccp_alpha=0.0
+        ccp_alpha=0.0,
     ):

         super().__init__(
```
