@@ -32,7 +32,7 @@ K-means method instead of the original samples::
32
32
[(0, 64), (1, 262), (2, 4674)]
33
33
>>> from imblearn.under_sampling import ClusterCentroids
34
34
>>> cc = ClusterCentroids(random_state=0)
35
- >>> X_resampled, y_resampled = cc.fit_sample(X, y)
35
+ >>> X_resampled, y_resampled = cc.fit_resample(X, y)
36
36
>>> print(sorted(Counter(y_resampled).items()))
37
37
[(0, 64), (1, 64), (2, 64)]
38
38
@@ -82,7 +82,7 @@ randomly selecting a subset of data for the targeted classes::
82
82
83
83
>>> from imblearn.under_sampling import RandomUnderSampler
84
84
>>> rus = RandomUnderSampler(random_state=0)
85
- >>> X_resampled, y_resampled = rus.fit_sample(X, y)
85
+ >>> X_resampled, y_resampled = rus.fit_resample(X, y)
86
86
>>> print(sorted(Counter(y_resampled).items()))
87
87
[(0, 64), (1, 64), (2, 64)]
88
88
@@ -99,7 +99,7 @@ by considering independently each targeted class::
99
99
>>> print(np.vstack({tuple(row) for row in X_resampled}).shape)
100
100
(192, 2)
101
101
>>> rus = RandomUnderSampler(random_state=0, replacement=True)
102
- >>> X_resampled, y_resampled = rus.fit_sample(X, y)
102
+ >>> X_resampled, y_resampled = rus.fit_resample(X, y)
103
103
>>> print(np.vstack({tuple(row) for row in X_resampled}).shape)
104
104
(181, 2)
105
105
@@ -109,7 +109,7 @@ In addition, :class:`RandomUnderSampler` allows to sample heterogeneous data
109
109
>>> X_hetero = np.array([['xxx', 1, 1.0], ['yyy', 2, 2.0], ['zzz', 3, 3.0]],
110
110
... dtype=np.object)
111
111
>>> y_hetero = np.array([0, 0, 1])
112
- >>> X_resampled, y_resampled = rus.fit_sample(X_hetero, y_hetero)
112
+ >>> X_resampled, y_resampled = rus.fit_resample(X_hetero, y_hetero)
113
113
>>> print(X_resampled)
114
114
[['xxx' 1 1.0]
115
115
['zzz' 3 3.0]]
@@ -126,7 +126,7 @@ be selected with the parameter ``version``::
126
126
127
127
>>> from imblearn.under_sampling import NearMiss
128
128
>>> nm1 = NearMiss(version=1)
129
- >>> X_resampled_nm1, y_resampled = nm1.fit_sample(X, y)
129
+ >>> X_resampled_nm1, y_resampled = nm1.fit_resample(X, y)
130
130
>>> print(sorted(Counter(y_resampled).items()))
131
131
[(0, 64), (1, 64), (2, 64)]
132
132
@@ -261,7 +261,7 @@ the sample inspected to keep it in the dataset::
261
261
[(0, 64), (1, 262), (2, 4674)]
262
262
>>> from imblearn.under_sampling import EditedNearestNeighbours
263
263
>>> enn = EditedNearestNeighbours()
264
- >>> X_resampled, y_resampled = enn.fit_sample(X, y)
264
+ >>> X_resampled, y_resampled = enn.fit_resample(X, y)
265
265
>>> print(sorted(Counter(y_resampled).items()))
266
266
[(0, 64), (1, 213), (2, 4568)]
267
267
@@ -275,7 +275,7 @@ Generally, repeating the algorithm will delete more data::
275
275
276
276
>>> from imblearn.under_sampling import RepeatedEditedNearestNeighbours
277
277
>>> renn = RepeatedEditedNearestNeighbours()
278
- >>> X_resampled, y_resampled = renn.fit_sample(X, y)
278
+ >>> X_resampled, y_resampled = renn.fit_resample(X, y)
279
279
>>> print(sorted(Counter(y_resampled).items()))
280
280
[(0, 64), (1, 208), (2, 4551)]
281
281
@@ -285,7 +285,7 @@ internal nearest neighbors algorithm is increased at each iteration::
285
285
286
286
>>> from imblearn.under_sampling import AllKNN
287
287
>>> allknn = AllKNN()
288
- >>> X_resampled, y_resampled = allknn.fit_sample(X, y)
288
+ >>> X_resampled, y_resampled = allknn.fit_resample(X, y)
289
289
>>> print(sorted(Counter(y_resampled).items()))
290
290
[(0, 64), (1, 220), (2, 4601)]
291
291
@@ -323,7 +323,7 @@ The :class:`CondensedNearestNeighbour` can be used in the following manner::
323
323
324
324
>>> from imblearn.under_sampling import CondensedNearestNeighbour
325
325
>>> cnn = CondensedNearestNeighbour(random_state=0)
326
- >>> X_resampled, y_resampled = cnn.fit_sample(X, y)
326
+ >>> X_resampled, y_resampled = cnn.fit_resample(X, y)
327
327
>>> print(sorted(Counter(y_resampled).items()))
328
328
[(0, 64), (1, 24), (2, 115)]
329
329
@@ -338,7 +338,7 @@ used as::
338
338
339
339
>>> from imblearn.under_sampling import OneSidedSelection
340
340
>>> oss = OneSidedSelection(random_state=0)
341
- >>> X_resampled, y_resampled = oss.fit_sample(X, y)
341
+ >>> X_resampled, y_resampled = oss.fit_resample(X, y)
342
342
>>> print(sorted(Counter(y_resampled).items()))
343
343
[(0, 64), (1, 174), (2, 4403)]
344
344
@@ -352,7 +352,7 @@ neighbors classifier. The class can be used as::
352
352
353
353
>>> from imblearn.under_sampling import NeighbourhoodCleaningRule
354
354
>>> ncr = NeighbourhoodCleaningRule()
355
- >>> X_resampled, y_resampled = ncr.fit_sample(X, y)
355
+ >>> X_resampled, y_resampled = ncr.fit_resample(X, y)
356
356
>>> print(sorted(Counter(y_resampled).items()))
357
357
[(0, 64), (1, 234), (2, 4666)]
358
358
@@ -380,7 +380,7 @@ removed. The class can be used as::
380
380
>>> from imblearn.under_sampling import InstanceHardnessThreshold
381
381
>>> iht = InstanceHardnessThreshold(random_state=0,
382
382
... estimator=LogisticRegression())
383
- >>> X_resampled, y_resampled = iht.fit_sample(X, y)
383
+ >>> X_resampled, y_resampled = iht.fit_resample(X, y)
384
384
>>> print(sorted(Counter(y_resampled).items()))
385
385
[(0, 64), (1, 64), (2, 64)]
386
386
0 commit comments