diff --git a/.gitignore b/.gitignore
index 250c89a..1e83d4e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@ dist/
 .cache/
 .idea/
 .pytest_cache/
+.DS_Store
diff --git a/README.rst b/README.rst
index 630329b..ec544d7 100644
--- a/README.rst
+++ b/README.rst
@@ -9,8 +9,6 @@ Sklearn-pandas
 .. image:: https://anaconda.org/conda-forge/sklearn-pandas/badges/version.svg
    :target: https://anaconda.org/conda-forge/sklearn-pandas/
 
-.. highlight:: python
-
 This module provides a bridge between `Scikit-Learn `__'s machine learning methods and `pandas `__-style Data Frames.
 In particular, it provides a way to map ``DataFrame`` columns to transformations, which are later recombined into features.
 
@@ -91,7 +89,7 @@ Let's see an example::
 The difference between specifying the column selector as ``'column'`` (as a simple string) and ``['column']`` (as a list with one element) is the shape of the array that is passed to the transformer.
 In the first case, a one-dimensional array will be passed, while in the second case it will be a two-dimensional array with one column, i.e. a column vector.
-This behaviour mimics the same pattern as pandas' dataframes ``__getitem__`` indexing::
+This behaviour mimics pandas' dataframe ``__getitem__`` indexing:
 
 >>> data['children'].shape
 (8,)
@@ -166,9 +164,8 @@ Alternatively, you can also specify prefix and/or suffix to add to the column na
 Dynamic Columns
 ***********************
-In some situations the columns are not known before hand and we would like to dynamically select them during the fit operation. As shown below, in such situations you can provide either a custom callable or use `make_column_selector `__.
+In some situations the columns are not known beforehand and we would like to select them dynamically during the fit operation. As shown below, in such situations you can provide either a custom callable or use `make_column_selector `__.
 
-::
 
 >>> class GetColumnsStartingWith:
 ...     def __init__(self, start_str):
 ...         self.pattern = start_str
 ...
 ...     def __call__(self, X: pd.DataFrame = None):
 ...         return [c for c in X.columns if c.startswith(self.pattern)]
 ...
 >>> df = pd.DataFrame({
 ...     'sepal length (cm)': [1.0, 2.0, 3.0],
 ...     'sepal width (cm)': [1.0, 2.0, 3.0],
 ...     'petal length (cm)': [1.0, 2.0, 3.0],
 ...     'petal width (cm)': [1.0, 2.0, 3.0]
 ... })
 >>> t = DataFrameMapper([
 ...     (
 ...         make_column_selector(dtype_include=float),
 ...         sklearn.preprocessing.StandardScaler(),
 ...         {'alias': 'x'}
 ...     ),
 ...     (
 ...         GetColumnsStartingWith('petal'),
 ...         None,
 ...         {'alias': 'petal'}
 ...     )
 ... ], df_out=True, default=False)
 >>> t.fit(df).transform(df).shape
 (3, 6)
 >>> t.transformed_names_
-['x_0', 'x_1', 'x_2', 'x_3', 'petal_0', 'petal_1']
+['x_sepal length (cm)', 'x_sepal width (cm)', 'x_petal length (cm)', 'x_petal width (cm)', 'petal_0', 'petal_1']
@@ -276,14 +273,14 @@ Dropping columns explicitly
 Sometimes it is required to drop a specific column or a list of columns. For this purpose, the ``drop_cols`` argument of ``DataFrameMapper`` can be used.
-Default value is ``None``::
+The default value is ``None``:
 
 >>> mapper_df = DataFrameMapper([
 ...     ('pet', sklearn.preprocessing.LabelBinarizer()),
 ...     (['children'], sklearn.preprocessing.StandardScaler())
 ... ], drop_cols=['salary'])
 
-Now running ``fit_transform`` will run transformations on 'pet' and 'children' and drop 'salary' column::
+Now running ``fit_transform`` will run the transformations on 'pet' and 'children' and drop the 'salary' column:
 
 >>> np.round(mapper_df.fit_transform(data.copy()), 1)
 array([[ 1. ,  0. ,  0. ,  0.2],
@@ -358,7 +355,7 @@ Applying a default transformer
 ******************************
 
 A default transformer can be applied to columns not explicitly selected
-passing it as the ``default`` argument to the mapper::
+by passing it as the ``default`` argument to the mapper:
 
 >>> mapper4 = DataFrameMapper([
 ...     ('pet', sklearn.preprocessing.LabelBinarizer()),
@@ -388,7 +385,7 @@ acceptable by ``DataFrameMapper``.
 For example, consider a dataset with three categorical columns, 'col1', 'col2', and 'col3'.
 To binarize each of them, one could pass the column names and the ``LabelBinarizer`` transformer class
-into generator, and then use returned definition as ``features`` argument for ``DataFrameMapper``::
+into the generator, and then use the returned definition as the ``features`` argument for ``DataFrameMapper``:
 
 >>> from sklearn_pandas import gen_features
 >>> feature_def = gen_features(
 ...     columns=['col1', 'col2', 'col3'],
 ...     classes=[sklearn.preprocessing.LabelBinarizer]
 ... )
 >>> feature_def
 [('col1', [LabelBinarizer()], {}), ('col2', [LabelBinarizer()], {}), ('col3', [LabelBinarizer()], {})]
@@ -410,7 +407,7 @@
 If it is required to override some of the transformer parameters, then a dict with a 'class' key and
 the transformer parameters should be provided. For example, consider a dataset with missing values.
-Then the following code could be used to override default imputing strategy::
+Then the following code could be used to override the default imputing strategy:
 
 >>> from sklearn.impute import SimpleImputer
 >>> import numpy as np
@@ -454,8 +451,6 @@ Feature selection and other supervised transformations
 ``DataFrameMapper`` supports transformers that require both X and y arguments. An example of this is feature selection.
 Treating the 'pet' column as the target, we will select the column that best predicts it.
 
-::
-
 >>> from sklearn.feature_selection import SelectKBest, chi2
 >>> mapper_fs = DataFrameMapper([(['children','salary'], SelectKBest(chi2, k=1))])
 >>> mapper_fs.fit_transform(data[['children','salary']], data['pet'])
@@ -472,7 +467,7 @@ Working with sparse features
 ****************************
 
 A ``DataFrameMapper`` will return a dense feature array by default. Setting ``sparse=True`` in the mapper will return
-a sparse array whenever any of the extracted features is sparse. Example::
+a sparse array whenever any of the extracted features is sparse. Example:
 
 >>> mapper5 = DataFrameMapper([
 ...     ('pet', CountVectorizer()),
 ... ], sparse=True)
 >>> type(mapper5.fit_transform(data))
 <class 'scipy.sparse.csr.csr_matrix'>
@@ -483,36 +478,12 @@ a sparse array whenever any of the extracted features is sparse. Example::
 
 The stacking of the sparse features is done without ever densifying them.
 
-Using ``NumericalTransformer``
-***********************************
-
-While you can use ``FunctionTransformation`` to generate arbitrary transformers, it can present serialization issues
-when pickling. Use ``NumericalTransformer`` instead, which takes the function name as a string parameter and hence
-can be easily serialized.
-
-::
-
->>> from sklearn_pandas import NumericalTransformer
->>> mapper5 = DataFrameMapper([
-...     ('children', NumericalTransformer('log')),
-... ])
->>> mapper5.fit_transform(data)
-array([[1.38629436],
-       [1.79175947],
-       [1.09861229],
-       [1.09861229],
-       [0.69314718],
-       [1.09861229],
-       [1.60943791],
-       [1.38629436]])
-
 Changing Logging level
 ***********************************
 
 You can change the log level to ``INFO`` to print the time taken to fit/transform features. Setting it to a higher level will stop printing the elapsed time. The example below shows how to change the logging level.
 
-::
 
 >>> import logging
 >>> logging.getLogger('sklearn_pandas').setLevel(logging.INFO)
@@ -521,6 +492,12 @@
 Changelog
 ---------
 
+3.0.0 (2022-08-07)
+******************
+* Leveraging `get_feature_names_out` to get vectorized feature names. Note that this is a breaking change, as some of the generated
+feature names will differ from those generated by the previous major version (e.g. one-hot features are now named like `target_target_0` instead of `target_x0_0`).
+* Removed support for `NumericalTransformer`. Write a custom transformer based on `sklearn.base.TransformerMixin` instead.
+
 2.2.0 (2021-05-07)
 ******************
@@ -677,6 +654,7 @@ The code for ``DataFrameMapper`` is based on code originally written by `Ben Ham
 Other contributors:
 
 * Ariel Rossanigo (@arielrossanigo)
+* Antonio Carlos Falcão Petri (@falcaopetri)
 * Arnau Gil Amat (@arnau126)
 * Assaf Ben-David (@AssafBenDavid)
 * Brendan Herger (@bjherger)
diff --git a/noxfile.py b/noxfile.py
index ee6f1e7..1c1210a 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -9,14 +9,16 @@ def lint(session):
     session.run('flake8', 'sklearn_pandas/', 'tests')
 
 
 @nox.session
-@nox.parametrize('numpy', ['1.18.1', '1.19.4', '1.20.1'])
-@nox.parametrize('scipy', ['1.5.4', '1.6.0'])
-@nox.parametrize('pandas', ['1.1.4', '1.2.2'])
-def tests(session, numpy, scipy, pandas):
+@nox.parametrize('numpy', ['1.18.1', '1.20.1'])
+@nox.parametrize('sklearn', ['1.1.0', '1.1.2'])
+@nox.parametrize('scipy', ['1.6.0'])
+@nox.parametrize('pandas', ['1.2.2'])
+def tests(session, numpy, sklearn, scipy, pandas):
     session.install('pytest>=5.3.5',
                     'setuptools>=45.2',
                     'wheel>=0.34.2',
                     f'numpy=={numpy}',
+                    f'scikit-learn=={sklearn}',
                     f'scipy=={scipy}',
                     f'pandas=={pandas}'
                     )
diff --git a/setup.py b/setup.py
index fb5af98..9cbb69c 100644
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
+import re
+
 from setuptools import setup
 from setuptools.command.test import test as TestCommand
-import re
 
 for line in open('sklearn_pandas/__init__.py'):
     match = re.match("__version__ *= *'(.*)'", line)
@@ -38,10 +39,10 @@ def run(self):
       packages=['sklearn_pandas'],
       keywords=['scikit', 'sklearn', 'pandas'],
       install_requires=[
-          'scikit-learn>=0.23.0',
+          'scikit-learn>=1.1.0',
           'scipy>=1.5.1',
           'pandas>=1.1.4',
-          'numpy>=1.18.1'
+          'numpy>=1.19.4'
       ],
       tests_require=['pytest', 'mock'],
       cmdclass={'test': PyTest},
diff --git a/sklearn_pandas/__init__.py b/sklearn_pandas/__init__.py
index 535ecf6..f0a6d64 100644
--- a/sklearn_pandas/__init__.py
+++ b/sklearn_pandas/__init__.py
@@ -1,8 +1,8 @@
-__version__ = '2.2.0'
+__version__ = '3.0.0'
 
 import logging
+
 logger = logging.getLogger(__name__)
 
 from .dataframe_mapper import DataFrameMapper  # NOQA
 from .features_generator import gen_features  # NOQA
-from .transformers import NumericalTransformer  # NOQA
diff --git a/sklearn_pandas/dataframe_mapper.py b/sklearn_pandas/dataframe_mapper.py
index ca4568e..5ca7b61 100644
--- a/sklearn_pandas/dataframe_mapper.py
+++ b/sklearn_pandas/dataframe_mapper.py
@@ -1,12 +1,14 @@
 import contextlib
 from datetime import datetime
-import pandas as pd
+
 import numpy as np
+import pandas as pd
 from scipy import sparse
 from sklearn.base import BaseEstimator, TransformerMixin
-from .cross_validation import DataWrapper
-from .pipeline import make_transformer_pipeline, _call_fit, TransformerPipeline
+
 from . import logger
+from .cross_validation import DataWrapper
+from .pipeline import TransformerPipeline, _call_fit, make_transformer_pipeline
 
 string_types = text_type = str
@@ -41,14 +43,16 @@ def _elapsed_secs(t1):
     return (datetime.now()-t1).total_seconds()
 
 
-def _get_feature_names(estimator):
+def _get_feature_names(estimator, input_features=None):
     """
-    Attempt to extract feature names based on a given estimator
+    Attempt to extract feature names from a given estimator, preferring
+    ``get_feature_names_out``, then the legacy ``get_feature_names``, and
+    finally falling back to ``classes_``.
     """
-    if hasattr(estimator, 'classes_'):
-        return estimator.classes_
+    if hasattr(estimator, 'get_feature_names_out'):
+        return estimator.get_feature_names_out(input_features=input_features)
     elif hasattr(estimator, 'get_feature_names'):
         return estimator.get_feature_names()
+    elif hasattr(estimator, 'classes_'):
+        return estimator.classes_
     return None
@@ -283,18 +287,18 @@ def get_names(self, columns, transformer, x, alias=None, prefix='',
             # last one
             if isinstance(transformer, TransformerPipeline):
                 inverse_steps = transformer.steps[::-1]
-                estimators = (estimator for name, estimator in inverse_steps)
-                names_steps = (_get_feature_names(e) for e in estimators)
+                estimators = (estimator for _, estimator in inverse_steps)
+                names_steps = (_get_feature_names(e, input_features=columns) for e in estimators)  # noqa
                 names = next((n for n in names_steps if n is not None), None)
             # Otherwise use the only estimator present
             else:
-                names = _get_feature_names(transformer)
+                names = _get_feature_names(transformer, input_features=columns)
 
-            if names is not None and len(names) == num_cols:
-                output = [f"{name}_{o}" for o in names]
-            # otherwise, return name concatenated with '_1', '_2', etc.
-            else:
-                output = [name + '_' + str(o) for o in range(num_cols)]
+            if names is None or len(names) != num_cols:
+                # fall back to name concatenated with '_0', '_1', etc.
+                names = range(num_cols)
+
+            output = [f"{name}_{o}" for o in names]
         else:
             output = [name]
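The reordered lookup above is the heart of the change: ``get_feature_names_out`` (available on most scikit-learn >= 1.1 transformers, and able to incorporate the input column names) is now preferred, ``get_feature_names`` is kept for older transformers, and ``classes_`` becomes the last resort. A minimal sketch of the two ends of that chain, assuming scikit-learn 1.1 (the 'pet' column name and toy data are illustrative only):

    >>> from sklearn.preprocessing import LabelBinarizer, OneHotEncoder
    >>> enc = OneHotEncoder().fit([[0], [1], [2]])
    >>> # Preferred path: names are derived from the supplied input column name
    >>> list(enc.get_feature_names_out(input_features=['pet']))
    ['pet_0', 'pet_1', 'pet_2']
    >>> # Fallback path: LabelBinarizer exposes no get_feature_names_out as of
    >>> # scikit-learn 1.1, so its classes_ attribute is used instead
    >>> lb = LabelBinarizer().fit(['cat', 'dog', 'fish'])
    >>> list(lb.classes_)
    ['cat', 'dog', 'fish']

Moving ``classes_`` to the end matters because some estimators expose both attributes; preferring ``get_feature_names_out`` keeps the generated names consistent with scikit-learn's own conventions.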
diff --git a/sklearn_pandas/transformers.py b/sklearn_pandas/transformers.py
deleted file mode 100644
index ae693c2..0000000
--- a/sklearn_pandas/transformers.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import numpy as np
-import pandas as pd
-from sklearn.base import TransformerMixin
-import warnings
-
-
-def _get_mask(X, value):
-    """
-    Compute the boolean mask X == missing_values.
-    """
-    if value == "NaN" or \
-       value is None or \
-       (isinstance(value, float) and np.isnan(value)):
-        return pd.isnull(X)
-    else:
-        return X == value
-
-
-class NumericalTransformer(TransformerMixin):
-    """
-    Provides commonly used numerical transformers.
-    """
-    SUPPORTED_FUNCTIONS = ['log', 'log1p']
-
-    def __init__(self, func):
-        """
-        Params
-
-        func    function to apply to input columns. The function will be
-                applied to each value. Supported functions are defined
-                in SUPPORTED_FUNCTIONS variable. Throws assertion error if the
-                not supported.
-        """
-
-        warnings.warn("""
-            NumericalTransformer will be deprecated in 3.0 version.
-            Please use Sklearn.base.TransformerMixin to write
-            customer transformers
-            """, DeprecationWarning)
-
-        assert func in self.SUPPORTED_FUNCTIONS, \
-            f"Only following func are supported: {self.SUPPORTED_FUNCTIONS}"
-        super(NumericalTransformer, self).__init__()
-        self.__func = func
-
-    def fit(self, X, y=None):
-        return self
-
-    def transform(self, X, y=None):
-        if self.__func == 'log1p':
-            return np.vectorize(np.log1p)(X)
-        elif self.__func == 'log':
-            return np.vectorize(np.log)(X)
-
-        raise ValueError(f"Invalid function name: {self.__func}")
diff --git a/tests/test_dataframe_mapper.py b/tests/test_dataframe_mapper.py
index ce87297..92c7515 100644
--- a/tests/test_dataframe_mapper.py
+++ b/tests/test_dataframe_mapper.py
@@ -385,8 +385,26 @@ def test_onehot_df():
     transformed = mapper.fit_transform(df)
     cols = transformed.columns
     assert len(cols) == 4
-    assert cols[0] == 'target_x0_0'
-    assert cols[3] == 'target_x0_3'
+    assert cols[0] == 'target_target_0'
+    assert cols[3] == 'target_target_3'
+
+
+def test_onehot_2cols_df():
+    """
+    Check level ids from one-hot when mapping 2 columns
+    """
+    df = pd.DataFrame({
+        'col': [0, 0, 1, 1, 2, 3, 0],
+        'target': [0, 0, 1, 1, 2, 3, 0]
+    })
+    mapper = DataFrameMapper([
+        (['col', 'target'], OneHotEncoder())
+    ], df_out=True)
+    transformed = mapper.fit_transform(df)
+    cols = transformed.columns
+    assert len(cols) == 8
+    assert cols[0] == 'col_target_col_0'
+    assert cols[4] == 'col_target_target_0'
 
 
 def test_customtransform_df():
@@ -450,8 +468,8 @@ def test_pca(complex_dataframe):
     transformed = mapper.fit_transform(df)
     cols = transformed.columns
     assert len(cols) == 2
-    assert cols[0] == 'feat1_feat2_0'
-    assert cols[1] == 'feat1_feat2_1'
+    assert cols[0] == 'feat1_feat2_pca0'
+    assert cols[1] == 'feat1_feat2_pca1'
diff --git a/tests/test_transformers.py b/tests/test_transformers.py
deleted file mode 100644
index 0cd8d0b..0000000
--- a/tests/test_transformers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import tempfile
-import pytest
-import numpy as np
-from pandas import DataFrame
-import joblib
-
-from sklearn_pandas import DataFrameMapper
-from sklearn_pandas import NumericalTransformer
-
-
-@pytest.fixture
-def simple_dataset():
-    return DataFrame({
-        'feat1': [1, 2, 1, 3, 1],
-        'feat2': [1, 2, 2, 2, 3],
-        'feat3': [1, 2, 3, 4, 5],
-    })
-
-
-def test_common_numerical_transformer(simple_dataset):
-    """
-    Test log transformation
-    """
-    transfomer = DataFrameMapper([
-        ('feat1', NumericalTransformer('log'))
-    ], df_out=True)
-    df = simple_dataset
-    outDF = transfomer.fit_transform(df)
-    assert list(outDF.columns) == ['feat1']
-    assert np.array_equal(df['feat1'].apply(np.log).values, outDF.feat1.values)
-
-
-def test_numerical_transformer_serialization(simple_dataset):
-    """
-    Test if you can serialize transformer
-    """
-    transfomer = DataFrameMapper([
-        ('feat1', NumericalTransformer('log'))
-    ])
-
-    df = simple_dataset
-    transfomer.fit(df)
-    f = tempfile.NamedTemporaryFile(delete=True)
-    joblib.dump(transfomer, f.name)
-    transfomer2 = joblib.load(f.name)
-    np.array_equal(transfomer.transform(df), transfomer2.transform(df))
-    f.close()
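The updated assertions in ``test_onehot_df`` and ``test_pca`` above capture the user-visible effect of the whole patch. A minimal sketch of the new naming, assuming sklearn-pandas 3.0.0 with scikit-learn >= 1.1 (the toy frame mirrors the test fixture):

    >>> import pandas as pd
    >>> from sklearn.preprocessing import OneHotEncoder
    >>> from sklearn_pandas import DataFrameMapper
    >>> df = pd.DataFrame({'target': [0, 0, 1, 1, 2, 3, 0]})
    >>> mapper = DataFrameMapper([(['target'], OneHotEncoder())], df_out=True)
    >>> list(mapper.fit_transform(df).columns)
    ['target_target_0', 'target_target_1', 'target_target_2', 'target_target_3']

Under 2.x the same mapper produced 'target_x0_0' through 'target_x0_3'; pipelines that select columns from the mapped output by name will need to be updated accordingly.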