diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 73ac14f1ed5ce..d899a3136dd83 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -190,9 +190,6 @@ repos: # Check for deprecated messages without sphinx directive |(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.) - # {foo!r} instead of {repr(foo)} - |!r} - # builtin filter function |(? str: if len(keys) == 0: if not silent: _warn_if_deprecated(pat) - raise OptionError(f"No such keys(s): {repr(pat)}") + raise OptionError(f"No such keys(s): {pat!r}") if len(keys) > 1: raise OptionError("Pattern matched multiple keys") key = keys[0] diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py index f9cf390ba59de..731a6df776c4c 100644 --- a/pandas/_testing/_warnings.py +++ b/pandas/_testing/_warnings.py @@ -152,13 +152,12 @@ def _assert_caught_expected_warning( if not saw_warning: raise AssertionError( - f"Did not see expected warning of class " - f"{repr(expected_warning.__name__)}" + f"Did not see expected warning of class {expected_warning.__name__!r}" ) if match and not matched_message: raise AssertionError( - f"Did not see warning {repr(expected_warning.__name__)} " + f"Did not see warning {expected_warning.__name__!r} " f"matching '{match}'. 
The emitted warning messages are " f"{unmatched_messages}" ) @@ -200,7 +199,7 @@ def _assert_caught_no_extra_warnings( ) if extra_warnings: - raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") + raise AssertionError(f"Caused unexpected warning(s): {extra_warnings!r}") def _is_unexpected_warning( diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 3de982498e996..5ad5d02360f0b 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -432,14 +432,14 @@ def assert_is_valid_plot_return_object(objs) -> None: for el in objs.ravel(): msg = ( "one of 'objs' is not a matplotlib Axes instance, " - f"type encountered {repr(type(el).__name__)}" + f"type encountered {type(el).__name__!r}" ) assert isinstance(el, (Axes, dict)), msg else: msg = ( "objs is neither an ndarray of Artist instances nor a single " "ArtistArtist instance, tuple, or dict, 'objs' is a " - f"{repr(type(objs).__name__)}" + f"{type(objs).__name__!r}" ) assert isinstance(objs, (Artist, tuple, dict)), msg @@ -661,10 +661,10 @@ def _get_base(obj): if check_same == "same": if left_base is not right_base: - raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}") + raise AssertionError(f"{left_base!r} is not {right_base!r}") elif check_same == "copy": if left_base is right_base: - raise AssertionError(f"{repr(left_base)} is {repr(right_base)}") + raise AssertionError(f"{left_base!r} is {right_base!r}") def _raise(left, right, err_msg) -> NoReturn: if err_msg is None: @@ -935,7 +935,7 @@ def assert_series_equal( raise_assert_detail(obj, "Series length are different", msg1, msg2) if check_flags: - assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + assert left.flags == right.flags, f"{left.flags!r} != {right.flags!r}" if check_index: # GH #38183 @@ -1215,11 +1215,11 @@ def assert_frame_equal( # shape comparison if left.shape != right.shape: raise_assert_detail( - obj, f"{obj} shape mismatch", 
f"{repr(left.shape)}", f"{repr(right.shape)}" + obj, f"{obj} shape mismatch", f"{left.shape!r}", f"{right.shape!r}" ) if check_flags: - assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + assert left.flags == right.flags, f"{left.flags!r} != {right.flags!r}" # index comparison assert_index_equal( @@ -1369,7 +1369,7 @@ def assert_sp_array_equal(left, right) -> None: def assert_contains_all(iterable, dic) -> None: for k in iterable: - assert k in dic, f"Did not contain item: {repr(k)}" + assert k in dic, f"Did not contain item: {k!r}" def assert_copy(iter1, iter2, **eql_kwargs) -> None: @@ -1384,7 +1384,7 @@ def assert_copy(iter1, iter2, **eql_kwargs) -> None: for elem1, elem2 in zip(iter1, iter2): assert_almost_equal(elem1, elem2, **eql_kwargs) msg = ( - f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be " + f"Expected object {type(elem1)!r} and object {type(elem2)!r} to be " "different objects, but they were the same object." ) assert elem1 is not elem2, msg diff --git a/pandas/_version.py b/pandas/_version.py index f8a960630126d..08a7111324e3b 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -141,7 +141,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): if verbose: print( - f"Tried directories {str(rootdirs)} \ + f"Tried directories {rootdirs!s} \ but none started with prefix {parentdir_prefix}" ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") diff --git a/pandas/conftest.py b/pandas/conftest.py index e2d3731e9f4dd..94805313ccfc1 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -273,7 +273,7 @@ def configure_tests() -> None: # ---------------------------------------------------------------- # Common arguments # ---------------------------------------------------------------- -@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}") +@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={x!r}") def 
axis(request): """ Fixture for returning the axis numbers of a DataFrame. diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 9098a6f9664a9..683af644cbdb3 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -306,8 +306,8 @@ def plot(self): def decorator(accessor): if hasattr(cls, name): warnings.warn( - f"registration of accessor {repr(accessor)} under name " - f"{repr(name)} for type {repr(cls)} is overriding a preexisting " + f"registration of accessor {accessor!r} under name " + f"{name!r} for type {cls!r} is overriding a preexisting " f"attribute with the same name.", UserWarning, stacklevel=find_stack_level(), diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py index 60fc172139f13..7293a46eb9a60 100644 --- a/pandas/core/array_algos/replace.py +++ b/pandas/core/array_algos/replace.py @@ -78,7 +78,7 @@ def _check_comparison_types( type_names[0] = f"ndarray(dtype={a.dtype})" raise TypeError( - f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" + f"Cannot compare types {type_names[0]!r} and {type_names[1]!r}" ) if not regex or not should_use_regex(regex, b): diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index b9889a65feb34..bf04d86e8e476 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1109,7 +1109,7 @@ def fillna( try: fill_value = self._box_pa(value, pa_type=self._pa_array.type) except pa.ArrowTypeError as err: - msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + msg = f"Invalid value '{value!s}' for dtype {self.dtype}" raise TypeError(msg) from err try: @@ -2065,7 +2065,7 @@ def _maybe_convert_setitem_value(self, value): try: value = self._box_pa(value, self._pa_array.type) except pa.ArrowTypeError as err: - msg = f"Invalid value '{str(value)}' for dtype {self.dtype}" + msg = f"Invalid value '{value!s}' for dtype {self.dtype}" raise TypeError(msg) from err return value diff --git 
a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 5a88aed1419e3..46b76e2365df9 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2024,7 +2024,7 @@ def sort_values( """ inplace = validate_bool_kwarg(inplace, "inplace") if na_position not in ["last", "first"]: - raise ValueError(f"invalid na_position: {repr(na_position)}") + raise ValueError(f"invalid na_position: {na_position!r}") sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 63d3d917c87c9..d5ae6a6025029 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -303,7 +303,7 @@ def _validate_setitem_value(self, value): # Note: without the "str" here, the f-string rendering raises in # py38 builds. - raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}") + raise TypeError(f"Invalid value '{value!s}' for dtype {self.dtype}") def __setitem__(self, key, value) -> None: key = check_array_indexer(self, key) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index cd852ba9249cf..ea293538bae8f 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -127,7 +127,7 @@ def _align_core(terms): if ordm >= 1 and reindexer_size >= 10000: w = ( f"Alignment difference on axis {axis} is larger " - f"than an order of magnitude on term {repr(terms[i].name)}, " + f"than an order of magnitude on term {terms[i].name!r}, " f"by more than {ordm:.4g}; performance may suffer." 
) warnings.warn( diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 6219cac4aeb16..17a68478196da 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -214,9 +214,9 @@ def _bool_arith_fallback(op_str, a, b) -> bool: if _has_bool_dtype(a) and _has_bool_dtype(b): if op_str in _BOOL_OP_UNSUPPORTED: warnings.warn( - f"evaluating in Python space because the {repr(op_str)} " + f"evaluating in Python space because the {op_str!r} " "operator is not supported by numexpr for the bool dtype, " - f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.", + f"use {_BOOL_OP_UNSUPPORTED[op_str]!r} instead.", stacklevel=find_stack_level(), ) return True diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 9422434b5cde3..062e9f43b2eb9 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -160,7 +160,7 @@ def type(self): @property def raw(self) -> str: - return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})" + return f"{type(self).__name__}(name={self.name!r}, type={self.type})" @property def is_datetime(self) -> bool: @@ -387,7 +387,7 @@ def __init__(self, op: str, lhs, rhs) -> None: # has to be made a list for python3 keys = list(_binary_ops_dict.keys()) raise ValueError( - f"Invalid binary operator {repr(op)}, valid operators are {keys}" + f"Invalid binary operator {op!r}, valid operators are {keys}" ) from err def __call__(self, env): @@ -571,7 +571,7 @@ def __init__(self, op: Literal["+", "-", "~", "not"], operand) -> None: self.func = _unary_ops_dict[op] except KeyError as err: raise ValueError( - f"Invalid unary operator {repr(op)}, " + f"Invalid unary operator {op!r}, " f"valid operators are {UNARY_OPS_SYMS}" ) from err diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 04a8ad7ef0be6..cec8a89abc0b2 100644 --- a/pandas/core/computation/pytables.py +++ 
b/pandas/core/computation/pytables.py @@ -81,7 +81,7 @@ def _resolve_name(self): if self.side == "left": # Note: The behavior of __new__ ensures that self.name is a str here if self.name not in self.env.queryables: - raise NameError(f"name {repr(self.name)} is not defined") + raise NameError(f"name {self.name!r} is not defined") return self.name # resolve the rhs (and allow it to be None) @@ -467,9 +467,7 @@ def visit_Subscript(self, node, **kwargs) -> ops.Term: try: return self.const_type(value[slobj], self.env) except TypeError as err: - raise ValueError( - f"cannot subscript {repr(value)} with {repr(slobj)}" - ) from err + raise ValueError(f"cannot subscript {value!r} with {slobj!r}") from err def visit_Attribute(self, node, **kwargs): attr = node.attr diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5a0867d0251e8..73a73bad94127 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -243,7 +243,7 @@ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None: elif (vdtype.kind == "m" and dtype.kind == "M") or ( vdtype.kind == "M" and dtype.kind == "m" ): - raise TypeError(f"Cannot cast {repr(value)} to {dtype}") + raise TypeError(f"Cannot cast {value!r} to {dtype}") @overload diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 2245359fd8eac..5e5b7bdad74d8 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1548,8 +1548,8 @@ def _validate_date_like_dtype(dtype) -> None: raise TypeError(e) from e if typ not in ["generic", "ns"]: raise ValueError( - f"{repr(dtype.name)} is too specific of a frequency, " - f"try passing {repr(dtype.type.__name__)}" + f"{dtype.name!r} is too specific of a frequency, " + f"try passing {dtype.type.__name__!r}" ) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e90e92fa0ee1c..0a288f435c50c 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -318,7 +318,7 @@ def 
_from_values_or_dtype( dtype = CategoricalDtype(categories, ordered) else: - raise ValueError(f"Unknown dtype {repr(dtype)}") + raise ValueError(f"Unknown dtype {dtype!r}") elif categories is not None or ordered is not None: raise ValueError( "Cannot specify `categories` or `ordered` together with `dtype`." @@ -566,7 +566,7 @@ def validate_categories(categories, fastpath: bool = False) -> Index: if not fastpath and not is_list_like(categories): raise TypeError( - f"Parameter 'categories' must be list-like, was {repr(categories)}" + f"Parameter 'categories' must be list-like, was {categories!r}" ) if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) @@ -602,7 +602,7 @@ def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: elif not self.is_dtype(dtype): raise ValueError( f"a CategoricalDtype must be passed to perform an update, " - f"got {repr(dtype)}" + f"got {dtype!r}" ) else: # from here on, dtype is a CategoricalDtype @@ -1458,7 +1458,7 @@ def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None: self._dtype = np.dtype(dtype) def __repr__(self) -> str: - return f"NumpyEADtype({repr(self.name)})" + return f"NumpyEADtype({self.name!r})" @property def numpy_dtype(self) -> np.dtype: @@ -1814,7 +1814,7 @@ def subtype(self): @property def name(self) -> str: - return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]" + return f"Sparse[{self.subtype.name}, {self.fill_value!r}]" def __repr__(self) -> str: return self.name @@ -2173,7 +2173,7 @@ def name(self) -> str: # type: ignore[override] """ A string identifying the data type. 
""" - return f"{str(self.pyarrow_dtype)}[{self.storage}]" + return f"{self.pyarrow_dtype!s}[{self.storage}]" @cache_readonly def numpy_dtype(self) -> np.dtype: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c01e551b38c32..ad6926406fe9b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -10395,9 +10395,7 @@ def map( 1 11.262736 20.857489 """ if na_action not in {"ignore", None}: - raise ValueError( - f"na_action must be 'ignore' or None. Got {repr(na_action)}" - ) + raise ValueError(f"na_action must be 'ignore' or None. Got {na_action!r}") if self.empty: return self.copy() @@ -11860,7 +11858,7 @@ def _get_agg_axis(self, axis_num: int) -> Index: elif axis_num == 1: return self.index else: - raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") + raise ValueError(f"Axis must be 0 or 1 (got {axis_num!r})") def mode( self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0a2c977cc8419..77359d7e54627 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7954,7 +7954,7 @@ def replace( raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " - f"{repr(type(to_replace).__name__)}" + f"{type(to_replace).__name__!r}" ) inplace = validate_bool_kwarg(inplace, "inplace") @@ -8125,7 +8125,7 @@ def replace( raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " - f"you passed a {repr(type(regex).__name__)}" + f"you passed a {type(regex).__name__!r}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True @@ -8156,7 +8156,7 @@ def replace( ) else: raise TypeError( - f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' + f'Invalid "to_replace" type: {type(to_replace).__name__!r}' ) result = self._constructor_from_mgr(new_data, axes=new_data.axes) diff --git 
a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index a93cf33590c3e..d36bfa62f4be5 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -476,7 +476,7 @@ def groups(self): @final def __repr__(self) -> str: attrs_list = ( - f"{attr_name}={repr(getattr(self, attr_name))}" + f"{attr_name}={getattr(self, attr_name)!r}" for attr_name in self._attributes if getattr(self, attr_name) is not None ) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1663a5a78225f..cb1ee87da86f6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4043,7 +4043,7 @@ def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarra raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " - f"must be numeric if it is a scalar: {repr(tolerance)}" + f"must be numeric if it is a scalar: {tolerance!r}" ) return tolerance @@ -4090,7 +4090,7 @@ def _get_fill_indexer_searchsorted( """ if limit is not None: raise ValueError( - f"limit argument for {repr(method)} method only well-defined " + f"limit argument for {method!r} method only well-defined " "if index and target are monotonic" ) @@ -6810,7 +6810,7 @@ def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " - f"label: {repr(original_label)}" + f"label: {original_label!r}" ) if isinstance(slc, slice): diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 9d528d34e3684..2a8c777db47e1 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -112,7 +112,7 @@ def __str__(self) -> str: return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n")) def __repr__(self) -> str: - return f"{type(self).__name__}({str(self)})" + return f"{type(self).__name__}({self!s})" __setitem__ = __setslice__ = _disabled # type: ignore[assignment] 
__delitem__ = __delslice__ = _disabled diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b62c19bef74be..f3f3e286e43e5 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -128,7 +128,7 @@ def _get_next_label(label): elif is_float_dtype(dtype): return np.nextafter(label, np.inf) else: - raise TypeError(f"cannot determine next label for type {repr(type(label))}") + raise TypeError(f"cannot determine next label for type {type(label)!r}") def _get_prev_label(label): @@ -145,7 +145,7 @@ def _get_prev_label(label): elif is_float_dtype(dtype): return np.nextafter(label, -np.inf) else: - raise TypeError(f"cannot determine next label for type {repr(type(label))}") + raise TypeError(f"cannot determine next label for type {type(label)!r}") def _new_IntervalIndex(cls, d): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 62afcf8badb50..b7c37e6cf67ed 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -190,7 +190,7 @@ def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self: if not isinstance(data, range): raise TypeError( f"{cls.__name__}(...) 
must be called with object coercible to a " - f"range, {repr(data)} was passed" + f"range, {data!r} was passed" ) cls._validate_dtype(dtype) return cls._simple_new(data, name=name) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 4445627732a9b..010ac34b2966a 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -354,7 +354,7 @@ def __init__(self, block: Block) -> None: self.block = block def __repr__(self) -> str: - return f"{type(self).__name__}({repr(self.block)})" + return f"{type(self).__name__}({self.block!r})" def _is_valid_na_for(self, dtype: DtypeObj) -> bool: """ diff --git a/pandas/core/methods/selectn.py b/pandas/core/methods/selectn.py index 5256c0a1c73a4..20aec3ccadded 100644 --- a/pandas/core/methods/selectn.py +++ b/pandas/core/methods/selectn.py @@ -209,8 +209,8 @@ def compute(self, method: str) -> DataFrame: dtype = frame[column].dtype if not self.is_valid_dtype_n_method(dtype): raise TypeError( - f"Column {repr(column)} has dtype {dtype}, " - f"cannot use method {repr(method)} with this dtype" + f"Column {column!r} has dtype {dtype}, " + f"cannot use method {method!r} with this dtype" ) def get_indexer(current_indexer, other_indexer): diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 6ca403bdb439a..51e2751aef88c 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1580,7 +1580,7 @@ def _validate_left_right_on(self, left_on, right_on): not left_cols.join(common_cols, how="inner").is_unique or not right_cols.join(common_cols, how="inner").is_unique ): - raise MergeError(f"Data columns not unique: {repr(common_cols)}") + raise MergeError(f"Data columns not unique: {common_cols!r}") left_on = right_on = common_cols elif self.on is not None: if left_on is not None or right_on is not None: @@ -2071,8 +2071,8 @@ def _validate_left_right_on(self, left_on, right_on): or is_string_dtype(ro_dtype) ): raise MergeError( - f"Incompatible 
merge dtype, {repr(ro_dtype)} and " - f"{repr(lo_dtype)}, both sides must have numeric dtype" + f"Incompatible merge dtype, {ro_dtype!r} and " + f"{lo_dtype!r}, both sides must have numeric dtype" ) # add 'by' to our key-list so we can have it in the @@ -2110,13 +2110,13 @@ def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int) -> None: # later with a ValueError, so we don't *need* to check # for them here. msg = ( - f"incompatible merge keys [{i}] {repr(left.dtype)} and " - f"{repr(right.dtype)}, both sides category, but not equal ones" + f"incompatible merge keys [{i}] {left.dtype!r} and " + f"{right.dtype!r}, both sides category, but not equal ones" ) else: msg = ( - f"incompatible merge keys [{i}] {repr(left.dtype)} and " - f"{repr(right.dtype)}, must be the same type" + f"incompatible merge keys [{i}] {left.dtype!r} and " + f"{right.dtype!r}, must be the same type" ) raise MergeError(msg) @@ -2146,7 +2146,7 @@ def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: msg = ( f"incompatible tolerance {self.tolerance}, must be compat " - f"with type {repr(lt.dtype)}" + f"with type {lt.dtype!r}" ) if needs_i8_conversion(lt.dtype) or ( diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 2b0c6fbb8e3bf..ecbac32366028 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -441,7 +441,7 @@ def _bins_to_cuts( if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == "raise": raise ValueError( - f"Bin edges must be unique: {repr(bins)}.\n" + f"Bin edges must be unique: {bins!r}.\n" f"You can drop duplicate edges by setting the 'duplicates' kwarg" ) bins = unique_bins diff --git a/pandas/core/series.py b/pandas/core/series.py index fdb1ed4d41a5e..0e1e9c964632e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1878,7 +1878,7 @@ def to_string( if not isinstance(result, str): raise AssertionError( "result must be of type str, type " - f"of result is 
{repr(type(result).__name__)}" + f"of result is {type(result).__name__!r}" ) if buf is None: diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 3cda1273d4ae7..ee8051449e103 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -587,7 +587,7 @@ class UndefinedVariableError(NameError): """ def __init__(self, name: str, is_local: bool | None = None) -> None: - base_msg = f"{repr(name)} is not defined" + base_msg = f"{name!r} is not defined" if is_local: msg = f"local variable {base_msg}" else: diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index cddff9a97056a..89f7cb9c4dec6 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -342,7 +342,7 @@ def _update_other_units(self, props: dict[str, str]) -> dict[str, str]: def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str: def _error() -> str: warnings.warn( - f"Unhandled size: {repr(in_val)}", + f"Unhandled size: {in_val!r}", CSSWarning, stacklevel=find_stack_level(), ) @@ -415,7 +415,7 @@ def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]: yield prop, val else: warnings.warn( - f"Ill-formatted attribute: expected a colon in {repr(decl)}", + f"Ill-formatted attribute: expected a colon in {decl!r}", CSSWarning, stacklevel=find_stack_level(), ) diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 5fd23cd7d918a..0c3a53eb1cfea 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -333,7 +333,7 @@ def _border_style(self, style: str | None, width: str | None, color: str | None) return self.BORDER_STYLE_MAP[style] else: warnings.warn( - f"Unhandled border style format: {repr(style)}", + f"Unhandled border style format: {style!r}", CSSWarning, stacklevel=find_stack_level(), ) @@ -469,7 +469,7 @@ def color_to_excel(self, val: str | None) -> str | None: return self.NAMED_COLORS[val] except KeyError: warnings.warn( - f"Unhandled color format: {repr(val)}", + f"Unhandled color 
format: {val!r}", CSSWarning, stacklevel=find_stack_level(), ) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d43cd731e86c3..6f255fdfc82db 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1715,12 +1715,12 @@ def _apply( if not isinstance(result, DataFrame): if not isinstance(result, np.ndarray): raise TypeError( - f"Function {repr(func)} must return a DataFrame or ndarray " + f"Function {func!r} must return a DataFrame or ndarray " f"when passed to `Styler.apply` with axis=None" ) if data.shape != result.shape: raise ValueError( - f"Function {repr(func)} returned ndarray with wrong shape.\n" + f"Function {func!r} returned ndarray with wrong shape.\n" f"Result has shape: {result.shape}\n" f"Expected shape: {data.shape}" ) @@ -1734,12 +1734,12 @@ def _apply( if isinstance(result, Series): raise ValueError( - f"Function {repr(func)} resulted in the apply method collapsing to a " + f"Function {func!r} resulted in the apply method collapsing to a " f"Series.\nUsually, this is the result of the function returning a " f"single value, instead of list-like." 
) msg = ( - f"Function {repr(func)} created invalid {{0}} labels.\nUsually, this is " + f"Function {func!r} created invalid {{0}} labels.\nUsually, this is " f"the result of the function returning a " f"{'Series' if axis is not None else 'DataFrame'} which contains invalid " f"labels, or returning an incorrectly shaped, list-like object which " diff --git a/pandas/io/html.py b/pandas/io/html.py index 26e71c9546ffd..0f3704b698915 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -619,7 +619,7 @@ def _parse_tables(self, document, match, attrs): result.append(table) unique_tables.add(table) if not result: - raise ValueError(f"No tables found matching pattern {repr(match.pattern)}") + raise ValueError(f"No tables found matching pattern {match.pattern!r}") return result def _href_getter(self, obj) -> str | None: @@ -691,7 +691,7 @@ def _build_xpath_expr(attrs) -> str: if "class_" in attrs: attrs["class"] = attrs.pop("class_") - s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()]) + s = " and ".join([f"@{k}={v!r}" for k, v in attrs.items()]) return f"[{s}]" @@ -734,7 +734,7 @@ def _parse_tables(self, document, match, kwargs): # 1. 
check all descendants for the given pattern and only search tables # GH 49929 - xpath_expr = f"//table[.//text()[re:test(., {repr(pattern)})]]" + xpath_expr = f"//table[.//text()[re:test(., {pattern!r})]]" # if any table attributes were given build an xpath expression to # search for them @@ -755,7 +755,7 @@ def _parse_tables(self, document, match, kwargs): if "display:none" in elem.attrib.get("style", "").replace(" ", ""): elem.drop_tree() if not tables: - raise ValueError(f"No tables found matching regex {repr(pattern)}") + raise ValueError(f"No tables found matching regex {pattern!r}") return tables def _equals_tag(self, obj, tag) -> bool: @@ -914,7 +914,7 @@ def _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]: valid_parsers = list(_valid_parsers.keys()) if flavor not in valid_parsers: raise ValueError( - f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}" + f"{flavor!r} is not a valid flavor, valid flavors are {valid_parsers}" ) if flavor in ("bs4", "html5lib"): @@ -938,7 +938,7 @@ def _validate_flavor(flavor): elif isinstance(flavor, abc.Iterable): if not all(isinstance(flav, str) for flav in flavor): raise TypeError( - f"Object of type {repr(type(flavor).__name__)} " + f"Object of type {type(flavor).__name__!r} " f"is not an iterable of strings" ) else: diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 7e3e83d59c87c..de4033d5767e6 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -428,7 +428,7 @@ def _pull_records(js: dict[str, Any], spec: list | str) -> list: else: raise TypeError( f"Path must contain list or null, " - f"but got {type(result).__name__} at {repr(spec)}" + f"but got {type(result).__name__} at {spec!r}" ) return result diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index 2cdc3f235cb66..d35c153459bf8 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -1639,7 +1639,7 @@ def 
_get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: and value != getattr(value, "value", default) ): raise ValueError( - f"The {repr(argname)} option is not supported with the " + f"The {argname!r} option is not supported with the " f"'pyarrow' engine" ) options[argname] = value @@ -1656,8 +1656,8 @@ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: pass else: raise ValueError( - f"The {repr(argname)} option is not supported with the " - f"{repr(engine)} engine" + f"The {argname!r} option is not supported with the " + f"{engine!r} engine" ) else: value = default @@ -1760,7 +1760,7 @@ def _clean_options( if fallback_reason and result[arg] != _c_parser_defaults.get(arg): raise ValueError( "Falling back to the 'python' engine because " - f"{fallback_reason}, but this causes {repr(arg)} to be " + f"{fallback_reason}, but this causes {arg!r} to be " "ignored as it is not supported by the 'python' engine." ) del result[arg] diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index c5bdfb5541788..895079bc15588 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -741,7 +741,7 @@ def _chunk_to_dataframe(self) -> DataFrame: js += 1 else: self.close() - raise ValueError(f"unknown column type {repr(self._column_types[j])}") + raise ValueError(f"unknown column type {self._column_types[j]!r}") df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False) return df diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index c39313d5dc654..ca5a75057fd34 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -144,7 +144,7 @@ def read_sas( format = "sas7bdat" else: raise ValueError( - f"unable to infer format of SAS file from filename: {repr(fname)}" + f"unable to infer format of SAS file from filename: {fname!r}" ) reader: ReaderBase diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index f95dde948ae65..9848ac00aa2b6 100644 --- 
a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -921,7 +921,7 @@ def _get_call_args(backend_name: str, data: Series | DataFrame, args, kwargs): if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = ", ".join( - [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)] + [f"{name}={value!r}" for (name, _), value in zip(arg_def, args)] ) msg = ( "`Series.plot()` should not be called with positional " diff --git a/pandas/tests/arrays/masked/test_indexing.py b/pandas/tests/arrays/masked/test_indexing.py index 28ee451a7ddd7..37f38a11cbeae 100644 --- a/pandas/tests/arrays/masked/test_indexing.py +++ b/pandas/tests/arrays/masked/test_indexing.py @@ -8,7 +8,7 @@ class TestSetitemValidation: def _check_setitem_invalid(self, arr, invalid): - msg = f"Invalid value '{str(invalid)}' for dtype {arr.dtype}" + msg = f"Invalid value '{invalid!s}' for dtype {arr.dtype}" msg = re.escape(msg) with pytest.raises(TypeError, match=msg): arr[0] = invalid diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 024af66ec0844..62f8458441e17 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -628,7 +628,7 @@ def __init__(self, name, color) -> None: self.color = color def __str__(self) -> str: - return f"<Thing {repr(self.name)}>" + return f"<Thing {self.name!r}>" # necessary for pretty KeyError __repr__ = __str__ @@ -706,7 +706,7 @@ def __init__(self, name, color) -> None: self.color = color def __str__(self) -> str: - return f"<Thing {repr(self.name)}>" + return f"<Thing {self.name!r}>" thing1 = Thing("One", "red") thing2 = Thing("Two", "blue") diff --git a/pandas/tests/frame/test_repr.py b/pandas/tests/frame/test_repr.py index 776007fb9691d..4425d067e67dc 100644 --- a/pandas/tests/frame/test_repr.py +++ b/pandas/tests/frame/test_repr.py @@ -518,4 +518,4 @@ def test_repr_with_complex_nans(data, output, as_frame): else: reprs = [f"{i} {val}" for i, val in enumerate(output)] expected = "\n".join(reprs) + 
"\ndtype: complex128" - assert str(obj) == expected, f"\n{str(obj)}\n\n{expected}" + assert str(obj) == expected, f"\n{obj!s}\n\n{expected}" diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 77ce687d51693..66f209837345a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -899,7 +899,7 @@ def test_isin_nan_common_float64(self, nulls_fixture, float_numpy_dtype): # and 2) that with an NaN we do not have .isin(nulls_fixture) msg = ( r"float\(\) argument must be a string or a (real )?number, " - f"not {repr(type(nulls_fixture).__name__)}" + f"not {type(nulls_fixture).__name__!r}" ) with pytest.raises(TypeError, match=msg): Index([1.0, nulls_fixture], dtype=dtype) diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index f8790bdb5fa42..3e52e9b68735d 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -104,8 +104,8 @@ def test_python_engine(self, python_engine): for default in py_unsupported: msg = ( - f"The {repr(default)} option is not " - f"supported with the {repr(python_engine)} engine" + f"The {default!r} option is not " + f"supported with the {python_engine!r} engine" ) kwargs = {default: object()} @@ -143,10 +143,7 @@ def test_pyarrow_engine(self): 1,2,3,4,""" for default in pa_unsupported: - msg = ( - f"The {repr(default)} option is not " - f"supported with the 'pyarrow' engine" - ) + msg = f"The {default!r} option is not supported with the 'pyarrow' engine" kwargs = {default: object()} default_needs_bool = {"warn_bad_lines", "error_bad_lines"} if default == "dialect": diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 420d82c3af7e3..73044b8c24a53 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -1071,8 +1071,8 @@ def test_parse_dates_combine(self, flavor_read_html): def test_wikipedia_states_table(self, datapath, 
flavor_read_html): data = datapath("io", "data", "html", "wikipedia_states.html") - assert os.path.isfile(data), f"{repr(data)} is not a file" - assert os.path.getsize(data), f"{repr(data)} is an empty file" + assert os.path.isfile(data), f"{data!r} is not a file" + assert os.path.getsize(data), f"{data!r} is an empty file" result = flavor_read_html(data, match="Arizona", header=1)[0] assert result.shape == (60, 12) assert "Unnamed" in result.columns[-1] diff --git a/pandas/tests/strings/test_api.py b/pandas/tests/strings/test_api.py index 31e005466af7b..ff8c6a98e1819 100644 --- a/pandas/tests/strings/test_api.py +++ b/pandas/tests/strings/test_api.py @@ -170,7 +170,7 @@ def test_api_per_method( # GH 23011, GH 23163 msg = ( f"Cannot use .str.{method_name} with values of " - f"inferred dtype {repr(inferred_dtype)}." + f"inferred dtype {inferred_dtype!r}." ) with pytest.raises(TypeError, match=msg): method(*args, **kwargs) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 62afb8b83d576..72eff4b6ee479 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -798,10 +798,9 @@ def test_get_offset(): for name, expected in pairs: offset = _get_offset(name) - assert offset == expected, ( - f"Expected {repr(name)} to yield {repr(expected)} " - f"(actual: {repr(offset)})" - ) + assert ( + offset == expected + ), f"Expected {name!r} to yield {expected!r} (actual: {offset!r})" def test_get_offset_legacy(): diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index aef91064d12fb..83c9a66cbd2ca 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -177,9 +177,9 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]: if old_arg_value is not None: if new_arg_name is None: msg = ( - f"the {repr(old_arg_name)} keyword is deprecated and " + f"the {old_arg_name!r} keyword is deprecated and " "will be removed in a future version. 
Please take " - f"steps to stop the use of {repr(old_arg_name)}" + f"steps to stop the use of {old_arg_name!r}" ) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) kwargs[old_arg_name] = old_arg_value @@ -191,22 +191,22 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]: else: new_arg_value = mapping.get(old_arg_value, old_arg_value) msg = ( - f"the {old_arg_name}={repr(old_arg_value)} keyword is " + f"the {old_arg_name}={old_arg_value!r} keyword is " "deprecated, use " - f"{new_arg_name}={repr(new_arg_value)} instead." + f"{new_arg_name}={new_arg_value!r} instead." ) else: new_arg_value = old_arg_value msg = ( - f"the {repr(old_arg_name)} keyword is deprecated, " - f"use {repr(new_arg_name)} instead." + f"the {old_arg_name!r} keyword is deprecated, " + f"use {new_arg_name!r} instead." ) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name) is not None: msg = ( - f"Can only specify {repr(old_arg_name)} " - f"or {repr(new_arg_name)}, not both." + f"Can only specify {old_arg_name!r} " + f"or {new_arg_name!r}, not both." ) raise TypeError(msg) kwargs[new_arg_name] = new_arg_value diff --git a/pyproject.toml b/pyproject.toml index aef1b489c62b0..ddae60642b1bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -339,8 +339,6 @@ ignore = [ "RUF005", # pairwise-over-zipped (>=PY310 only) "RUF007", - # explicit-f-string-type-conversion - "RUF010", # mutable-class-default "RUF012" ] diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index ee7f9226a7090..698268ce382f8 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -174,7 +174,7 @@ def private_import_across_module(file_obj: IO[str]) -> Iterable[tuple[int, str]] continue if module_name.startswith("_"): - yield (node.lineno, f"Import of internal function {repr(module_name)}") + yield (node.lineno, f"Import of internal function {module_name!r}") def strings_with_wrong_placed_whitespace(