From 476fac56c973ddf88f6039a0328fe02425353078 Mon Sep 17 00:00:00 2001 From: Yuchen Ethan Xiao Date: Sun, 2 Feb 2025 00:52:41 -0500 Subject: [PATCH] style: manual split long string, comment and docs --- news/linelength79.rst | 23 ++++ src/diffpy/utils/diffraction_objects.py | 123 ++++++++++++-------- src/diffpy/utils/parsers/loaddata.py | 68 ++++++----- src/diffpy/utils/parsers/serialization.py | 34 ++++-- src/diffpy/utils/resampler.py | 46 ++++---- src/diffpy/utils/tools.py | 134 +++++++++++++--------- src/diffpy/utils/transforms.py | 32 ++++-- src/diffpy/utils/validators.py | 5 +- tests/conftest.py | 33 +++--- tests/test_diffraction_objects.py | 101 ++++++++++------ tests/test_loaddata.py | 6 +- tests/test_resample.py | 3 +- tests/test_tools.py | 61 ++++++---- tests/test_transforms.py | 73 ++++++++---- 14 files changed, 473 insertions(+), 269 deletions(-) create mode 100644 news/linelength79.rst diff --git a/news/linelength79.rst b/news/linelength79.rst new file mode 100644 index 00000000..be12d2fb --- /dev/null +++ b/news/linelength79.rst @@ -0,0 +1,23 @@ +**Added:** + +* update isort, flake8 and black to set line limit to 79 + +**Changed:** + +* + +**Deprecated:** + +* + +**Removed:** + +* + +**Fixed:** + +* + +**Security:** + +* diff --git a/src/diffpy/utils/diffraction_objects.py b/src/diffpy/utils/diffraction_objects.py index 1276e08d..63c310cf 100644 --- a/src/diffpy/utils/diffraction_objects.py +++ b/src/diffpy/utils/diffraction_objects.py @@ -31,14 +31,16 @@ ] x_values_not_equal_emsg = ( - "The two objects have different values in x arrays (my_do.all_arrays[:, [1, 2, 3]]). " - "Please ensure the x values of the two objects are identical by re-instantiating " - "the DiffractionObject with the correct x value inputs." + "The two objects have different values in x arrays " + "my_do.all_arrays[:, [1, 2, 3]]). Please ensure the x values of the two " + "objects are identical by re-instantiating the DiffractionObject with the " + "correct x value inputs." ) invalid_add_type_emsg = ( - "You may only add a DiffractionObject with another DiffractionObject or a scalar value. " - "Please rerun by adding another DiffractionObject instance or a scalar value. " + "You may only add a DiffractionObject with another DiffractionObject or a" + "scalar value. Please rerun by adding another DiffractionObject or " + "a scalar value. " "e.g., my_do_1 + my_do_2 or my_do + 10 or 10 + my_do" ) @@ -70,11 +72,14 @@ class DiffractionObject: Attributes ---------- scat_quantity : str - The type of scattering experiment (e.g., "x-ray", "neutron"). Default is an empty string "". + The type of scattering experiment (e.g., "x-ray", "neutron"). + Default is an empty string "". wavelength : float - The wavelength of the incoming beam, specified in angstroms (Å). Default is none. + The wavelength of the incoming beam, specified in angstroms (Å). + Default is none. name: str - The name or label for the scattering data. Default is an empty string "". + The name or label for the scattering data. Default is an empty + string "". qmin : float The minimum q value. qmax : float @@ -104,11 +109,13 @@ def __init__( Parameters ---------- xarray : ndarray - The independent variable array containing "q", "tth", or "d" values. + The independent variable array containing "q", "tth", or "d" + values. yarray : ndarray The dependent variable array corresponding to intensity values. xtype : str - The type of the independent variable in `xarray`. Must be one of {*XQUANTITIES}. + The type of the independent variable in `xarray`. 
+ Must be one of {*XQUANTITIES}. wavelength : float, optional, default is None. The wavelength of the incoming beam, specified in angstroms (Å) scat_quantity : str, optional, default is an empty string "". @@ -124,7 +131,8 @@ def __init__( >>> import numpy as np >>> from diffpy.utils.diffraction_objects import DiffractionObject ... - >>> x = np.array([0.12, 0.24, 0.31, 0.4]) # independent variable (e.g., q) + >>> # independent variable (e.g., q) + >>> x = np.array([0.12, 0.24, 0.31, 0.4]) >>> y = np.array([10, 20, 40, 60]) # intensity values >>> metadata = { ... "sample": "rock salt from the beach", @@ -211,23 +219,28 @@ def __add__(self, other): Parameters ---------- other : DiffractionObject, int, or float - The item to be added. If `other` is a scalar value, this value will be added to each element of the - yarray of this DiffractionObject instance. If `other` is another DiffractionObject, the yarrays of the - two DiffractionObjects will be combined element-wise. The result is a new DiffractionObject instance, - representing the addition and using the xarray from the left-hand side DiffractionObject. + The item to be added. If `other` is a scalar value, this value will + be added to each element of the yarray of this DiffractionObject + instance. If `other` is another DiffractionObject, the yarrays of + the two DiffractionObjects will be combined element-wise.The result + is a new DiffractionObject instance,representing the addition and + using the xarray from the left-hand side DiffractionObject. Returns ------- DiffractionObject - The new DiffractionObject instance with modified yarray values. This instance is a deep copy of the - original with the additions applied. + The new DiffractionObject instance with modified yarray values. + This instance is a deep copy of the original with the additions + applied. Raises ------ ValueError - Raised when the xarrays of two DiffractionObject instances are not equal. + Raised when the xarrays of two DiffractionObject instances are not + equal. TypeError - Raised when `other` is not an instance of DiffractionObject, int, or float. + Raised when `other` is not an instance of DiffractionObject, int, + or float. Examples -------- @@ -253,12 +266,14 @@ def __sub__(self, other): """Subtract scalar value or another DiffractionObject to the yarray of the DiffractionObject. - This method behaves similarly to the `__add__` method, but performs subtraction instead of addition. - For details on parameters, returns, and exceptions, refer to the documentation for `__add__`. + This method behaves similarly to the `__add__` method, but performs + subtraction instead of addition.For details on parameters, returns, and + exceptions, refer to the documentation for `__add__`. Examples -------- - Subtract a scalar value from the yarray of a DiffractionObject instance: + Subtract a scalar value from the yarray of a DiffractionObject + instance: >>> new_do = my_do - 10.1 Subtract the yarrays of two DiffractionObject instances: @@ -279,12 +294,14 @@ def __mul__(self, other): """Multiply a scalar value or another DiffractionObject with the yarray of this DiffractionObject. - This method behaves similarly to the `__add__` method, but performs multiplication instead of addition. - For details on parameters, returns, and exceptions, refer to the documentation for `__add__`. + This method behaves similarly to the `__add__` method, but performs + multiplication instead of addition. 
For details on parameters, returns, + and exceptions, refer to the documentation for `__add__`. Examples -------- - Multiply a scalar value with the yarray of a DiffractionObject instance: + Multiply a scalar value with the yarray of a DiffractionObject + instance: >>> new_do = my_do * 3.5 Multiply the yarrays of two DiffractionObject instances: @@ -305,8 +322,9 @@ def __truediv__(self, other): """Divide the yarray of this DiffractionObject by a scalar value or another DiffractionObject. - This method behaves similarly to the `__add__` method, but performs division instead of addition. - For details on parameters, returns, and exceptions, refer to the documentation for `__add__`. + This method behaves similarly to the `__add__` method, but performs + division instead of addition.For details on parameters, returns, and + exceptions, refer to the documentation for `__add__`. Examples -------- @@ -344,7 +362,8 @@ def all_arrays(self): Returns ------- ndarray - The shape (len(data), 4) 2D array with columns containing the `yarray` (intensity) + The shape (len(data), 4) 2D array with columns containing the + `yarray` (intensity) and the `xarray` values in q, tth, and d. Examples @@ -399,21 +418,26 @@ def get_array_index(self, xtype, xvalue): Parameters ---------- xtype : str - The type of the independent variable in `xarray`. Must be one of {*XQUANTITIES}. + The type of the independent variable in `xarray`. Must be one of + {*XQUANTITIES}. xvalue : float The value of the xtype to find the closest index for. Returns ------- index : int - The index of the closest value in the array associated with the specified xtype and the value provided. + The index of the closest value in the array associated with the + specified xtype and the value provided. """ xtype = self._input_xtype xarray = self.on_xtype(xtype)[0] if len(xarray) == 0: raise ValueError( - f"The '{xtype}' array is empty. Please ensure it is initialized." + ( + f"The '{xtype}' array is empty. " + f"Please ensure it is initialized." + ) ) index = (np.abs(xarray - xvalue)).argmin() return index @@ -486,10 +510,12 @@ def scale_to( """Return a new diffraction object which is the current object but rescaled in y to the target. - By default, if `q`, `tth`, or `d` are not provided, scaling is based on the max intensity from each object. - Otherwise, y-value in the target at the closest specified x-value will be used as the factor to scale to. - The entire array is scaled by this factor so that one object places on top of the other at that point. - If multiple values of `q`, `tth`, or `d` are provided, an error will be raised. + By default, if `q`, `tth`, or `d` are not provided, scaling is based on + the max intensity from each object. Otherwise, y-value in the target at + the closest specified x-value will be used as the factor to scale to. + The entire array is scaled by this factor so that one object places on + top of the other at that point.If multiple values of `q`, `tth`, or `d` + are provided, an error will be raised. Parameters ---------- @@ -497,8 +523,9 @@ def scale_to( The diffraction object you want to scale the current one onto. q, tth, d : float, optional, default is None - The value of the x-array where you want the curves to line up vertically. - Specify a value on one of the allowed grids, q, tth, or d), e.g., q=10. + The value of the x-array where you want the curves to line up + vertically. Specify a value on one of the allowed grids, q, tth, + or d), e.g., q=10. 
offset : float, optional, default is None The offset to add to the scaled y-values. @@ -546,7 +573,8 @@ def on_xtype(self, xtype): Parameters ---------- xtype : str - The type of quantity for the independent variable chosen from {*XQUANTITIES, } + The type of quantity for the independent variable chosen from + {*XQUANTITIES, } Raises ------ @@ -556,7 +584,8 @@ def on_xtype(self, xtype): Returns ------- (xarray, yarray) : tuple of ndarray - The tuple containing two 1D numpy arrays with x and y data for the specified xtype. + The tuple containing two 1D numpy arrays with x and y data for the + specified xtype. """ if xtype.lower() in ANGLEQUANTITIES: return self.on_tth() @@ -576,12 +605,13 @@ def dump(self, filepath, xtype=None): filepath : str The filepath where the diffraction object will be dumped xtype : str, optional, default is q - The type of quantity for the independent variable chosen from {*XQUANTITIES, } + The type of quantity for the independent variable chosen from + {*XQUANTITIES, } Examples -------- - To save a diffraction object to a file named "diffraction_data.chi" in the current directory - with the independent variable 'q': + To save a diffraction object to a file named "diffraction_data.chi" in + the current directory with the independent variable 'q': >>> file = "diffraction_data.chi" >>> do.dump(file, xtype="q") @@ -591,7 +621,8 @@ def dump(self, filepath, xtype=None): >>> file = "./output/diffraction_data.chi" >>> do.dump(file, xtype="q") - To save the diffraction data with a different independent variable, such as 'tth': + To save the diffraction data with a different independent variable, + such as 'tth': >>> file = "diffraction_data_tth.chi" >>> do.dump(file, xtype="tth") @@ -615,7 +646,8 @@ def dump(self, filepath, xtype=None): with open(filepath, "w") as f: f.write( - f"[DiffractionObject]\nname = {self.name}\nwavelength = {self.wavelength}\n" + f"[DiffractionObject]\nname = {self.name}\n" + f"wavelength = {self.wavelength}\n" f"scat_quantity = {self.scat_quantity}\n" ) for key, value in self.metadata.items(): @@ -629,6 +661,7 @@ def copy(self): Returns ------- DiffractionObject - The new instance of DiffractionObject, which is a deep copy of the current instance. + The new instance of DiffractionObject, which is a deep copy of the + current instance. """ return deepcopy(self) diff --git a/src/diffpy/utils/parsers/loaddata.py b/src/diffpy/utils/parsers/loaddata.py index 8154f8d5..a9ea5faf 100644 --- a/src/diffpy/utils/parsers/loaddata.py +++ b/src/diffpy/utils/parsers/loaddata.py @@ -25,53 +25,62 @@ def loadData( ): """Find and load data from a text file. - The data block is identified as the first matrix block of at least minrows rows and constant number of columns. - This seems to work for most of the datafiles including those generated by diffpy programs. + The data block is identified as the first matrix block of at least minrows + rows and constant number of columns. This seems to work for most of the + datafiles including those generated by diffpy programs. Parameters ---------- filename Name of the file we want to load data from. minrows: int - Minimum number of rows in the first data block. All rows must have the same number of floating - point values. + Minimum number of rows in the first data block. All rows must have the + same number of floating point values. headers: bool - when False (default), the function returns a numpy array of the data in the data block. 
- When True, the function instead returns a dictionary of parameters and their corresponding - values parsed from header (information prior the data block). See hdel and hignore for options - to help with parsing header information. + when False (default), the function returns a numpy array of the data + in the data block. When True, the function instead returns a + dictionary of parameters and their corresponding values parsed from + header (information prior the data block). See hdel and hignore for + options to help with parsing header information. hdel: str - (Only used when headers enabled.) Delimiter for parsing header information (default '='). e.g. using - default hdel, the line 'parameter = p_value' is put into the dictionary as {parameter: p_value}. + (Only used when headers enabled.) Delimiter for parsing header + information (default '='). e.g. using default hdel, the line ' + parameter = p_value' is put into the dictionary as + {parameter: p_value}. hignore: list - (Only used when headers enabled.) Ignore header rows beginning with any elements in hignore. - e.g. hignore=['# ', '['] causes the following lines to be skipped: '# qmax=10', '[defaults]'. + (Only used when headers enabled.) Ignore header rows beginning with + any elements in hignore. e.g. hignore=['# ', '['] causes the following + lines to be skipped: '# qmax=10', '[defaults]'. kwargs: - Keyword arguments that are passed to numpy.loadtxt including the following arguments below. (See - numpy.loadtxt for more details.) Only pass kwargs used by numpy.loadtxt. + Keyword arguments that are passed to numpy.loadtxt including the + following arguments below. (See numpy.loadtxt for more details.) Only + pass kwargs used by numpy.loadtxt. Useful kwargs ============= comments: str, sequence of str - The characters or list of characters used to indicate the start of a comment (default '#'). - Comment lines are ignored. + The characters or list of characters used to indicate the start of a + comment (default '#'). Comment lines are ignored. delimiter: str - Delimiter for the data in the block (default use whitespace). For comma-separated data blocks, - set delimiter to ','. + Delimiter for the data in the block (default use whitespace). For comma + -separated data blocks, set delimiter to ','. unpack: bool - Return data as a sequence of columns that allows tuple unpacking such as x, y = - loadData(FILENAME, unpack=True). Note transposing the loaded array as loadData(FILENAME).T has the same - effect. + Return data as a sequence of columns that allows tuple unpacking such + as x, y = loadData(FILENAME, unpack=True). Note transposing the loaded + array as loadData(FILENAME).T has the same effect. usecols: - Zero-based index of columns to be loaded, by default use all detected columns. The reading skips - data blocks that do not have the usecols-specified columns. + Zero-based index of columns to be loaded, by default use all detected + columns. The reading skips data blocks that do not have the usecols- + specified columns. Returns ------- data_block: ndarray - A numpy array containing the found data block. (This is not returned if headers is enabled.) + A numpy array containing the found data block. (This is not returned + if headers is enabled.) hdata: dict - If headers are enabled, return a dictionary of parameters read from the header. + If headers are enabled, return a dictionary of parameters read from + the header. 
""" from numpy import array, loadtxt @@ -108,7 +117,8 @@ def countcolumnsvalues(line): # Check if file exists before trying to open if not os.path.exists(filename): raise IOError( - f"File {filename} cannot be found. Please rerun the program specifying a valid filename." + f"File {filename} cannot be found. " + f"Please rerun the program specifying a valid filename." ) # make sure fid gets cleaned up @@ -194,7 +204,8 @@ class TextDataLoader(object): minrows: int Minimum number of rows in the first data block. (Default 10.) usecols: tuple - Which columns in our dataset to use. Ignores all other columns. If None (default), use all columns. + Which columns in our dataset to use. Ignores all other columns. If + None (default), use all columns. skiprows Rows in dataset to skip. (Currently not functional.) """ @@ -242,7 +253,8 @@ def readfp(self, fp, append=False): File details include: * File name. * All data blocks findable by loadData. - * Headers (if present) for each data block. (Generally the headers contain column name information). + * Headers (if present) for each data block. (Generally the headers + contain column name information). """ self._reset() # try to read lines from fp first diff --git a/src/diffpy/utils/parsers/serialization.py b/src/diffpy/utils/parsers/serialization.py index bf585b7f..28988ebb 100644 --- a/src/diffpy/utils/parsers/serialization.py +++ b/src/diffpy/utils/parsers/serialization.py @@ -47,16 +47,18 @@ def serialize_data( data_table: list or ndarray Data table. dt_colnames: list - Names of each column in data_table. Every name in data_table_cols will be put into the Dictionary - as a key with a value of that column in data_table (stored as a List). Put None for columns - without names. If dt_cols has less non-None entries than columns in data_table, - the pair {'data table': data_table} will be put in the dictionary. - (Default None: only entry {'data table': data_table} will be added to dictionary.) + Names of each column in data_table. Every name in data_table_cols will + be put into the Dictionary as a key with a value of that column in data + _table (stored as a List). Put None for columns without names. If dt_ + cols has less non-None entries than columns in data_table, the pair {' + data table': data_table} will be put in the dictionary. (Default None: + only entry {'data table': data_table} will be added to dictionary.) show_path: bool - include a path element in the database entry (default True). If 'path' is not included in hddata, - extract path from filename. + include a path element in the database entry (default True). If 'path' + is not included in hddata, extract path from filename. serial_file - Serial language file to dump dictionary into. If None (default), no dumping will occur. + Serial language file to dump dictionary into. If None (default), no + dumping will occur. 
Returns ------- @@ -79,7 +81,8 @@ def serialize_data( data.update(hdata) # second add named columns in dt_cols - # performed second to prioritize overwriting hdata entries with data_table column entries + # performed second to prioritize overwriting hdata entries with data_table + # column entries named_columns = 0 # initial value max_columns = 1 # higher than named_columns to trigger 'data table' entry if dt_colnames is not None: @@ -98,17 +101,24 @@ def serialize_data( if colname is not None: if colname in hdata.keys(): warnings.warn( - f"Entry '{colname}' in hdata has been overwritten by a data_table entry.", + ( + f"Entry '{colname}' in hdata has been overwritten " + f"by a data_table entry." + ), RuntimeWarning, ) data.update({colname: list(data_table[:, idx])}) named_columns += 1 - # finally add data_table as an entry named 'data table' if not all columns were parsed + # finally add data_table as an entry named 'data table' if not all columns + # were parsed if named_columns < max_columns: if "data table" in data.keys(): warnings.warn( - "Entry 'data table' in hdata has been overwritten by data_table.", + ( + "Entry 'data table' in hdata has been overwritten by " + "data_table." + ), RuntimeWarning, ) data.update({"data table": data_table}) diff --git a/src/diffpy/utils/resampler.py b/src/diffpy/utils/resampler.py index 115087a2..e73d5299 100644 --- a/src/diffpy/utils/resampler.py +++ b/src/diffpy/utils/resampler.py @@ -22,11 +22,11 @@ def wsinterp(x, xp, fp, left=None, right=None): """One-dimensional Whittaker-Shannon interpolation. - Reconstruct a continuous signal from discrete data points by utilizing sinc functions - as interpolation kernels. This function interpolates the values of fp (array), - which are defined over xp (array), at new points x (array or float). - The implementation is based on E. T. Whittaker's 1915 paper - (https://doi.org/10.1017/S0370164600017806). + Reconstruct a continuous signal from discrete data points by utilizing + sinc functions as interpolation kernels. This function interpolates + the values of fp (array), which are defined over xp (array), at new points + x (array or float). The implementation is based on E. T. Whittaker's 1915 + paper (https://doi.org/10.1017/S0370164600017806). Parameters ---------- @@ -37,17 +37,18 @@ def wsinterp(x, xp, fp, left=None, right=None): fp: ndarray The array of y values associated with xp. left: float - If given, set fp for x < xp[0] to left. Otherwise, if left is None (default) or not given, - set fp for x < xp[0] to fp evaluated at xp[-1]. + If given, set fp for x < xp[0] to left. Otherwise, if left is None + (default) or not given, set fp for x < xp[0] to fp evaluated at xp[-1]. right: float - If given, set fp for x > xp[-1] to right. Otherwise, if right is None (default) or not given, set fp for - x > xp[-1] to fp evaluated at xp[-1]. + If given, set fp for x > xp[-1] to right. Otherwise, if right is None + (default) or not given, set fp for x > xp[-1] to fp evaluated at + xp[-1]. Returns ------- ndarray or float - The interpolated values at points x. Returns a single float if x is a scalar, - otherwise returns a numpy.ndarray. + The interpolated values at points x. Returns a single float if x is a + scalar, otherwise returns a numpy.ndarray. """ scalar = np.isscalar(x) if scalar: @@ -82,10 +83,11 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None): """One-dimensional Whittaker-Shannon interpolation onto the Nyquist-Shannon grid. 
- Takes a band-limited function fp and original grid xp and resamples fp on the NS grid. - Uses the minimum number of points N required by the Nyquist sampling theorem. - N = (qmax-qmin)(rmax-rmin)/pi, where rmin and rmax are the ends of the real-space ranges. - fp must be finite, and the user inputs qmin and qmax of the frequency-domain. + Takes a band-limited function fp and original grid xp and resamples fp on + the NS grid. Uses the minimum number of points N required by the Nyquist + sampling theorem. N = (qmax-qmin)(rmax-rmin)/pi, where rmin and rmax are + the ends of the real-space ranges. fp must be finite, and the user inputs + qmin and qmax of the frequency-domain. Parameters ---------- @@ -103,8 +105,8 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None): x: ndarray The Nyquist-Shannon grid computed for the given qmin and qmax. fp_at_x: ndarray - The interpolated values at points x. Returns a single float if x is a scalar, - otherwise returns a numpy.ndarray. + The interpolated values at points x. Returns a single float if x is a + scalar, otherwise returns a numpy.ndarray. """ # Ensure numpy array xp = np.array(xp) @@ -122,8 +124,9 @@ def nsinterp(xp, fp, qmin=0, qmax=25, left=None, right=None): def resample(r, s, dr): """Resample a PDF on a new grid. - This uses the Whittaker-Shannon interpolation formula to put s1 on a new grid if dr is less than the sampling - interval of r1, or linear interpolation if dr is greater than the sampling interval of r1. + This uses the Whittaker-Shannon interpolation formula to put s1 on a new + grid if dr is less than the sampling interval of r1, or linear + interpolation if dr is greater than the sampling interval of r1. Parameters ---------- @@ -140,8 +143,9 @@ def resample(r, s, dr): """ warnings.warn( - "The 'resample' function is deprecated and will be removed in a future release (3.8.0). \n" - "'resample' has been renamed 'wsinterp' to better reflect functionality. Please use 'wsinterp' instead.", + "The 'resample' function is deprecated and will be removed in a " + "future release (3.8.0). \n'resample' has been renamed 'wsinterp' to " + "better reflect functionality. Please use 'wsinterp' instead.", DeprecationWarning, stacklevel=2, ) diff --git a/src/diffpy/utils/tools.py b/src/diffpy/utils/tools.py index 5924df23..2849e091 100644 --- a/src/diffpy/utils/tools.py +++ b/src/diffpy/utils/tools.py @@ -22,7 +22,8 @@ def _stringify(string_value): Returns ------- str - The original string if string_value is not None, otherwise an empty string. + The original string if string_value is not None, otherwise an empty + string. """ return string_value if string_value is not None else "" @@ -53,36 +54,40 @@ def get_user_info(owner_name=None, owner_email=None, owner_orcid=None): """Get name, email, and orcid of the owner/user from various sources and return it as a metadata dictionary. - The function looks for the information in json format configuration files with the name 'diffpyconfig.json'. - These can be in the user's home directory and in the current working directory. The information in the - config files are combined, with the local config overriding the home-directory one. Values for - owner_name, owner_email, and owner_orcid may be passed in to the function and these override the values - in the config files. - - A template for the config file is below. Create a text file called 'diffpyconfig.json' in your home directory - and copy-paste the template into it, editing it with your real information. 
+ The function looks for the information in json format configuration files + with the name 'diffpyconfig.json'. These can be in the user's home + directory and in the current working directory. The information in the + config files are combined, with the local config overriding the home- + directory one. Values for owner_name, owner_email, and owner_orcid may be + passed in to the function and these override the values in the config + files. + + A template for the config file is below. Create a text file called ' + diffpyconfig.json' in your home directory and copy-paste the template into + it, editing it with your real information. { "owner_name": ">", "owner_email": ">@email.com", - "owner_orcid": ">" + "owner_orcid": ">" # noqa: E501 } - You may also store any other global-level information that you would like associated with your - diffraction data in this file + You may also store any other global-level information that you would like + associated with your diffraction data in this file Parameters ---------- - owner_name : str, optional, default is the value stored in the global or local config file. - The name of the user who will show as owner in the metadata that is stored with the data - owner_email : str, optional, default is the value stored in the global or local config file. - The email of the user/owner - owner_orcid : str, optional, default is the value stored in the global or local config file. - The ORCID id of the user/owner + owner_name : str, optional, default is the value stored in the global or + local config file. The name of the user who will show as owner in the + metadata that is stored with the data + owner_email : str, optional, default is the value stored in the global or + local config file. The email of the user/owner + owner_orcid : str, optional, default is the value stored in the global or + local config file. The ORCID id of the user/owner Returns ------- user_info : dict - The dictionary containing username, email and orcid of the user/owner, and any other information - stored in the global or local config files. + The dictionary containing username, email and orcid of the user/owner, + and any other information stored in the global or local config files. """ runtime_info = { "owner_name": owner_name, @@ -104,24 +109,27 @@ def check_and_build_global_config(skip_config_creation=False): """Check for a global diffpu config file in user's home directory and creates one if it is missing. - The file it looks for is called diffpyconfig.json. This can contain anything in json format, but - minimally contains information about the computer owner. The information is used - when diffpy objects are created and saved to files or databases to retain ownership information - of datasets. For example, it is used by diffpy.utils.tools.get_user_info(). + The file it looks for is called diffpyconfig.json. This can contain + anything in json format, but minimally contains information about the + computer owner. The information is used when diffpy objects are created + and saved to files or databases to retain ownership information of datasets + . For example, it is used by diffpy.utils.tools.get_user_info(). - If the function finds no config file in the user's home directory it interrupts execution - and prompts the user for name, email, and orcid information. It then creates the config file - with this information inside it. 
+ If the function finds no config file in the user's home directory it + interrupts execution and prompts the user for name, email, and orcid + information. It then creates the config file with this information inside + it. The function returns True if the file exists and False otherwise. - If you would like to check for a file but not run the file creation workflow you can set - the optional argument skip_config_creation to True. + If you would like to check for a file but not run the file creation + workflow you can set the optional argument skip_config_creation to True. Parameters ---------- skip_config_creation : bool, optional, default is False. - The boolean that will override the creation workflow even if no config file exists. + The boolean that will override the creation workflow even if no config + file exists. Returns ------- @@ -136,13 +144,14 @@ def check_and_build_global_config(skip_config_creation=False): if skip_config_creation: return config_exists intro_text = ( - "No global configuration file was found containing information about the user to " - "associate with the data.\n By following the prompts below you can add your name " - "and email to this file on the current " - "computer and your name will be automatically associated with subsequent diffpy data by default.\n" - "This is not recommended on a shared or public computer. " - "You will only have to do that once.\n" - "For more information, please refer to www.diffpy.org/diffpy.utils/examples/toolsexample.html" + "No global configuration file was found containing information about " + "the user to associate with the data.\n By following the prompts " + "below you can add your name and email to this file on the current " + "computer and your name will be automatically associated with " + "subsequent diffpy data by default.\n This is not recommended on a " + "shared or public computer. You will only have to do that once.\n " + "For more information, please refer to www.diffpy.org/diffpy.utils/ " + "examples/toolsexample.html" ) print(intro_text) username = input( @@ -160,14 +169,15 @@ def check_and_build_global_config(skip_config_creation=False): with open(config_path, "w") as f: f.write(json.dumps(config)) outro_text = ( - f"The config file at {Path().home() / 'diffpyconfig.json'} has been created. " - f"The values {config} were entered.\n" - f"These values will be inserted as metadata with your data in apps that use " - f"diffpy.get_user_info(). If you would like to update these values, either " - f"delete the config file and this workflow will rerun next time you run this " - f"program. Or you may open the config file in a text editor and manually edit the" - f"entries. For more information, see: " - f"https://diffpy.github.io/diffpy.utils/examples/tools_example.html" + "The config file at {Path().home() / 'diffpyconfig.json'} has " + "been created. The values {config} were entered.\n These values " + "will be inserted as metadata with your data in apps that use " + "diffpy.get_user_info(). If you would like to update these " + "values , either delete the config file and this workflow will " + "return next time you run this program. Or you may open the " + "config file in a text editor and manually edit the entries. 
" + "For more information, see: https://diffpy.github.io/diffpy.utils" + "/examples/tools_example.html" ) print(outro_text) config_exists = True @@ -177,13 +187,15 @@ def check_and_build_global_config(skip_config_creation=False): def get_package_info(package_names, metadata=None): """Fetch package version and updates it into (given) metadata. - Package info stored in metadata as {'package_info': {'package_name': 'version_number'}}. + Package info stored in metadata as {'package_info': {'package_name': ' + version_number'}}. ---------- package_name : str or list The name of the package(s) to retrieve the version number for. metadata : dict - The dictionary to store the package info. If not provided, a new dictionary will be created. + The dictionary to store the package info. If not provided, a new + dictionary will be created. Returns ------- @@ -208,7 +220,8 @@ def get_density_from_cloud(sample_composition, mp_token=""): It is not implemented yet. """ raise NotImplementedError( - "So sorry, density computation from composition is not implemented right now. " + "So sorry, density computation from composition is not implemented " + "right now. " "We hope to have this implemented in the next release. " "Please rerun specifying a sample mass density." ) @@ -221,7 +234,8 @@ def compute_mu_using_xraydb( Computes mu based on the sample composition and energy. User should provide a sample mass density or a packing fraction. - If neither density nor packing fraction is specified, or if both are specified, a ValueError will be raised. + If neither density nor packing fraction is specified, or if both are + specified, a ValueError will be raised. Reference: https://xraypy.github.io/XrayDB/python.html#xraydb.material_mu. Parameters @@ -245,7 +259,8 @@ def compute_mu_using_xraydb( sample_mass_density is not None and packing_fraction is not None ): raise ValueError( - "You must specify either sample_mass_density or packing_fraction, but not both. " + "You must specify either sample_mass_density or packing_fraction, " + "but not both. " "Please rerun specifying only one." ) if packing_fraction is not None: @@ -275,8 +290,10 @@ def _model_function(z, diameter, z0, I0, mud, slope): """ Compute the model function with the following steps: 1. Let dz = z-z0, so that dz is centered at 0 - 2. Compute length l that is the effective length for computing intensity I = I0 * e^{-mu * l}: - - For dz within the capillary diameter, l is the chord length of the circle at position dz + 2. Compute length l that is the effective length for computing intensity + I = I0 * e^{-mu * l}: + - For dz within the capillary diameter, l is the chord length of the + circle at position dz - For dz outside this range, l = 0 3. Apply a linear adjustment to I0 by taking I0 as I0 - slope * z """ @@ -309,7 +326,8 @@ def _extend_z_and_convolve(z, diameter, half_slit_width, z0, I0, mud, slope): z_extended = np.concatenate([z_left_pad, z, z_right_pad]) I_extended = _model_function(z_extended, diameter, z0, I0, mud, slope) kernel = _top_hat(z_extended - z_extended.mean(), half_slit_width) - I_convolved = I_extended # this takes care of the case where slit width is close to 0 + # this takes care of the case where slit width is close to 0 + I_convolved = I_extended if kernel.sum() != 0: kernel /= kernel.sum() I_convolved = convolve(I_extended, kernel, mode="same") @@ -364,12 +382,14 @@ def compute_mud(filepath): This function loads z-scan data and fits it to a model that convolves a top-hat function with I = I0 * e^{-mu * l}. 
- The fitting procedure is run multiple times, and we return the best-fit parameters based on the lowest rmse. + The fitting procedure is run multiple times, and we return the best-fit + parameters based on the lowest rmse. The full mathematical details are described in the paper: - An ad hoc Absorption Correction for Reliable Pair-Distribution Functions from Low Energy x-ray Sources, - Yucong Chen, Till Schertenleib, Andrew Yang, Pascal Schouwink, Wendy L. Queen and Simon J. L. Billinge, - in preparation. + An ad hoc Absorption Correction for Reliable Pair-Distribution Functions + from Low Energy x-ray Sources, Yucong Chen, Till Schertenleib, Andrew + Yang , Pascal Schouwink, Wendy L. Queen and Simon J. L. Billinge, in + preparation. Parameters ---------- diff --git a/src/diffpy/utils/transforms.py b/src/diffpy/utils/transforms.py index 693bb693..d8989225 100644 --- a/src/diffpy/utils/transforms.py +++ b/src/diffpy/utils/transforms.py @@ -4,19 +4,24 @@ import numpy as np wavelength_warning_emsg = ( - "No wavelength has been specified. You can continue to use the DiffractionObject, but " - "some of its powerful features will not be available. " - "To specify a wavelength, if you have do = DiffractionObject(xarray, yarray, 'tth'), " - "you may set do.wavelength = 1.54 for a wavelength of 1.54 angstroms." + "No wavelength has been specified. You can continue to use the " + "DiffractionObject, but some of its powerful features will not be " + "available. To specify a wavelength, if you have do = DiffractionObject( " + "xarray, yarray, 'tth'), you may set do.wavelength = 1.54 for a " + "wavelength of 1.54 angstroms." ) invalid_tth_emsg = ( "Two theta exceeds 180 degrees. Please check the input values for errors." ) invalid_q_or_d_or_wavelength_emsg = ( - "The supplied input array and wavelength will result in an impossible two-theta. " - "Please check these values and re-instantiate the DiffractionObject with correct values." + "The supplied input array and wavelength will result in an impossible two " + "-theta. Please check these values and re-instantiate the " + "DiffractionObject with correct values." +) +inf_output_imsg = ( + "INFO: The largest output value in the array is infinite. " + "This is allowed, but it will not be plotted." ) -inf_output_imsg = "INFO: The largest output value in the array is infinite. This is allowed, but it will not be plotted." def _validate_inputs(q, wavelength): @@ -73,7 +78,8 @@ def q_to_tth(q, wavelength): def tth_to_q(tth, wavelength): r"""Helper function to convert two-theta to q on independent variable axis. - If wavelength is missing, returns independent variable axis as integer indexes. + If wavelength is missing, returns independent variable axis as integer + indexes. By definition the relationship is: @@ -100,7 +106,8 @@ def tth_to_q(tth, wavelength): ------- q : ndarray The 1D array of :math:`q` values np.array([qs]). - The units for the q-values are the inverse of the units of the provided wavelength. + The units for the q-values are the inverse of the units of the + provided wavelength. """ tth.astype(float) if np.any(np.deg2rad(tth) > np.pi): @@ -139,7 +146,8 @@ def q_to_d(q): def tth_to_d(tth, wavelength): r"""Helper function to convert two-theta to d on independent variable axis. - The formula is .. math:: d = \frac{\lambda}{2 \sin\left(\frac{2\theta}{2}\right)}. + The formula is .. + math::d = \frac{\lambda}{2 \sin\left(\frac{2\theta}{2}\right)}. Here we convert tth to q first, then to d. 
@@ -191,7 +199,9 @@ def d_to_q(d): def d_to_tth(d, wavelength): r"""Helper function to convert d to two-theta on independent variable axis. - The formula is .. math:: 2\theta = 2 \arcsin\left(\frac{\lambda}{2d}\right). + The formula is .. + + math:: 2\theta = 2 \arcsin\left(\frac{\lambda}{2d}\right). Here we convert d to q first, then to tth. diff --git a/src/diffpy/utils/validators.py b/src/diffpy/utils/validators.py index 91a461bf..a1e9fd33 100644 --- a/src/diffpy/utils/validators.py +++ b/src/diffpy/utils/validators.py @@ -1,8 +1,9 @@ def is_number(string): """Check if the provided string can be converted to a float. - Since integers can be converted to floats, this function will return True for integers as well. - Hence, we can use this function to check if a string is a number. + Since integers can be converted to floats, this function will return + True for integers as well. Hence, we can use this function to check if + a string is a number. Parameters ---------- diff --git a/tests/conftest.py b/tests/conftest.py index b6d3bf3c..2c54a44e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -37,7 +37,8 @@ def _load(filename): @pytest.fixture def do_minimal(): - # Create an instance of DiffractionObject with empty xarray and yarray values, and a non-empty wavelength + # Create an instance of DiffractionObject with empty xarray and yarray + # values, and a non-empty wavelength return DiffractionObject( xarray=np.empty(0), yarray=np.empty(0), xtype="tth", wavelength=1.54 ) @@ -45,7 +46,8 @@ def do_minimal(): @pytest.fixture def do_minimal_tth(): - # Create an instance of DiffractionObject with non-empty xarray, yarray, and wavelength values + # Create an instance of DiffractionObject with non-empty xarray, yarray + # , and wavelength values return DiffractionObject( wavelength=2 * np.pi, xarray=np.array([30, 60]), @@ -56,7 +58,8 @@ def do_minimal_tth(): @pytest.fixture def do_minimal_d(): - # Create an instance of DiffractionObject with non-empty xarray, yarray, and wavelength values + # Create an instance of DiffractionObject with non-empty xarray, yarray + # , and wavelength values return DiffractionObject( wavelength=1.54, xarray=np.array([1, 2]), @@ -68,9 +71,10 @@ def do_minimal_d(): @pytest.fixture def wavelength_warning_msg(): return ( - "No wavelength has been specified. You can continue to use the DiffractionObject, but " - "some of its powerful features will not be available. " - "To specify a wavelength, if you have do = DiffractionObject(xarray, yarray, 'tth'), " + "No wavelength has been specified. You can continue to use the " + "DiffractionObject, but some of its powerful features will not be " + "available. To specify a wavelength, " + "if you have do = DiffractionObject(xarray, yarray, 'tth'), " "you may set do.wavelength = 1.54 for a wavelength of 1.54 angstroms." ) @@ -78,16 +82,18 @@ def wavelength_warning_msg(): @pytest.fixture def invalid_q_or_d_or_wavelength_error_msg(): return ( - "The supplied input array and wavelength will result in an impossible two-theta. " - "Please check these values and re-instantiate the DiffractionObject with correct values." + "The supplied input array and wavelength will result in an " + "impossible two-theta. Please check these values and re-instantiate " + "the DiffractionObject with correct values." ) @pytest.fixture def invalid_add_type_error_msg(): return ( - "You may only add a DiffractionObject with another DiffractionObject or a scalar value. " - "Please rerun by adding another DiffractionObject instance or a scalar value. 
" + "You may only add a DiffractionObject with another DiffractionObject " + "or a scalar value. Please rerun by adding another DiffractionObject " + "instance or a scalar value. " "e.g., my_do_1 + my_do_2 or my_do + 10 or 10 + my_do" ) @@ -95,7 +101,8 @@ def invalid_add_type_error_msg(): @pytest.fixture def x_values_not_equal_error_msg(): return ( - "The two objects have different values in x arrays (my_do.all_arrays[:, [1, 2, 3]]). " - "Please ensure the x values of the two objects are identical by re-instantiating " - "the DiffractionObject with the correct x value inputs." + "The two objects have different values in x arrays " + "(my_do.all_arrays[:, [1, 2, 3]]). Please ensure the x values of the " + "two objects are identical by re-instantiating the DiffractionObject " + "with the correct x value inputs." ) diff --git a/tests/test_diffraction_objects.py b/tests/test_diffraction_objects.py index 7db4cc33..94cc1928 100644 --- a/tests/test_diffraction_objects.py +++ b/tests/test_diffraction_objects.py @@ -202,7 +202,8 @@ def test_init_invalid_xtype(): "org_do_args, target_do_args, scale_inputs, expected", [ # Test whether the original y-array is scaled as expected - ( # C1: none of q, tth, d, provided, expect to scale on the maximal intensity from each object + ( # C1: none of q, tth, d, provided, expect to scale on the maximal + # intensity from each object { "xarray": np.array([0.1, 0.2, 0.3]), "yarray": np.array([1, 2, 3]), @@ -275,8 +276,9 @@ def test_init_invalid_xtype(): {"tth": 60}, {"xtype": "tth", "yarray": np.array([1, 2, 3, 4, 5, 6, 10])}, ), - ( # C5.1: Reuse test case from C1, none of q, tth, d, provided, but include an offset, - # expect scaled y-array in C1 to shift up by 2 + ( # C5.1: Reuse test case from C1, none of q, tth, d, provided, but + # include an offset, expect scaled y-array in C1 to shift up + # by 2 { "xarray": np.array([0.1, 0.2, 0.3]), "yarray": np.array([1, 2, 3]), @@ -292,7 +294,8 @@ def test_init_invalid_xtype(): {"offset": 2}, {"xtype": "q", "yarray": np.array([12, 22, 32])}, ), - ( # C5.2: Reuse test case from C4, but include an offset, expect scaled y-array in C4 to shift up by 2 + ( # C5.2: Reuse test case from C4, but include an offset, expect + # scaled y-array in C4 to shift up by 2 { "xarray": np.array([10, 25, 30.1, 40.2, 61, 120, 140]), "yarray": np.array([10, 20, 30, 40, 50, 60, 100]), @@ -358,8 +361,10 @@ def test_scale_to_bad(org_do_args, target_do_args, scale_inputs): @pytest.mark.parametrize( "do_args, get_array_index_inputs, expected_index", [ - # Test get_array_index() returns the expected index given xtype and value - ( # C1: Target value is in the xarray and xtype is identical, expect exact index match + # Test get_array_index() returns the expected index given xtype and + # value + ( # C1: Target value is in the xarray and xtype is identical, expect + # exact index match { "wavelength": 4 * np.pi, "xarray": np.array([30.005, 60]), @@ -372,7 +377,8 @@ def test_scale_to_bad(org_do_args, target_do_args, scale_inputs): }, 0, ), - ( # C2: Target value lies in the array, expect the (first) closest index + ( # C2: Target value lies in the array, expect the (first) closest + # index { "wavelength": 4 * np.pi, "xarray": np.array([30, 60]), @@ -477,8 +483,10 @@ def test_dump(tmp_path, mocker): with open(file, "r") as f: actual = f.read() expected = ( - "[DiffractionObject]\nname = test\nwavelength = 1.54\nscat_quantity = x-ray\nthing1 = 1\n" - "thing2 = thing2\npackage_info = {'package2': '3.4.5', 'diffpy.utils': '3.3.0'}\n" + 
"[DiffractionObject]\nname = test\nwavelength = 1.54\n" + "scat_quantity = x-ray\nthing1 = 1\n" + "thing2 = thing2\n" + "package_info = {'package2': '3.4.5', 'diffpy.utils': '3.3.0'}\n" "creation_time = 2012-01-14 00:00:00\n\n" "#### start data\n0.000000000000000000e+00 0.000000000000000000e+00\n" "1.000000000000000000e+00 1.000000000000000000e+00\n" @@ -492,10 +500,15 @@ def test_dump(tmp_path, mocker): @pytest.mark.parametrize( - "do_init_args, expected_do_dict, divide_by_zero_warning_expected, wavelength_warning_expected", + ( + "do_init_args, expected_do_dict, divide_by_zero_warning_expected, " + "wavelength_warning_expected" + ), [ - # Test __dict__ of DiffractionObject instance initialized with valid arguments - ( # C1: Instantiate DO with empty arrays, expect it to be a valid DO, but with everything empty + # Test __dict__ of DiffractionObject instance initialized with valid + # arguments + ( # C1: Instantiate DO with empty arrays, expect it to be a valid DO, + # but with everything empty { "xarray": np.empty(0), "yarray": np.empty(0), @@ -518,8 +531,9 @@ def test_dump(tmp_path, mocker): False, True, ), - ( # C2: Instantiate just DO with empty array like in C1 but with wavelength, xtype, name, and metadata - # expect a valid DO with empty arrays, but with some non-array attributes + ( # C2: Instantiate just DO with empty array like in C1 but with + # wavelength, xtype, name, and metadata expect a valid DO with + # empty arrays, but with some non-array attributes { "xarray": np.empty(0), "yarray": np.empty(0), @@ -545,8 +559,9 @@ def test_dump(tmp_path, mocker): False, False, ), - ( # C3: Minimum arguments provided for init with non-empty values for xarray and yarray and wavelength - # expect all attributes set without None + ( # C3: Minimum arguments provided for init with non-empty values for + # xarray and yarray and wavelength expect all attributes set + # without None { "xarray": np.array([0.0, 90.0, 180.0]), "yarray": np.array([1.0, 2.0, 3.0]), @@ -576,7 +591,8 @@ def test_dump(tmp_path, mocker): True, False, ), - ( # C4: Same as C3, but with an optional scat_quantity argument, expect non-empty string for scat_quantity + ( # C4: Same as C3, but with an optional scat_quantity argument, + # expect non-empty string for scat_quantity { "xarray": np.array( [np.inf, 2 * np.sqrt(2) * np.pi, 2 * np.pi] @@ -643,12 +659,18 @@ def test_init_valid( @pytest.mark.parametrize( "do_init_args, expected_error_msg", [ - # Test expected error messages when 3 required arguments not provided in DiffractionObject init - ( # C1: No arguments provided, expect 3 required positional arguments error + # Test expected error messages when 3 required arguments not provided + # in DiffractionObject init + ( # C1: No arguments provided, expect 3 required positional arguments + # error {}, - "missing 3 required positional arguments: 'xarray', 'yarray', and 'xtype'", + ( + "missing 3 required positional arguments: " + "'xarray', 'yarray', and 'xtype'" + ), ), - ( # C2: Only xarray and yarray provided, expect 1 required positional argument error + ( # C2: Only xarray and yarray provided, expect 1 required positional + # argument error {"xarray": np.array([0.0, 90.0]), "yarray": np.array([0.0, 90.0])}, "missing 1 required positional argument: 'xtype'", ), @@ -704,7 +726,10 @@ def test_uuid_setter_error(do_minimal): with pytest.raises( AttributeError, - match="Direct modification of attribute 'uuid' is not allowed. 
Please use 'input_data' to modify 'uuid'.",
+        match=(
+            "Direct modification of attribute 'uuid' is not allowed. "
+            "Please use 'input_data' to modify 'uuid'."
+        ),
     ):
         do.uuid = uuid.uuid4()
@@ -749,8 +774,10 @@ def test_copy_object(do_minimal):
 @pytest.mark.parametrize(
     "operation, starting_yarray, scalar_value, expected_yarray",
     [
-        # Test scalar addition, subtraction, multiplication, and division to y-values by adding a scalar value
-        # C1: Test scalar addition to y-values (intensity), expect no change to x-values (q, tth, d)
+        # Test scalar addition, subtraction, multiplication, and division to
+        # y-values by adding a scalar value
+        # C1: Test scalar addition to y-values (intensity), expect no change
+        # to x-values (q, tth, d)
         (  # 1. Add 5
             "add",
             np.array([1.0, 2.0]),
@@ -763,7 +790,8 @@ def test_copy_object(do_minimal):
             5.1,
             np.array([6.1, 7.1]),
         ),
-        # C2: Test scalar subtraction to y-values (intensity), expect no change to x-values (q, tth, d)
+        # C2: Test scalar subtraction to y-values (intensity), expect no
+        # change to x-values (q, tth, d)
         (  # 1. Subtract 1
             "sub",
             np.array([1.0, 2.0]),
@@ -776,7 +804,8 @@ def test_copy_object(do_minimal):
             0.5,
             np.array([0.5, 1.5]),
         ),
-        # C3: Test scalar multiplication to y-values (intensity), expect no change to x-values (q, tth, d)
+        # C3: Test scalar multiplication to y-values (intensity), expect no
+        # change to x-values (q, tth, d)
         (  # 1. Multiply by 2
             "mul",
             np.array([1.0, 2.0]),
@@ -789,7 +818,8 @@ def test_copy_object(do_minimal):
             2.5,
             np.array([2.5, 5.0]),
         ),
-        # C4: Test scalar division to y-values (intensity), expect no change to x-values (q, tth, d)
+        # C4: Test scalar division to y-values (intensity), expect no change
+        # to x-values (q, tth, d)
         (  # 1. Divide by 2
             "div",
             np.array([1.0, 2.0]),
@@ -837,9 +867,13 @@ def test_scalar_operations(
 @pytest.mark.parametrize(
-    "operation, expected_do_1_all_arrays_with_y_modified, expected_do_2_all_arrays_with_y_modified",
+    (
+        "operation, expected_do_1_all_arrays_with_y_modified, "
+        "expected_do_2_all_arrays_with_y_modified"
+    ),
     [
-        # Test addition, subtraction, multiplication, and division of two DO objects
+        # Test addition, subtraction, multiplication, and division of two DO
+        # objects
         (  # Test addition of two DO objects, expect combined yarray values
             "add",
             np.array(
                 [
@@ -855,7 +889,8 @@ def test_scalar_operations(
                 ]
             ),
         ),
-        (  # Test subtraction of two DO objects, expect differences in yarray values
+        (  # Test subtraction of two DO objects, expect differences in yarray
+            # values
             "sub",
             np.array(
                 [
@@ -870,7 +905,8 @@ def test_scalar_operations(
                 ]
             ),
         ),
-        (  # Test multiplication of two DO objects, expect multiplication in yarray values
+        (  # Test multiplication of two DO objects, expect multiplication in
+            # yarray values
             "mul",
             np.array(
                 [
@@ -975,7 +1011,8 @@ def test_operator_invalid_type(do_minimal_tth, invalid_add_type_error_msg):
 def test_operator_invalid_xarray_values_not_equal(
     operation, do_minimal_tth, do_minimal_d, x_values_not_equal_error_msg
 ):
-    # Add two DO objects with different xarray values but equal in shape, expect ValueError
+    # Add two DO objects with different xarray values but equal in shape,
+    # expect ValueError
     do_1 = do_minimal_tth
     do_2 = do_minimal_d
     with pytest.raises(
diff --git a/tests/test_loaddata.py b/tests/test_loaddata.py
index 7580c672..82d947ee 100644
--- a/tests/test_loaddata.py
+++ b/tests/test_loaddata.py
@@ -15,9 +15,9 @@ def test_loadData_default(datafile):
     with pytest.raises(IOError) as err:
         loadData("doesnotexist.txt")
-    assert (
-        str(err.value)
-        == "File doesnotexist.txt cannot be found. Please rerun the program specifying a valid filename."
+    assert str(err.value) == (
+        "File doesnotexist.txt cannot be found. "
+        "Please rerun the program specifying a valid filename."
     )
 
     # The default minrows=10 makes it read from the third line
diff --git a/tests/test_resample.py b/tests/test_resample.py
index 6e6294ed..61b32004 100644
--- a/tests/test_resample.py
+++ b/tests/test_resample.py
@@ -7,7 +7,8 @@
 
 
 def test_wsinterp():
-    # FIXME: if another SW interp function exists, run comparisons for interpolated points
+    # FIXME: if another SW interp function exists, run comparisons for
+    # interpolated points
     # Sampling rate
     ssr = 44100**-1  # Standard sampling rate for human-hearable frequencies
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 8d69c6c0..1e866007 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -18,7 +18,8 @@
 @pytest.mark.parametrize(
     "runtime_inputs, expected",
-    [  # config file in home is present, no config in cwd. various runtime values passed
+    [  # config file in home is present, no config in cwd. various runtime
+        # values passed
         # C1: nothing passed in, expect uname, email, orcid from home_config
         (
             {},
@@ -28,7 +29,8 @@
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C2: empty strings passed in, expect uname, email, orcid from home_config
+        # C2: empty strings passed in, expect uname, email, orcid from
+        # home_config
         (
             {"owner_name": "", "owner_email": "", "owner_orcid": ""},
             {
@@ -37,7 +39,8 @@
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C3: just owner name passed in at runtime. expect runtime_oname but others from config
+        # C3: just owner name passed in at runtime.
+        # expect runtime_oname but others from config
         (
             {"owner_name": "runtime_ownername"},
             {
@@ -46,7 +49,8 @@
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C4: just owner email passed in at runtime. expect runtime_email but others from config
+        # C4: just owner email passed in at runtime.
+        # expect runtime_email but others from config
         (
             {"owner_email": "runtime@email.com"},
             {
@@ -55,7 +59,8 @@
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C5: just owner ci passed in at runtime. expect runtime_orcid but others from config
+        # C5: just owner ci passed in at runtime. expect runtime_orcid but
+        # others from config
         (
             {"owner_orcid": "runtime_orcid"},
             {
@@ -69,8 +74,8 @@
 def test_get_user_info_with_home_conf_file(
     runtime_inputs, expected, user_filesystem, mocker
 ):
-    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it, user_filesystem[1]
-    # is tmp_dir/cwd_dir
+    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it,
+    # user_filesystem[1] is tmp_dir/cwd_dir
     mocker.patch.object(Path, "home", return_value=user_filesystem[0])
     os.chdir(user_filesystem[1])
     actual = get_user_info(**runtime_inputs)
@@ -81,7 +86,8 @@ def test_get_user_info_with_home_conf_file(
     "runtime_inputs, expected",
     [  # tests as before but now config file present in cwd and home but orcid
         # missing in the cwd config
-        # C1: nothing passed in, expect uname, email from local config, orcid from home_config
+        # C1: nothing passed in, expect uname, email from local config, orcid
+        # from home_config
         (
             {},
             {
@@ -90,7 +96,8 @@ def test_get_user_info_with_home_conf_file(
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C2: empty strings passed in, expect uname, email, orcid from home_config
+        # C2: empty strings passed in, expect uname, email, orcid from
+        # home_config
         (
             {"owner_name": "", "owner_email": "", "owner_orcid": ""},
             {
@@ -99,7 +106,8 @@ def test_get_user_info_with_home_conf_file(
                 "owner_orcid": "home_orcid",
             },
         ),
-        # C3: just owner name passed in at runtime. expect runtime_oname but others from config
+        # C3: just owner name passed in at runtime. expect runtime_oname but
+        # others from config
        (
            {"owner_name": "runtime_ownername"},
            {
@@ -108,7 +116,8 @@ def test_get_user_info_with_home_conf_file(
                "owner_orcid": "home_orcid",
            },
        ),
-        # C4: just owner email passed in at runtime. expect runtime_email but others from config
+        # C4: just owner email passed in at runtime. expect runtime_email but
+        # others from config
        (
            {"owner_email": "runtime@email.com"},
            {
@@ -117,7 +126,8 @@ def test_get_user_info_with_home_conf_file(
                "owner_orcid": "home_orcid",
            },
        ),
-        # C5: just owner ci passed in at runtime. expect runtime_orcid but others from config
+        # C5: just owner ci passed in at runtime. expect runtime_orcid but
+        # others from config
        (
            {"owner_orcid": "runtime_orcid"},
            {
@@ -131,8 +141,8 @@
 def test_get_user_info_with_local_conf_file(
     runtime_inputs, expected, user_filesystem, mocker
 ):
-    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it, user_filesystem[1]
-    # is tmp_dir/cwd_dir
+    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it,
+    # user_filesystem[1] is tmp_dir/cwd_dir
     mocker.patch.object(Path, "home", return_value=user_filesystem[0])
     os.chdir(user_filesystem[1])
     local_config_data = {
@@ -147,7 +157,8 @@ def test_get_user_info_with_local_conf_file(
 @pytest.mark.parametrize(
     "test_inputs,expected",
-    [  # Check check_and_build_global_config() builds correct config when config is found missing
+    [  # Check check_and_build_global_config() builds correct config when
+        # config is found missing
         (  # C1: user inputs valid name, email and orcid
             {"user_inputs": ["input_name", "input@email.com", "input_orcid"]},
             {
@@ -160,7 +171,8 @@ def test_get_user_info_with_local_conf_file(
             {"user_inputs": ["", "", ""]},
             None,
         ),  # C2: empty strings passed in, expect no config file created
-        (  # C3: just username input, expect config file but with some empty values
+        (  # C3: just username input, expect config file but with some empty
+            # values
             {"user_inputs": ["input_name", "", ""]},
             {"owner_email": "", "owner_orcid": "", "owner_name": "input_name"},
         ),
@@ -169,8 +181,8 @@
 def test_check_and_build_global_config(
     test_inputs, expected, user_filesystem, mocker
 ):
-    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it, user_filesystem[1]
-    # is tmp_dir/cwd_dir
+    # user_filesystem[0] is tmp_dir/home_dir with the global config file in it,
+    # user_filesystem[1] is tmp_dir/cwd_dir
     mocker.patch.object(Path, "home", return_value=user_filesystem[0])
     os.chdir(user_filesystem[1])
     confile = user_filesystem[0] / "diffpyconfig.json"
@@ -262,7 +274,8 @@ def test_get_package_info(monkeypatch, inputs, expected):
     "inputs",
     [  # Test when the function has invalid inputs
-        (  # C1: Both mass density and packing fraction are provided, expect ValueError exception
+        (  # C1: Both mass density and packing fraction are provided, expect
+            # ValueError exception
             {
                 "sample_composition": "SiO2",
                 "energy": 10,
                 "sample_mass_density": 10,
                 "packing_fraction": 1,
             }
         ),
-        (  # C2: None of mass density or packing fraction are provided, expect ValueError exception
+        (  # C2: None of mass density or packing fraction are provided, expect
+            # ValueError exception
             {
                 "sample_composition": "SiO2",
                 "energy": 10,
             }
         ),
     ],
 )
@@ -281,8 +295,11 @@
 def test_compute_mu_using_xraydb_bad(inputs):
     with pytest.raises(
         ValueError,
-        match="You must specify either sample_mass_density or packing_fraction, but not both. "
-        "Please rerun specifying only one.",
+        match=(
+            "You must specify "
+            "either sample_mass_density or packing_fraction, but not both. "
+            "Please rerun specifying only one."
+        ),
     ):
         compute_mu_using_xraydb(**inputs)
diff --git a/tests/test_transforms.py b/tests/test_transforms.py
index 4617584c..1553aa34 100644
--- a/tests/test_transforms.py
+++ b/tests/test_transforms.py
@@ -20,15 +20,17 @@
         # C1: Allow empty array q to compute tth with or without wavelength
         # 1. Wavelength provided, expect empty array of tth
         (4 * np.pi, np.empty((0)), np.empty(0)),
-        # 2. No wavelength provided, expected empty array of tth and wavelength UserWarning
+        # 2. No wavelength provided, expected empty array of tth
+        # and wavelength UserWarning
         (None, np.empty((0)), np.empty((0))),
         # C2: Use non-empty q values to compute tth with or without wavelength
-        (  # 1. No wavelength provided, expect valid tth values in degrees with wavelength UserWarning
+        (  # 1. No wavelength provided, expect valid tth values in degrees
+            # with wavelength UserWarning
            None,
            np.array([0, 0.2, 0.4, 0.6, 0.8, 1]),
            np.array([0, 1, 2, 3, 4, 5]),
        ),
-        (  # 2. Wavelength provided, expect tth values of 2*arcsin(q) in degrees
+        (  # 2. Wavelength provided, expect tth values of 2*arcsin(q) in degree
            4 * np.pi,
            np.array([0, 1 / np.sqrt(2), 1.0]),
            np.array([0, 90.0, 180.0]),
@@ -50,12 +52,14 @@ def test_q_to_tth(wavelength, q, expected_tth, wavelength_warning_msg):
     "wavelength, q, expected_error_type",
     [
         # Test ValeuError in q to tth conversion with invalid two-theta values.
-        (  # C1: Invalid q values that result in tth > 180 degrees, expect ValueError
+        (  # C1: Invalid q values that result in tth > 180 degrees, expect
+            # ValueError
            4 * np.pi,
            np.array([0.2, 0.4, 0.6, 0.8, 1, 1.2]),
            ValueError,
        ),
-        (  # C2: Wrong wavelength that results in tth > 180 degrees, expect ValueError
+        (  # C2: Wrong wavelength that results in tth > 180 degrees, expect
+            # ValueError
            100,
            np.array([0, 0.2, 0.4, 0.6, 0.8, 1]),
            ValueError,
@@ -77,15 +81,18 @@ def test_q_to_tth_bad(
         # C1: Allow empty tth values to compute 1, with or without wavelength
         # 1. Wavelength provided, expect empty array of q
         (None, np.array([]), np.array([])),
-        # 2. No wavelength provided, expected empty array of q and wavelength UserWarning
+        # 2. No wavelength provided, expected empty array of q and wavelength
+        # UserWarning
         (4 * np.pi, np.array([]), np.array([])),
-        # C2: Use non-empty tth values between 0-180 degrees to compute q, with or without wavelength
+        # C2: Use non-empty tth values between 0-180 degrees to compute q,
+        # with or without wavelength
         (  # 1. No wavelength provided, expect valid q values between 0-1
            None,
            np.array([0, 30, 60, 90, 120, 180]),
            np.array([0, 1, 2, 3, 4, 5]),
        ),
-        (  # 2. Wavelength provided, expect expected q values are sin15, sin30, sin45, sin60, sin90
+        (  # 2. Wavelength provided, expect expected q values are sin15, sin30
+            # , sin45, sin60, sin90
            4 * np.pi,
            np.array([0, 30.0, 60.0, 90.0, 120.0, 180.0]),
            np.array([0, 0.258819, 0.5, 0.707107, 0.866025, 1]),
@@ -107,18 +114,25 @@ def test_tth_to_q(wavelength, tth, expected_q, wavelength_warning_msg):
 @pytest.mark.parametrize(
     "wavelength, tth, expected_error_type, expected_error_msg",
     [
-        # C1: Invalid tth value of > 180 degrees provided, with or without wavelength
+        # C1: Invalid tth value of > 180 degrees provided, with or without
+        # wavelength
         (  # 1. No wavelength provided, expect two theta ValueError
            None,
            np.array([0, 30, 60, 90, 120, 181]),
            ValueError,
-            "Two theta exceeds 180 degrees. Please check the input values for errors.",
+            (
+                "Two theta exceeds 180 degrees. "
+                "Please check the input values for errors."
+            ),
        ),
        (  # 2. Wavelength provided, expect two theta ValueError
            4 * np.pi,
            np.array([0, 30, 60, 90, 120, 181]),
            ValueError,
-            "Two theta exceeds 180 degrees. Please check the input values for errors.",
+            (
+                "Two theta exceeds 180 degrees. "
+                "Please check the input values for errors."
+            ),
        ),
     ],
 )
@@ -143,7 +157,8 @@ def test_tth_to_q_bad(
            np.array([62.83185307, 2, 1, 0.66667, 0.5, 0.4]),
            False,
        ),
-        (  # 2. Valid q values containing 0, expect d values with divide by zero warning
+        (  # 2. Valid q values containing 0, expect d values with divide by
+            # zero warning
            np.array(
                [0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]
            ),
@@ -195,14 +210,16 @@ def test_d_to_q(d, expected_q, zero_divide_error_expected):
         (None, np.array([]), np.array([]), False),
         # C2: Empty tth values, wavelength provided, expect empty d values
         (4 * np.pi, np.array([]), np.array([]), False),
-        # C3: User specified valid tth values between 0-180 degrees (without wavelength)
+        # C3: User specified valid tth values between 0-180 degrees (without
+        # wavelength)
        (
            None,
            np.array([0, 30, 60, 90, 120, 180]),
            np.array([0, 1, 2, 3, 4, 5]),
            False,
        ),
-        (  # C4: User specified valid tth values between 0-180 degrees (with wavelength)
+        (  # C4: User specified valid tth values between 0-180 degrees (with
+            # wavelength)
            4 * np.pi,
            np.array([0, 30.0, 60.0, 90.0, 120.0, 180.0]),
            np.array([np.inf, 24.27636, 12.56637, 8.88577, 7.25520, 6.28319]),
@@ -235,17 +252,25 @@ def test_tth_to_d(
 @pytest.mark.parametrize(
     "wavelength, tth, expected_error_type, expected_error_msg",
     [
-        (  # C1: Invalid tth value of > 180 degrees, no wavelength, expect two theta ValueError
+        (  # C1: Invalid tth value of > 180 degrees, no wavelength, expect two
+            # theta ValueError
            None,
            np.array([0, 30, 60, 90, 120, 181]),
            ValueError,
-            "Two theta exceeds 180 degrees. Please check the input values for errors.",
+            (
+                "Two theta exceeds 180 degrees. "
+                "Please check the input values for errors."
+            ),
        ),
-        (  # C2: Invalid tth value of > 180 degrees with wavelength, expect two theta ValueError
+        (  # C2: Invalid tth value of > 180 degrees with wavelength, expect
+            # two theta ValueError
            4 * np.pi,
            np.array([0, 30, 60, 90, 120, 181]),
            ValueError,
-            "Two theta exceeds 180 degrees. Please check the input values for errors.",
+            (
+                "Two theta exceeds 180 degrees. "
+                "Please check the input values for errors."
+            ),
        ),
     ],
 )
@@ -263,14 +288,16 @@ def test_tth_to_d_invalid(
         (None, np.empty((0)), np.empty((0)), False),
         # C2: Empty d values with wavelength, expect empty tth values
         (4 * np.pi, np.empty((0)), np.empty(0), False),
-        # C3: Valid d values, no wavelength, expect valid and non-empty tth values
+        # C3: Valid d values, no wavelength, expect valid and non-empty tth
+        # values
        (
            None,
            np.array([1, 0.8, 0.6, 0.4, 0.2, 0]),
            np.array([0, 1, 2, 3, 4, 5]),
            True,
        ),
-        (  # C4: Valid d values with wavelength, expect valid and non-empty thh values
+        (  # C4: Valid d values with wavelength, expect valid and non-empty
+            # thh values
            4 * np.pi,
            np.array(
                [4 * np.pi, 4 / np.sqrt(2) * np.pi, 4 / np.sqrt(3) * np.pi]
            ),
@@ -308,9 +335,11 @@ def test_d_to_tth(
 @pytest.mark.parametrize(
     "wavelength, d, expected_error_type",
     [
-        # C1: Invalid d values that result in tth > 180 degrees, expect invalid q, d, or wavelength ValueError
+        # C1: Invalid d values that result in tth > 180 degrees, expect
+        # invalid q, d, or wavelength ValueError
         (4 * np.pi, np.array([1.2, 1, 0.8, 0.6, 0.4, 0.2]), ValueError),
-        # C2: Wrong wavelength that result in tth > 180 degreesm, expect invalid q, d, or wavelength ValueError
+        # C2: Wrong wavelength that result in tth > 180 degreesm, expect
+        # invalid q, d, or wavelength ValueError
         (100, np.array([1.2, 1, 0.8, 0.6, 0.4, 0.2]), ValueError),
     ],
 )