From c28b97b28a5a97471805d74b3c2b68a390027f1a Mon Sep 17 00:00:00 2001 From: Rundong Hua <157993340+stevenhua0320@users.noreply.github.com> Date: Mon, 29 Jul 2024 17:58:09 +0800 Subject: [PATCH] lint check and fix python2 print and exception issues (#21) * lint check and fix python2 print and exception issues * [pre-commit.ci] auto fixes from pre-commit hooks --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- diffpy/srmise/pdfpeakextraction.py | 2 +- diffpy/srmise/peakextraction.py | 618 ++++++++++++++++++----------- diffpy/srmise/peakstability.py | 66 ++- diffpy/srmise/srmiselog.py | 81 ++-- 4 files changed, 487 insertions(+), 280 deletions(-) diff --git a/diffpy/srmise/pdfpeakextraction.py b/diffpy/srmise/pdfpeakextraction.py index 8f9ecd6..06bd2be 100644 --- a/diffpy/srmise/pdfpeakextraction.py +++ b/diffpy/srmise/pdfpeakextraction.py @@ -913,7 +913,7 @@ def find_qmax(r, y, showgraphs=False): plt.show() plt.ioff() - raw_input() + input() return (qmax, dq) diff --git a/diffpy/srmise/peakextraction.py b/diffpy/srmise/peakextraction.py index 73da8a2..8f1d404 100644 --- a/diffpy/srmise/peakextraction.py +++ b/diffpy/srmise/peakextraction.py @@ -58,12 +58,22 @@ def __init__(self, newvars=[]): Parameters newvars: Sequence of strings that represent additional extraction parameters.""" self.clear() - self.extractvars = dict.fromkeys(('effective_dy', 'rng', 'pf', 'initial_peaks', 'baseline', 'cres', 'error_method')) + self.extractvars = dict.fromkeys( + ( + "effective_dy", + "rng", + "pf", + "initial_peaks", + "baseline", + "cres", + "error_method", + ) + ) for k in newvars: if k not in self.extractvars: self.extractvars[k] = None else: - emsg = "Extraction variable %s conflicts with existing variable" %k + emsg = "Extraction variable %s conflicts with existing variable" % k raise ValueError(emsg) return @@ -104,7 +114,7 @@ def setdata(self, x, y, dx=None, dy=None): if len(self.x) != len(self.dx) or len(self.x) != len(self.dy): emsg = "Sequences dx and dy (if present) must have the same length as x" raise ValueError(emsg) - #self.defaultvars() + # self.defaultvars() return def setvars(self, quiet=False, **kwds): @@ -120,7 +130,8 @@ def setvars(self, quiet=False, **kwds): baseline: Baseline instance or BaselineFunction instance (use built-in estimation) error_method: ErrorEvaluator subclass instance used to compare models (default AIC) initial_peaks: Peaks instance. These peaks are present at the start of extraction. - rng: Sequence specifying the least and greatest x-values over which to extract peaks.""" + rng: Sequence specifying the least and greatest x-values over which to extract peaks. + """ for k, v in kwds.iteritems(): if k in self.extractvars: if quiet: @@ -159,24 +170,26 @@ def defaultvars(self, *args): Note that the default values of very important parameters like the uncertainty and clustering resolution are crude guesses at best. """ - if self.cres is None or 'cres' in args: - self.cres = 4*(self.x[-1] - self.x[0])/len(self.x) + if self.cres is None or "cres" in args: + self.cres = 4 * (self.x[-1] - self.x[0]) / len(self.x) - if self.effective_dy is None or 'effective_dy' in args: + if self.effective_dy is None or "effective_dy" in args: if np.all(self.dy > 0): # That is, all points positive uncertainty. 
self.effective_dy = self.dy else: # A terribly crude guess - self.effective_dy = .05*(np.max(self.y)-np.min(self.y))*np.ones(len(self.x)) + self.effective_dy = ( + 0.05 * (np.max(self.y) - np.min(self.y)) * np.ones(len(self.x)) + ) elif np.isscalar(self.effective_dy) and self.effective_dy > 0: - self.effective_dy = self.effective_dy*np.ones(len(self.x)) + self.effective_dy = self.effective_dy * np.ones(len(self.x)) if self.pf is None or "pf" in args: from diffpy.srmise.peaks import GaussianOverR # TODO: Make a more useful default. - self.pf = [GaussianOverR(self.x[-1]-self.x[0])] + self.pf = [GaussianOverR(self.x[-1] - self.x[0])] if self.rng is None or "rng" in args: self.rng = [self.x[0], self.x[-1]] @@ -192,37 +205,40 @@ def defaultvars(self, *args): s = self.getrangeslice() epars = self.baseline.estimate_parameters(self.x[s], self.y[s]) self.baseline = self.baseline.actualize(epars, "internal") - logger.info("Estimating baseline: %s" %self.baseline) + logger.info("Estimating baseline: %s" % self.baseline) except (NotImplementedError, SrMiseEstimationError): - logger.error("Could not estimate baseline from provided BaselineFunction, trying default values.") + logger.error( + "Could not estimate baseline from provided BaselineFunction, trying default values." + ) self.baseline = None if self.baseline is None or "baseline" in args: from diffpy.srmise.baselines import Polynomial - bl = Polynomial(degree = -1) + + bl = Polynomial(degree=-1) self.baseline = bl.actualize(np.array([]), "internal") if self.error_method is None or "error_method" in args: from diffpy.srmise.modelevaluators import AIC + self.error_method = AIC if self.initial_peaks is None or "initial_peaks" in args: self.initial_peaks = Peaks() - def __str__(self): """Return string summary of PeakExtraction.""" out = [] for k in self.extractvars: - out.append("%s: %s" %(k, getattr(self, k))) + out.append("%s: %s" % (k, getattr(self, k))) if self.extracted is not None: - out.append("Extraction type: %s" %self.extraction_type) + out.append("Extraction type: %s" % self.extraction_type) out.append("--- Extracted ---") out.append(str(self.extracted)) else: out.append("No extracted peaks exist.") - return '\n'.join(out)+'\n' + return "\n".join(out) + "\n" def plot(self, **kwds): """Convenience function to plot data and extracted peaks with matplotlib. @@ -242,10 +258,18 @@ def plot(self, **kwds): x = self.x[rangeslice] y = self.y[rangeslice] dy = self.dy[rangeslice] - mcluster = ModelCluster(self.initial_peaks, self.baseline, x, y, dy, None, self.error_method, self.pf) + mcluster = ModelCluster( + self.initial_peaks, + self.baseline, + x, + y, + dy, + None, + self.error_method, + self.pf, + ) plt.plot(*mcluster.plottable(kwds)) - def read(self, filename): """load PeakExtraction object from file @@ -254,12 +278,14 @@ def read(self, filename): returns self """ try: - self.readstr(open(filename,'rb').read()) - except SrMiseDataFormatError, err: + self.readstr(open(filename, "rb").read()) + except SrMiseDataFormatError as err: logger.exception("") basename = os.path.basename(filename) - emsg = ("Could not open '%s' due to unsupported file format " + - "or corrupted data. [%s]") % (basename, err) + emsg = ( + "Could not open '%s' due to unsupported file format " + + "or corrupted data. 
[%s]" + ) % (basename, err) raise SrMiseFileError(emsg) return self @@ -288,120 +314,120 @@ def readstr(self, datastring): safebf = [] # find where the results section starts - res = re.search(r'^#+ Results\s*(?:#.*\s+)*', datastring, re.M) + res = re.search(r"^#+ Results\s*(?:#.*\s+)*", datastring, re.M) if res: - results = datastring[res.end():].strip() - header = datastring[:res.start()] + results = datastring[res.end() :].strip() + header = datastring[: res.start()] # find data section, and what information it contains - res = re.search(r'^#+ start data\s*(?:#.*\s+)*', header, re.M) + res = re.search(r"^#+ start data\s*(?:#.*\s+)*", header, re.M) if res: - start_data = header[res.end():].strip() - start_data_info = header[res.start():res.end()] - header = header[:res.start()] - res = re.search(r'^(#+L.*)$', start_data_info, re.M) + start_data = header[res.end() :].strip() + start_data_info = header[res.start() : res.end()] + header = header[: res.start()] + res = re.search(r"^(#+L.*)$", start_data_info, re.M) if res: - start_data_info = start_data_info[res.start():res.end()].strip() + start_data_info = start_data_info[res.start() : res.end()].strip() hasx = False hasy = False hasdx = False hasdy = False hasedy = False - res = re.search(r'\bx\b', start_data_info) + res = re.search(r"\bx\b", start_data_info) if res: hasx = True - res = re.search(r'\by\b', start_data_info) + res = re.search(r"\by\b", start_data_info) if res: hasy = True - res = re.search(r'\bdx\b', start_data_info) + res = re.search(r"\bdx\b", start_data_info) if res: hasdx = True - res = re.search(r'\bdy\b', start_data_info) + res = re.search(r"\bdy\b", start_data_info) if res: hasdy = True - res = re.search(r'\edy\b', start_data_info) + res = re.search(r"\edy\b", start_data_info) if res: hasedy = True - res = re.search(r'^#+ Metadata\s*(?:#.*\s+)*', header, re.M) + res = re.search(r"^#+ Metadata\s*(?:#.*\s+)*", header, re.M) if res: - metadata = header[res.end():].strip() - header = header[:res.start()] + metadata = header[res.end() :].strip() + header = header[: res.start()] - res = re.search(r'^#+ SrMiseMetadata\s*(?:#.*\s+)*', header, re.M) + res = re.search(r"^#+ SrMiseMetadata\s*(?:#.*\s+)*", header, re.M) if res: - srmisemetadata = header[res.end():].strip() - header = header[:res.start()] + srmisemetadata = header[res.end() :].strip() + header = header[: res.start()] - res = re.search(r'^#+ InitialPeaks.*$', header, re.M) + res = re.search(r"^#+ InitialPeaks.*$", header, re.M) if res: - initial_peaks = header[res.end():].strip() - header = header[:res.start()] + initial_peaks = header[res.end() :].strip() + header = header[: res.start()] - res = re.search(r'^#+ BaselineObject\s*(?:#.*\s+)*', header, re.M) + res = re.search(r"^#+ BaselineObject\s*(?:#.*\s+)*", header, re.M) if res: - baselineobject = header[res.end():].strip() - header = header[:res.start()] + baselineobject = header[res.end() :].strip() + header = header[: res.start()] - res = re.search(r'^#+ PeakFunctions.*$', header, re.M) + res = re.search(r"^#+ PeakFunctions.*$", header, re.M) if res: - peakfunctions = header[res.end():].strip() - header = header[:res.start()] + peakfunctions = header[res.end() :].strip() + header = header[: res.start()] - res = re.search(r'^#+ BaselineFunctions.*$', header, re.M) + res = re.search(r"^#+ BaselineFunctions.*$", header, re.M) if res: - baselinefunctions = header[res.end():].strip() - header = header[:res.start()] + baselinefunctions = header[res.end() :].strip() + header = header[: res.start()] ### 
Instantiating baseline functions - res = re.split(r'(?m)^#+ BaselineFunction \d+\s*(?:#.*\s+)*', baselinefunctions) + res = re.split(r"(?m)^#+ BaselineFunction \d+\s*(?:#.*\s+)*", baselinefunctions) for s in res[1:]: safebf.append(BaseFunction.factory(s, safebf)) ### Instantiating peak functions - res = re.split(r'(?m)^#+ PeakFunction \d+\s*(?:#.*\s+)*', peakfunctions) + res = re.split(r"(?m)^#+ PeakFunction \d+\s*(?:#.*\s+)*", peakfunctions) for s in res[1:]: safepf.append(BaseFunction.factory(s, safepf)) ### Instantiating Baseline object - if re.match(r'^None$', baselineobject): + if re.match(r"^None$", baselineobject): self.baseline = None - elif re.match(r'^\d+$', baselineobject): + elif re.match(r"^\d+$", baselineobject): self.baseline = safebf[int(baselineobject)] else: self.baseline = Baseline.factory(baselineobject, safebf) ### Instantiating initial peaks - if re.match(r'^None$', initial_peaks): + if re.match(r"^None$", initial_peaks): self.initial_peaks = None else: self.initial_peaks = Peaks() - res = re.split(r'(?m)^#+ InitialPeak\s*(?:#.*\s+)*', initial_peaks) + res = re.split(r"(?m)^#+ InitialPeak\s*(?:#.*\s+)*", initial_peaks) for s in res[1:]: self.initial_peaks.append(Peak.factory(s, safepf)) ### Instantiating srmise metatdata # pf - res = re.search(r'^pf=(.*)$', srmisemetadata, re.M) + res = re.search(r"^pf=(.*)$", srmisemetadata, re.M) self.pf = eval(res.groups()[0].strip()) if self.pf is not None: self.pf = [safepf[i] for i in self.pf] # cres - rx = { 'f' : r'[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?' } + rx = {"f": r"[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?"} regexp = r"\bcres *= *(%(f)s)\b" % rx res = re.search(regexp, srmisemetadata, re.I) self.cres = float(res.groups()[0]) # error_method - res = re.search(r'^ModelEvaluator=(.*)$', srmisemetadata, re.M) + res = re.search(r"^ModelEvaluator=(.*)$", srmisemetadata, re.M) __import__("diffpy.srmise.modelevaluators") module = sys.modules["diffpy.srmise.modelevaluators"] self.error_method = getattr(module, res.groups()[0].strip()) # range - res = re.search(r'^Range=(.*)$', srmisemetadata, re.M) + res = re.search(r"^Range=(.*)$", srmisemetadata, re.M) self.rng = eval(res.groups()[0].strip()) ### Instantiating other metadata @@ -440,10 +466,13 @@ def readstr(self, datastring): for line in start_data.split("\n"): l = line.split() if len(arrays) != len(l): - emsg = ("Number of value fields does not match that given by '%s'" %start_data_info) + emsg = ( + "Number of value fields does not match that given by '%s'" + % start_data_info + ) for a, v in zip(arrays, line.split()): a.append(float(v)) - except (ValueError, IndexError), err: + except (ValueError, IndexError) as err: raise SrMiseDataFormatError(str(err)) if hasx: self.x = np.array(self.x) @@ -456,15 +485,14 @@ def readstr(self, datastring): if hasedy: self.effective_dy = np.array(self.effective_dy) - ### Instantiating results - res = re.search(r'^#+ ModelCluster\s*(?:#.*\s+)*', results, re.M) + res = re.search(r"^#+ ModelCluster\s*(?:#.*\s+)*", results, re.M) if res: - mc = results[res.end():].strip() - results = results[:res.start()] + mc = results[res.end() :].strip() + results = results[: res.start()] # extraction type - res = re.search(r'^extraction_type=(.*)$', results, re.M) + res = re.search(r"^extraction_type=(.*)$", results, re.M) if res: self.extraction_type = eval(res.groups()[0].strip()) else: @@ -472,10 +500,12 @@ def readstr(self, datastring): raise SrMiseDataFormatError(emsg) # extracted - if re.match(r'^None$', mc): + if re.match(r"^None$", mc): 
self.extracted = None else: - self.extracted = ModelCluster.factory(mc, pfbaselist=safepf, blfbaselist=safebf) + self.extracted = ModelCluster.factory( + mc, pfbaselist=safepf, blfbaselist=safebf + ) def write(self, filename): """Write string representation of PeakExtraction instance to file. @@ -483,12 +513,11 @@ def write(self, filename): Parameters filename: the name of the file to write""" bytes = self.writestr() - f = open(filename, 'w') + f = open(filename, "w") f.write(bytes) f.close() return - def writestr(self): """Return string representation of PeakExtraction object.""" import time @@ -500,11 +529,14 @@ def writestr(self): lines = [] # Header - lines.extend([ - 'History written: ' + time.ctime(), - 'produced by ' + getuser(), - 'diffpy.srmise version %s' %__version__, - '##### PDF Peak Extraction' ]) + lines.extend( + [ + "History written: " + time.ctime(), + "produced by " + getuser(), + "diffpy.srmise version %s" % __version__, + "##### PDF Peak Extraction", + ] + ) # Generate list of PeakFunctions and BaselineFunctions # so I can refer to them by index when necessary. @@ -517,7 +549,7 @@ def writestr(self): if self.baseline is not None: if isinstance(self.baseline, BaseFunction): allbf.append(self.baseline) - else: # should be a ModelPart + else: # should be a ModelPart allbf.append(self.baseline.owner()) if self.extracted is not None: allpf.extend(self.extracted.peak_funcs) @@ -532,13 +564,13 @@ def writestr(self): # Indexed baseline functions lines.append("## BaselineFunctions") for i, bf in enumerate(safebf): - lines.append('# BaselineFunction %s' %i) + lines.append("# BaselineFunction %s" % i) lines.append(bf.writestr(safebf)) # Indexed peak functions lines.append("## PeakFunctions") for i, pf in enumerate(safepf): - lines.append('# PeakFunction %s' %i) + lines.append("# PeakFunction %s" % i) lines.append(pf.writestr(safepf)) # Baseline @@ -546,7 +578,7 @@ def writestr(self): if self.baseline is None: lines.append("None") elif self.baseline in safebf: - lines.append('%s' %repr(safebf.index(self.baseline))) + lines.append("%s" % repr(safebf.index(self.baseline))) else: lines.append(self.baseline.writestr(safebf)) @@ -556,34 +588,34 @@ def writestr(self): lines.append("None") else: for ip in self.initial_peaks: - lines.append('# InitialPeak') + lines.append("# InitialPeak") lines.append(ip.writestr(safepf)) - lines.append('# SrMiseMetadata') + lines.append("# SrMiseMetadata") # Extractable peak types if self.pf is None: lines.append("pf=None") else: - lines.append("pf=%s" %repr([safepf.index(p) for p in self.pf])) + lines.append("pf=%s" % repr([safepf.index(p) for p in self.pf])) # Clustering resolution - lines.append('cres=%g' %self.cres) + lines.append("cres=%g" % self.cres) # Model evaluator if self.error_method is None: - lines.append('ModelEvaluator=None') + lines.append("ModelEvaluator=None") else: - lines.append('ModelEvaluator=%s' %self.error_method.__name__) + lines.append("ModelEvaluator=%s" % self.error_method.__name__) # Extraction range - lines.append("Range=%s" %repr(self.rng)) + lines.append("Range=%s" % repr(self.rng)) # Everything not defined by PeakExtraction - lines.append('# Metadata') + lines.append("# Metadata") lines.append(self.writemetadata()) # Raw data used in extraction. 
- lines.append('##### start data') - line = ['#L'] + lines.append("##### start data") + line = ["#L"] numlines = 0 if self.x is not None: line.append("x") @@ -604,29 +636,28 @@ def writestr(self): for i in range(numlines): line = [] if self.x is not None: - line.append("%g" %self.x[i]) + line.append("%g" % self.x[i]) if self.y is not None: - line.append("%g" %self.y[i]) + line.append("%g" % self.y[i]) if self.dx is not None: - line.append("%g" %self.dx[i]) + line.append("%g" % self.dx[i]) if self.dy is not None: - line.append("%g" %self.dy[i]) + line.append("%g" % self.dy[i]) if self.effective_dy is not None: - line.append("%g" %self.effective_dy[i]) + line.append("%g" % self.effective_dy[i]) lines.append(" ".join(line)) - ### Calculated members - lines.append('##### Results') - lines.append('extraction_type=%s' %repr(self.extraction_type)) + lines.append("##### Results") + lines.append("extraction_type=%s" % repr(self.extraction_type)) lines.append("### ModelCluster") if self.extracted is None: - lines.append('None') + lines.append("None") else: lines.append(self.extracted.writestr(pfbaselist=safepf, blfbaselist=safebf)) - datastring = "\n".join(lines)+"\n" + datastring = "\n".join(lines) + "\n" return datastring def writemetadata(self): @@ -647,7 +678,7 @@ def getrangeslice(self): while self.x[low_idx] < max(self.x[0], self.rng[0]): low_idx += 1 hi_idx = len(self.x) - while self.x[hi_idx-1] > min(self.x[-1], self.rng[1]): + while self.x[hi_idx - 1] > min(self.x[-1], self.rng[1]): hi_idx -= 1 return slice(low_idx, hi_idx) @@ -675,18 +706,22 @@ def estimate_peak(self, x, add=True): # Determine clusters using initial_peaks and pre-defined or estimated baseline rangeslice = self.getrangeslice() x1 = self.x[rangeslice] - y1 = self.y[rangeslice] - self.baseline.value(x1) - self.initial_peaks.value(x1) + y1 = ( + self.y[rangeslice] + - self.baseline.value(x1) + - self.initial_peaks.value(x1) + ) dy = self.effective_dy[rangeslice] if x < x1[0] or x > x1[-1]: - emsg = "Argument x=%s outside allowed range (%s, %s)." %(x, x1[0], x1[-1]) + emsg = "Argument x=%s outside allowed range (%s, %s)." % (x, x1[0], x1[-1]) raise ValueError(emsg) # Object performing clustering on data. Note that DataClusters # provides an iterator that clusters the next point and returns # itself. Thus, dclusters and step (below) refer to the same object. - dclusters = DataClusters(x1, y1, self.cres) # Cluster with baseline removed + dclusters = DataClusters(x1, y1, self.cres) # Cluster with baseline removed dclusters.makeclusters() cidx = dclusters.find_nearest_cluster2(x)[0] cslice = dclusters.cut(cidx) @@ -695,15 +730,17 @@ def estimate_peak(self, x, add=True): y1 = y1[cslice] dy = dy[cslice] - mcluster = ModelCluster(None, None, x1, y1, dy, None, self.error_method, self.pf) + mcluster = ModelCluster( + None, None, x1, y1, dy, None, self.error_method, self.pf + ) mcluster.fit() if len(mcluster.model) > 0: if add: - logger.info("Adding peak: %s" %mcluster.model[0]) + logger.info("Adding peak: %s" % mcluster.model[0]) self.add_peaks(mcluster.model) else: - logger.info("Found peak: %s" %mcluster.model[0]) + logger.info("Found peak: %s" % mcluster.model[0]) return mcluster.model[0] else: logger.info("No peaks found.") @@ -725,12 +762,12 @@ def add_peaks(self, peaks): def extract_single(self, recursion_depth=1): """Find ModelCluster with peaks extracted from data. Return ModelCovariance instance at top level. - Every extracted peak is one of the peak functions supplied. 
All - comparisons of different peak models are performed with the class - specified by error_method. + Every extracted peak is one of the peak functions supplied. All + comparisons of different peak models are performed with the class + specified by error_method. - Parameters - recursion_depth: (1) Tracks recursion with extract_single.""" + Parameters + recursion_depth: (1) Tracks recursion with extract_single.""" self.clearcalc() tracer = srmiselog.tracer tracer.pushc() @@ -755,13 +792,17 @@ def extract_single(self, recursion_depth=1): # provides an iterator that clusters the next point and returns # itself. Thus, dclusters and step (below) refer to the same object. - dclusters = DataClusters(x, y, self.cres) # Cluster with baseline removed + dclusters = DataClusters(x, y, self.cres) # Cluster with baseline removed # The data for model clusters includes the baseline y = self.y[rangeslice] - ip.value(x) # List of ModelClusters containing extracted peaks. - mclusters = [ModelCluster(None, bl, x, y, dy, dclusters.cut(0), self.error_method, self.pf)] + mclusters = [ + ModelCluster( + None, bl, x, y, dy, dclusters.cut(0), self.error_method, self.pf + ) + ] # The minimum number of points required to make a valid fit, as # determined by the peak functions and error method used. This is a @@ -777,28 +818,35 @@ def extract_single(self, recursion_depth=1): stepcounter += 1 msg = "\n\n------ Recursion: %s Step: %s Cluster: %s %s ------" - logger.debug(msg, - recursion_depth, - stepcounter, - step.lastcluster_idx, - step.clusters[step.lastcluster_idx] - ) + logger.debug( + msg, + recursion_depth, + stepcounter, + step.lastcluster_idx, + step.clusters[step.lastcluster_idx], + ) # Update mclusters if len(step.clusters) > len(mclusters): # Add a new cluster - mclusters.insert(step.lastcluster_idx, - ModelCluster(None, - bl, - x, - y, - dy, - step.cut(step.lastcluster_idx), - self.error_method, - self.pf)) + mclusters.insert( + step.lastcluster_idx, + ModelCluster( + None, + bl, + x, + y, + dy, + step.cut(step.lastcluster_idx), + self.error_method, + self.pf, + ), + ) else: # Update an existing cluster - mclusters[step.lastcluster_idx].change_slice(step.cut(step.lastcluster_idx)) + mclusters[step.lastcluster_idx].change_slice( + step.cut(step.lastcluster_idx) + ) # Find newly adjacent clusters adjacent = step.find_adjacent_clusters().ravel() @@ -813,7 +861,7 @@ def extract_single(self, recursion_depth=1): assert len(adjacent) <= 3 ### Update cluster fits ### - #1. Refit clusters adjacent to at least one other cluster. + # 1. Refit clusters adjacent to at least one other cluster. for a in adjacent: mclusters[a].fit(justify=True) @@ -842,7 +890,7 @@ def extract_single(self, recursion_depth=1): # enlarged cluster ("new_cluster") or an intermediate cluster # ("adj_cluster"). 
- if step.lastpoint_idx == 0 or step.lastpoint_idx == len(step.x)-1: + if step.lastpoint_idx == 0 or step.lastpoint_idx == len(step.x) - 1: logger.debug("Boundary full: %s", step.lastpoint_idx) full_cluster = ModelCluster(mclusters[step.lastcluster_idx]) full_cluster.fit(True) @@ -853,7 +901,7 @@ def extract_single(self, recursion_depth=1): # Determine neighborhood appropriate for fitting (no larger than combined clusters) if len(full_cluster.model) > 0: - peak_pos = np.array([p['position'] for p in full_cluster.model]) + peak_pos = np.array([p["position"] for p in full_cluster.model]) pivot = peak_pos.searchsorted(border_x) else: peak_pos = np.array([]) @@ -871,25 +919,27 @@ def extract_single(self, recursion_depth=1): elif pivot == 1: # One peak left left_data = full_cluster.slice.indices(len(x))[0] - near_peaks = np.append(near_peaks, pivot-1) + near_peaks = np.append(near_peaks, pivot - 1) else: # left_data -> one more peak to the left - left_data = max(0, x.searchsorted(peak_pos[pivot-2])-1) - near_peaks = np.append(near_peaks, pivot-1) + left_data = max(0, x.searchsorted(peak_pos[pivot - 2]) - 1) + near_peaks = np.append(near_peaks, pivot - 1) if pivot == len(peak_pos): # No peaks right of border_x! - right_data = full_cluster.slice.indices(len(x))[1]-1 - elif pivot == len(peak_pos)-1: + right_data = full_cluster.slice.indices(len(x))[1] - 1 + elif pivot == len(peak_pos) - 1: # One peak right - right_data = full_cluster.slice.indices(len(x))[1]-1 + right_data = full_cluster.slice.indices(len(x))[1] - 1 near_peaks = np.append(near_peaks, pivot) else: # right_data -> one more peak to the right - right_data = min(len(x), x.searchsorted(peak_pos[pivot+1])+1) + right_data = min(len(x), x.searchsorted(peak_pos[pivot + 1]) + 1) near_peaks = np.append(near_peaks, pivot) - other_peaks = np.concatenate([np.arange(0, pivot-1), np.arange(pivot+1, len(peak_pos))]) + other_peaks = np.concatenate( + [np.arange(0, pivot - 1), np.arange(pivot + 1, len(peak_pos))] + ) # Go from indices to lists of peaks. near_peaks = Peaks([full_cluster.model[i] for i in near_peaks]) @@ -900,31 +950,63 @@ def extract_single(self, recursion_depth=1): # The adjusted error is passed unchanged. This may introduce # a few more peaks than is justified, but they can be pruned # with the correct statistics at the top level of recursion. 
- adj_slice = slice(left_data, right_data+1) + adj_slice = slice(left_data, right_data + 1) adj_x = x[adj_slice] - adj_y = y[adj_slice]-other_peaks.value(adj_x) + adj_y = y[adj_slice] - other_peaks.value(adj_x) adj_error = dy[adj_slice] - adj_cluster = ModelCluster(near_peaks, bl, adj_x, adj_y, adj_error, slice(len(adj_x)), self.error_method, self.pf) + adj_cluster = ModelCluster( + near_peaks, + bl, + adj_x, + adj_y, + adj_error, + slice(len(adj_x)), + self.error_method, + self.pf, + ) # Recursively cluster/fit the residual rec_r = adj_x - rec_y = adj_y-near_peaks.value(rec_r) + rec_y = adj_y - near_peaks.value(rec_r) rec_error = adj_error # Quick check to see if there is anything to find min_npars = min([p.npars for p in self.pf]) - checkrec = ModelCluster(None, None, rec_r, rec_y, rec_error, None, self.error_method, self.pf) - recurse = len(near_peaks) > 0 and checkrec.quality().growth_justified(checkrec, min_npars) + checkrec = ModelCluster( + None, + None, + rec_r, + rec_y, + rec_error, + None, + self.error_method, + self.pf, + ) + recurse = len(near_peaks) > 0 and checkrec.quality().growth_justified( + checkrec, min_npars + ) if recurse and recursion_depth < 3: - logger.info("\n*********STARTING RECURSION level %s (full boundary)************" %(recursion_depth+1)) + logger.info( + "\n*********STARTING RECURSION level %s (full boundary)************" + % (recursion_depth + 1) + ) rec_search = PeakExtraction() rec_search.setdata(rec_r, rec_y, None, rec_error) - rec_search.setvars(quiet=True, baseline=bl, cres=self.cres, pf=self.pf, error_method=self.error_method) - rec_search.extract_single(recursion_depth+1) + rec_search.setvars( + quiet=True, + baseline=bl, + cres=self.cres, + pf=self.pf, + error_method=self.error_method, + ) + rec_search.extract_single(recursion_depth + 1) rec = rec_search.extracted - logger.info("*********ENDING RECURSION level %s (full boundary) ************\n" %(recursion_depth+1)) + logger.info( + "*********ENDING RECURSION level %s (full boundary) ************\n" + % (recursion_depth + 1) + ) # Incorporate best peaks from recursive search. adj_cluster.augment(rec) @@ -934,15 +1016,17 @@ def extract_single(self, recursion_depth=1): full_cluster.replacepeaks(adj_cluster.model) full_cluster.fit(True) - msg = ["---Result of full boundary---", - "Original cluster:", - "%s", - "Final cluster:", - "%s", - "---End of combining clusters---"] - logger.debug("\n".join(msg), - mclusters[step.lastcluster_idx], - full_cluster) + msg = [ + "---Result of full boundary---", + "Original cluster:", + "%s", + "Final cluster:", + "%s", + "---End of combining clusters---", + ] + logger.debug( + "\n".join(msg), mclusters[step.lastcluster_idx], full_cluster + ) mclusters[step.lastcluster_idx] = full_cluster ### End update cluster fits ### @@ -954,21 +1038,22 @@ def extract_single(self, recursion_depth=1): msg = ["Current model"] msg.extend(["%s" for m in mclusters]) - logger.debug("\n".join(msg), - *[m.model for m in mclusters]) + logger.debug("\n".join(msg), *[m.model for m in mclusters]) - cleft = step.clusters[idx-1] + cleft = step.clusters[idx - 1] cright = step.clusters[idx] - new_cluster = ModelCluster.join_adjacent(mclusters[idx-1], mclusters[idx]) + new_cluster = ModelCluster.join_adjacent( + mclusters[idx - 1], mclusters[idx] + ) # Estimate coordinate where clusters combine. 
- border_x = .5*(x[cleft[1]]+x[cright[0]]) - border_y = .5*(y[cleft[1]]+y[cright[0]]) + border_x = 0.5 * (x[cleft[1]] + x[cright[0]]) + border_y = 0.5 * (y[cleft[1]] + y[cright[0]]) # Determine neighborhood appropriate for fitting (no larger than combined clusters) if len(new_cluster.model) > 0: - peak_pos = np.array([p['position'] for p in new_cluster.model]) + peak_pos = np.array([p["position"] for p in new_cluster.model]) pivot = peak_pos.searchsorted(border_x) else: peak_pos = np.array([]) @@ -982,29 +1067,31 @@ def extract_single(self, recursion_depth=1): # interpeak range goes from peak to peak of next nearest peaks, although their contributions to the data are still removed. if pivot == 0: # No peaks left of border_x! - left_data=new_cluster.slice.indices(len(x))[0] + left_data = new_cluster.slice.indices(len(x))[0] elif pivot == 1: # One peak left left_data = new_cluster.slice.indices(len(x))[0] - near_peaks = np.append(near_peaks, pivot-1) + near_peaks = np.append(near_peaks, pivot - 1) else: # left_data -> one more peak to the left - left_data = max(0,x.searchsorted(peak_pos[pivot-2])-1) - near_peaks = np.append(near_peaks, pivot-1) + left_data = max(0, x.searchsorted(peak_pos[pivot - 2]) - 1) + near_peaks = np.append(near_peaks, pivot - 1) if pivot == len(peak_pos): # No peaks right of border_x! - right_data = new_cluster.slice.indices(len(x))[1]-1 - elif pivot == len(peak_pos)-1: + right_data = new_cluster.slice.indices(len(x))[1] - 1 + elif pivot == len(peak_pos) - 1: # One peak right - right_data = new_cluster.slice.indices(len(x))[1]-1 + right_data = new_cluster.slice.indices(len(x))[1] - 1 near_peaks = np.append(near_peaks, pivot) else: # right_data -> one more peak to the right - right_data = min(len(x), x.searchsorted(peak_pos[pivot+1])+1) + right_data = min(len(x), x.searchsorted(peak_pos[pivot + 1]) + 1) near_peaks = np.append(near_peaks, pivot) - other_peaks = np.concatenate([np.arange(0, pivot-1), np.arange(pivot+1, len(peak_pos))]) + other_peaks = np.concatenate( + [np.arange(0, pivot - 1), np.arange(pivot + 1, len(peak_pos))] + ) # Go from indices to lists of peaks. near_peaks = Peaks([new_cluster.model[i] for i in near_peaks]) @@ -1015,17 +1102,35 @@ def extract_single(self, recursion_depth=1): # The adjusted error is passed unchanged. This may introduce # a few more peaks than is justified, but they can be pruned # with the correct statistics at the top level of recursion. - adj_slice = slice(left_data, right_data+1) + adj_slice = slice(left_data, right_data + 1) adj_x = x[adj_slice] - adj_y = y[adj_slice]-other_peaks.value(adj_x) + adj_y = y[adj_slice] - other_peaks.value(adj_x) adj_error = dy[adj_slice] #### Perform recursion on a version that is scaled at the # border, as well as on that is simply fit beforehand. In # many cases these lead to nearly identical results, but # occasionally one works much better than the other. - adj_cluster1 = ModelCluster(near_peaks.copy(), bl, adj_x, adj_y, adj_error, slice(len(adj_x)), self.error_method, self.pf) - adj_cluster2 = ModelCluster(near_peaks.copy(), bl, adj_x, adj_y, adj_error, slice(len(adj_x)), self.error_method, self.pf) + adj_cluster1 = ModelCluster( + near_peaks.copy(), + bl, + adj_x, + adj_y, + adj_error, + slice(len(adj_x)), + self.error_method, + self.pf, + ) + adj_cluster2 = ModelCluster( + near_peaks.copy(), + bl, + adj_x, + adj_y, + adj_error, + slice(len(adj_x)), + self.error_method, + self.pf, + ) # Adjust cluster at border if there is at least one peak on # either side. 
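The full-boundary and cluster-joining branches above locate the refit neighborhood with the same searchsorted pivot arithmetic. A toy illustration with made-up positions (none of these values come from the package), for the generic case with peaks on both sides of the border:

import numpy as np

peak_pos = np.array([1.0, 2.5, 4.0, 5.5])  # sorted peak positions
border_x = 3.0                              # where the two clusters meet

# pivot counts how many peaks lie left of the border.
pivot = peak_pos.searchsorted(border_x)     # -> 2

# Peaks flanking the border are refit ("near_peaks"); the others only
# have their contribution subtracted from the data ("other_peaks").
near_idx = [pivot - 1, pivot]               # positions 2.5 and 4.0
other_idx = np.concatenate([np.arange(0, pivot - 1),
                            np.arange(pivot + 1, len(peak_pos))])
print(near_idx, list(other_idx))            # [1, 2] [0, 3]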
@@ -1034,23 +1139,44 @@ def extract_single(self, recursion_depth=1): # Recursively cluster/fit the residual rec_r1 = adj_x - #rec_y1 = adj_y - near_peaks.value(rec_r1) + # rec_y1 = adj_y - near_peaks.value(rec_r1) rec_y1 = adj_y - adj_cluster1.model.value(rec_r1) rec_error1 = adj_error # Quick check to see if there is anything to find min_npars = min([p.npars for p in self.pf]) - checkrec = ModelCluster(None, None, rec_r1, rec_y1, rec_error1, None, self.error_method, self.pf) + checkrec = ModelCluster( + None, + None, + rec_r1, + rec_y1, + rec_error1, + None, + self.error_method, + self.pf, + ) recurse1 = checkrec.quality().growth_justified(checkrec, min_npars) if recurse1 and recursion_depth < 3: - logger.info("\n*********STARTING RECURSION level %s (reduce at border)************" %(recursion_depth+1)) + logger.info( + "\n*********STARTING RECURSION level %s (reduce at border)************" + % (recursion_depth + 1) + ) rec_search1 = PeakExtraction() rec_search1.setdata(rec_r1, rec_y1, None, rec_error1) - rec_search1.setvars(quiet=True, baseline=bl, cres=self.cres, pf=self.pf, error_method=self.error_method) - rec_search1.extract_single(recursion_depth+1) + rec_search1.setvars( + quiet=True, + baseline=bl, + cres=self.cres, + pf=self.pf, + error_method=self.error_method, + ) + rec_search1.extract_single(recursion_depth + 1) rec1 = rec_search1.extracted - logger.info("*********ENDING RECURSION level %s (reduce at border) ************\n" %(recursion_depth+1)) + logger.info( + "*********ENDING RECURSION level %s (reduce at border) ************\n" + % (recursion_depth + 1) + ) # Incorporate best peaks from recursive search. adj_cluster1.augment(rec1) @@ -1060,23 +1186,46 @@ def extract_single(self, recursion_depth=1): # Recursively cluster/fit the residual rec_r2 = adj_x - #rec_y2 = adj_y - near_peaks.value(rec_r2) + # rec_y2 = adj_y - near_peaks.value(rec_r2) rec_y2 = adj_y - adj_cluster2.model.value(rec_r2) rec_error2 = adj_error # Quick check to see if there is anything to find min_npars = min([p.npars for p in self.pf]) - checkrec = ModelCluster(None, None, rec_r2, rec_y2, rec_error2, None, self.error_method, self.pf) - recurse2 = len(near_peaks) > 0 and checkrec.quality().growth_justified(checkrec, min_npars) + checkrec = ModelCluster( + None, + None, + rec_r2, + rec_y2, + rec_error2, + None, + self.error_method, + self.pf, + ) + recurse2 = len(near_peaks) > 0 and checkrec.quality().growth_justified( + checkrec, min_npars + ) if recurse2 and recursion_depth < 3: - logger.info("\n*********STARTING RECURSION level %s (prefit)************" %(recursion_depth+1)) + logger.info( + "\n*********STARTING RECURSION level %s (prefit)************" + % (recursion_depth + 1) + ) rec_search2 = PeakExtraction() rec_search2.setdata(rec_r2, rec_y2, None, rec_error2) - rec_search2.setvars(quiet=True, baseline=bl, cres=self.cres, pf=self.pf, error_method=self.error_method) - rec_search2.extract_single(recursion_depth+1) + rec_search2.setvars( + quiet=True, + baseline=bl, + cres=self.cres, + pf=self.pf, + error_method=self.error_method, + ) + rec_search2.extract_single(recursion_depth + 1) rec2 = rec_search2.extracted - logger.info("*********ENDING RECURSION level %s (prefit) ************\n" %(recursion_depth+1)) + logger.info( + "*********ENDING RECURSION level %s (prefit) ************\n" + % (recursion_depth + 1) + ) # Incorporate best peaks from recursive search. 
adj_cluster2.augment(rec2) @@ -1095,22 +1244,22 @@ def extract_single(self, recursion_depth=1): new_cluster.fit(True) - - msg = ["---Result of combining clusters---", - "First cluster:", - "%s", - "Second cluster:", - "%s", - "Resulting cluster:", - "%s", - "---End of combining clusters---"] - - logger.debug("\n".join(msg), - mclusters[idx-1], - mclusters[idx], - new_cluster) - - mclusters[idx-1] = new_cluster + msg = [ + "---Result of combining clusters---", + "First cluster:", + "%s", + "Second cluster:", + "%s", + "Resulting cluster:", + "%s", + "---End of combining clusters---", + ] + + logger.debug( + "\n".join(msg), mclusters[idx - 1], mclusters[idx], new_cluster + ) + + mclusters[idx - 1] = new_cluster del mclusters[idx] ### End combine adjacent clusters loop ### @@ -1121,7 +1270,6 @@ def extract_single(self, recursion_depth=1): tracer.emit(*mclusters) - ### End main extraction loop ### ################################ @@ -1166,13 +1314,20 @@ def fit_single(self): dy = self.effective_dy[rngslice] # Set up ModelCluster - ext = ModelCluster(self.initial_peaks, self.baseline, x, y, dy, None, - self.error_method, self.pf) + ext = ModelCluster( + self.initial_peaks, + self.baseline, + x, + y, + dy, + None, + self.error_method, + self.pf, + ) # Fit model with baseline and calculate covariance matrix cov = ModelCovariance() - ext.fit(fitbaseline=True, estimate=False, cov=cov, - cov_format="default_output") + ext.fit(fitbaseline=True, estimate=False, cov=cov, cov_format="default_output") # Update calculated instance variables self.extraction_type = "fit_single" @@ -1180,11 +1335,12 @@ def fit_single(self): return cov -#end PeakExtraction class + +# end PeakExtraction class # simple test code -if __name__ == '__main__': +if __name__ == "__main__": from numpy.random import randn @@ -1195,13 +1351,13 @@ def fit_single(self): srmiselog.setlevel("info") srmiselog.liveplotting(False) - pf = GaussianOverR(.7) - res = .01 + pf = GaussianOverR(0.7) + res = 0.01 - pars = [[3, .2, 10], [3.5, .2, 10]] + pars = [[3, 0.2, 10], [3.5, 0.2, 10]] ideal_peaks = Peaks([pf.actualize(p, "pwa") for p in pars]) - r = np.arange(2,4,res) + r = np.arange(2, 4, res) y = ideal_peaks.value(r) + randn(len(r)) err = np.ones(len(r)) @@ -1209,14 +1365,14 @@ def fit_single(self): te = PeakExtraction() te.setdata(r, y, None, err) - te.setvars(rng=[1.51,10.], pf=[pf], cres=.1, effective_dy = 1.5*err) + te.setvars(rng=[1.51, 10.0], pf=[pf], cres=0.1, effective_dy=1.5 * err) te.extract_single() - print "--- Actual Peak parameters ---" - print ideal_peaks + print("--- Actual Peak parameters ---") + print(ideal_peaks) - print "\n--- After extraction ---" - print te + print("\n--- After extraction ---") + print(te) te.plot() - raw_input() + input() diff --git a/diffpy/srmise/peakstability.py b/diffpy/srmise/peakstability.py index 3897a2d..fcf5e31 100644 --- a/diffpy/srmise/peakstability.py +++ b/diffpy/srmise/peakstability.py @@ -27,7 +27,7 @@ class PeakStability: """Utility to test robustness of peaks. 
results: [error scalar, model, bl, dr] - ppe: a PDFPeakExtraction instance """ + ppe: a PDFPeakExtraction instance""" def __init__(self): self.results = [] @@ -39,11 +39,11 @@ def setppe(self, ppe): def load(self, filename): try: - import cPickle as pickle + import cPickle as pickle except: - import pickle + import pickle - in_s = open(filename, 'rb') + in_s = open(filename, "rb") try: (self.results, ppestr) = pickle.load(in_s) self.ppe = PDFPeakExtraction() @@ -67,10 +67,10 @@ def load(self, filename): def save(self, filename): try: - import cPickle as pickle + import cPickle as pickle except: - import pickle - out_s = open(filename, 'wb') + import pickle + out_s = open(filename, "wb") try: # Write to the stream outstr = self.ppe.writestr() @@ -82,18 +82,23 @@ def save(self, filename): if r[2] is None: bldict = None else: - bldict = {"pars":r[2].pars, "free":r[2].free, "removable":r[2].removable, "static_owner":r[2].static_owner} + bldict = { + "pars": r[2].pars, + "free": r[2].free, + "removable": r[2].removable, + "static_owner": r[2].static_owner, + } results2.append([r[0], r[1], bldict, r[3]]) pickle.dump([results2, outstr], out_s) finally: out_s.close() - def plotseries(self, style='o', **kwds): + def plotseries(self, style="o", **kwds): plt.figure() plt.ioff() for e, r, bl, dr in self.results: peakpos = [p["position"] for p in r] - es = [e]*len(peakpos) + es = [e] * len(peakpos) plt.plot(peakpos, es, style, **kwds) plt.ion() plt.draw() @@ -103,19 +108,32 @@ def plot(self, **kwds): plt.clf() plt.plot(*self.ppe.extracted.plottable(), **kwds) q = self.ppe.extracted.quality() - plt.suptitle("[%i/%i]\n" - "Uncertainty: %6.3f. Peaks: %i.\n" - "Quality: %6.3f. Chi-square: %6.3f" - %(self.current+1, len(self.results), self.ppe.effective_dy[0], len(self.ppe.extracted.model), q.stat, q.chisq)) + plt.suptitle( + "[%i/%i]\n" + "Uncertainty: %6.3f. Peaks: %i.\n" + "Quality: %6.3f. 
Chi-square: %6.3f" + % ( + self.current + 1, + len(self.results), + self.ppe.effective_dy[0], + len(self.ppe.extracted.model), + q.stat, + q.chisq, + ) + ) def setcurrent(self, idx): """Make the idxth model the active one.""" self.current = idx if idx is not None: result = self.results[idx] - self.ppe.setvars(quiet=True, effective_dy=result[0]*np.ones(len(self.ppe.x))) + self.ppe.setvars( + quiet=True, effective_dy=result[0] * np.ones(len(self.ppe.x)) + ) (r, y, dr, dy) = self.ppe.resampledata(result[3]) - self.ppe.extracted = ModelCluster(result[1], result[2], r, y, dy, None, self.ppe.error_method, self.ppe.pf) + self.ppe.extracted = ModelCluster( + result[1], result[2], r, y, dy, None, self.ppe.error_method, self.ppe.pf + ) else: self.ppe.clearcalc() @@ -142,7 +160,7 @@ def animate(self, results=None, step=False, **kwds): plt.ion() plt.draw() if step: - raw_input() + input() self.setcurrent(oldcurrent) @@ -153,18 +171,22 @@ def run(self, err, savecovs=False): self.results = [] covs = [] for i, e in enumerate(err): - print "---- Running for uncertainty %s (%i/%i) ----" %(e, i, len(err)) + print("---- Running for uncertainty %s (%i/%i) ----" % (e, i, len(err))) self.ppe.clearcalc() self.ppe.setvars(effective_dy=e) if savecovs: covs.append(self.ppe.extract()) else: self.ppe.extract() - dr = (self.ppe.extracted.r_cluster[-1]-self.ppe.extracted.r_cluster[0])/(len(self.ppe.extracted.r_cluster)-1) - self.results.append([e, self.ppe.extracted.model, self.ppe.extracted.baseline, dr]) + dr = ( + self.ppe.extracted.r_cluster[-1] - self.ppe.extracted.r_cluster[0] + ) / (len(self.ppe.extracted.r_cluster) - 1) + self.results.append( + [e, self.ppe.extracted.model, self.ppe.extracted.baseline, dr] + ) for e, r, bl, dr in self.results: - print "---- Results for uncertainty %s ----" %e - print r + print("---- Results for uncertainty %s ----" % e) + print(r) return covs diff --git a/diffpy/srmise/srmiselog.py b/diffpy/srmise/srmiselog.py index 3ea2c50..d971b73 100644 --- a/diffpy/srmise/srmiselog.py +++ b/diffpy/srmise/srmiselog.py @@ -48,11 +48,13 @@ defaultformat = "%(message)s" defaultlevel = logging.INFO -LEVELS = {'debug': logging.DEBUG, - 'info': logging.INFO, - 'warning': logging.WARNING, - 'error': logging.ERROR, - 'critical': logging.CRITICAL} +LEVELS = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} ### Set up logging to stdout ### logger = logging.getLogger("diffpy.srmise") @@ -72,13 +74,15 @@ liveplots = False wait = False + def addfilelog(filename, level=defaultlevel, format=defaultformat): """Log output from diffpy.srmise in specified file. Parameters filename: Name of file to receiving output level: The logging level - format: A string defining format of output messages conforming to logging package.""" + format: A string defining format of output messages conforming to logging package. + """ global fh fh = logging.FileHandler(filename) fh.setLevel(level) @@ -86,6 +90,7 @@ def addfilelog(filename, level=defaultlevel, format=defaultformat): fh.setFormatter(formatter) logger.addHandler(fh) + def setfilelevel(level): """Set level of file logger. @@ -101,6 +106,7 @@ def setfilelevel(level): emsg = "File handler does not exist, cannot set its level." raise SrMiseLogError(emsg) + def setlevel(level): """Set level of default (stdout) logger. 
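The helpers above form a small convenience layer over the standard logging module. A minimal usage sketch, assuming the module-level API shown in this file; the log filename is hypothetical, and the lowercase level string relies on the LEVELS mapping defined above:

from diffpy.srmise import srmiselog

srmiselog.setlevel("info")               # stdout handler, as in the package test code
srmiselog.addfilelog("extraction.log")   # hypothetical file, default level INFO
srmiselog.liveplotting(False)            # disable live plotting during extraction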
@@ -112,6 +118,7 @@ def setlevel(level): if level < logger.getEffectiveLevel(): logger.setLevel(level) + def liveplotting(lp, w=False): """Set whether or not to use live plotting. @@ -138,6 +145,7 @@ def liveplotting(lp, w=False): ### TracePeaks. Primary purpose is to enable creating movies. ### + class TracePeaks(object): """Output trace information during peak extraction.""" @@ -191,15 +199,20 @@ def maketrace(self, *args, **kwds): clusters.append(m.slice) for m in args[1:]: mc.replacepeaks(m.model) - return {"mc":mc, "clusters":clusters, "recursion":self.recursion, "counter":self.counter} + return { + "mc": mc, + "clusters": clusters, + "recursion": self.recursion, + "counter": self.counter, + } def writestr(self, trace): """Return string representation of current trace.""" lines = [] lines.append("### Trace") - lines.append("counter=%i" %trace["counter"]) - lines.append("recursion=%i" %trace["recursion"]) - lines.append("clusters=%s" %trace["clusters"]) + lines.append("counter=%i" % trace["counter"]) + lines.append("recursion=%i" % trace["recursion"]) + lines.append("clusters=%s" % trace["clusters"]) lines.append("### ModelCluster") lines.append(trace["mc"].writestr()) @@ -207,8 +220,8 @@ def writestr(self, trace): def write(self, trace): """Write current trace to file.""" - filename = "%s_%i" %(self.filebase, trace["counter"]) - f = open(filename, 'w') + filename = "%s_%i" % (self.filebase, trace["counter"]) + f = open(filename, "w") bytes = self.writestr(trace) f.write(bytes) f.close() @@ -225,12 +238,14 @@ def read(self, filename): "mc" - A ModelCluster instance "recursion" - The recursion level of mc""" try: - return self.readstr(open(filename,'rb').read()) - except SrMiseDataFormatError, err: + return self.readstr(open(filename, "rb").read()) + except SrMiseDataFormatError as err: logger.exception("") basename = os.path.basename(filename) - emsg = ("Could not open '%s' due to unsupported file format " + - "or corrupted data. [%s]") % (basename, err) + emsg = ( + "Could not open '%s' due to unsupported file format " + + "or corrupted data. [%s]" + ) % (basename, err) raise SrMiseFileError(emsg) return None @@ -247,43 +262,49 @@ def readstr(self, datastring): "recursion" - The recursion level of mc""" # find where the ModelCluster section starts - res = re.search(r'^#+ ModelCluster\s*(?:#.*\s+)*', datastring, re.M) + res = re.search(r"^#+ ModelCluster\s*(?:#.*\s+)*", datastring, re.M) if res: - header = datastring[:res.start()] - mc = datastring[res.end():].strip() + header = datastring[: res.start()] + mc = datastring[res.end() :].strip() else: emsg = "Required section 'ModelCluster' not found." raise SrMiseDataFormatError(emsg) # instantiate ModelCluster - if re.match(r'^None$', mc): + if re.match(r"^None$", mc): mc = None else: from diffpy.srmise.modelcluster import ModelCluster + mc = ModelCluster.factory(mc) - res = re.search(r'^clusters=(.*)$', header, re.M) + res = re.search(r"^clusters=(.*)$", header, re.M) if res: clusters = eval(res.groups()[0].strip()) else: emsg = "Required field 'clusters' not found." raise SrMiseDataFormatError(emsg) - res = re.search(r'^recursion=(.*)$', header, re.M) + res = re.search(r"^recursion=(.*)$", header, re.M) if res: recursion = eval(res.groups()[0].strip()) else: emsg = "Required field 'recursion' not found." raise SrMiseDataFormatError(emsg) - res = re.search(r'^counter=(.*)$', header, re.M) + res = re.search(r"^counter=(.*)$", header, re.M) if res: counter = eval(res.groups()[0].strip()) else: emsg = "Required field 'counter' not found." 
raise SrMiseDataFormatError(emsg) - return {"mc":mc, "clusters":clusters, "recursion":self.recursion, "counter":self.counter} + return { + "mc": mc, + "clusters": clusters, + "recursion": self.recursion, + "counter": self.counter, + } def pushr(self): """Enter a layer of recursion, and return new level.""" @@ -315,16 +336,24 @@ def reset_trace(self): # filter property def setfilter(self, filter): - self.__filter = compile(" and ".join(["(%s)" %f for f in filter]), '', 'eval') - def getfilter(self): return self.__filter + self.__filter = compile( + " and ".join(["(%s)" % f for f in filter]), "", "eval" + ) + + def getfilter(self): + return self.__filter + filter = property(getfilter, setfilter) + ### End of class TracePeaks + def settracer(**kwds): global tracer tracer = TracePeaks(**kwds) return tracer + # Default tracer never emits tracer = settracer()
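Beyond the mechanical reformatting, the substantive edits in this patch reduce to three Python 2 to 3 conversions: the print statement becomes the print() function, the comma form of the except clause (except ValueError, err) becomes the as form (except ValueError as err), and raw_input() becomes input(). A condensed, self-contained sketch of those idioms with illustrative names only (none of these identifiers come from the package):

def py3_style(values):
    """Demonstrate the three idioms this patch converts to."""
    total = 0.0
    for v in values:
        try:
            total += float(v)
        except (TypeError, ValueError) as err:   # Python 2 wrote: except (...), err:
            print("skipping %r: %s" % (v, err))  # Python 2 wrote: print "...", v
    print("total:", total)
    return total


py3_style(["1.5", None, "2.5"])  # one skip message, then "total: 4.0"

# Pausing for the user, as pdfpeakextraction.py now does after plt.show():
# answer = input()               # Python 2 wrote: raw_input()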