
cleaning #31

Merged · 6 commits · Jul 30, 2024

Changes from all commits
23 changes: 0 additions & 23 deletions diffpy/srmise/__init__.py
@@ -13,28 +13,5 @@

"""Tools for peak extraction from PDF."""

__all__ = [
"basefunction",
"srmiseerrors",
"srmiselog",
"dataclusters",
"modelcluster",
"modelparts",
"pdfdataset",
"pdfpeakextraction",
"peakextraction",
"peakstability",
"multimodelselection",
]

from basefunction import BaseFunction
from dataclusters import DataClusters
from modelcluster import ModelCluster, ModelCovariance
from modelparts import ModelPart, ModelParts
from multimodelselection import MultimodelSelection
from pdfdataset import PDFDataSet
from pdfpeakextraction import PDFPeakExtraction
from peakextraction import PeakExtraction
from peakstability import PeakStability

from diffpy.srmise.version import __version__
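This hunk drops the package-level `__all__` list and the implicit-relative re-exports from `diffpy/srmise/__init__.py`, keeping only the version import. A minimal sketch of how client code might import the same classes afterwards, using submodule paths visible elsewhere in this diff (illustrative only, not part of the change):

```python
# Import directly from the defining submodules instead of the removed
# package-level re-exports (module names taken from the deleted lines above).
from diffpy.srmise.modelcluster import ModelCluster, ModelCovariance
from diffpy.srmise.peakextraction import PeakExtraction
```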
23 changes: 10 additions & 13 deletions diffpy/srmise/modelevaluators/base.py
@@ -43,9 +43,6 @@

import numpy as np

import diffpy.srmise.srmiselog
from diffpy.srmise.srmiseerrors import SrMiseModelEvaluatorError

logger = logging.getLogger("diffpy.srmise")


@@ -68,9 +65,9 @@ def __lt__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

if self.higher_is_better:
if self.higher_is_better is not None:
return self.stat < other.stat
else:
return other.stat < self.stat
@@ -79,9 +76,9 @@ def __le__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

if self.higher_is_better:
if self.higher_is_better is not None:
return self.stat <= other.stat
else:
return other.stat <= self.stat
@@ -90,25 +87,25 @@ def __eq__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

return self.stat == other.stat

def __ne__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

return self.stat != other.stat

def __gt__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

if self.higher_is_better:
if self.higher_is_better is not None:
return self.stat > other.stat
else:
return other.stat > self.stat
@@ -117,9 +114,9 @@ def __ge__(self, other):
""" """

assert self.method == other.method # Comparison between same types required
assert self.stat != None and other.stat != None # The statistic must already be calculated
assert self.stat is not None and other.stat is not None # The statistic must already be calculated

if self.higher_is_better:
if self.higher_is_better is not None:
return self.stat >= other.stat
else:
return other.stat >= self.stat
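Throughout these comparison methods the equality tests against `None` become identity tests (`self.stat is not None`). A standalone sketch, not taken from the codebase, of why linters flag `!= None`: `__eq__` and `__ne__` can be overridden, so only the identity check against the `None` singleton is unambiguous.

```python
class AlwaysEqual:
    # Deliberately pathological: claims equality with everything.
    def __eq__(self, other):
        return True

x = AlwaysEqual()
print(x != None)      # False, because the default __ne__ inverts the overridden __eq__
print(x is not None)  # True: identity against the None singleton cannot be fooled
```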
2 changes: 1 addition & 1 deletion diffpy/srmise/modelparts.py
@@ -207,7 +207,7 @@ def fit(

return

#### Notes on the fit f
# # Notes on the fit f
# f[0] = solution
# f[1] = Uses the fjac and ipvt optional outputs to construct an estimate of the jacobian around the solution.
# None if a singular matrix encountered (indicates very flat curvature in some direction).
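The reformatted "Notes on the fit f" comment describes the tuple returned by a full-output least-squares call; the `fjac` and `ipvt` outputs it mentions are those of `scipy.optimize.leastsq`. A self-contained sketch of that return convention (assuming `leastsq(..., full_output=True)`; the straight-line model and data are illustrative only):

```python
import numpy as np
from scipy.optimize import leastsq

def residuals(p, x, y):
    # Residuals of a simple straight-line model y = p[0] * x + p[1]
    return y - (p[0] * x + p[1])

x = np.linspace(0.0, 10.0, 50)
y = 3.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)

f = leastsq(residuals, [1.0, 0.0], args=(x, y), full_output=True)
solution = f[0]  # f[0]: best-fit parameters
cov_x = f[1]     # f[1]: covariance estimate built from fjac and ipvt,
                 #       or None if a singular matrix was encountered
```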
2 changes: 1 addition & 1 deletion diffpy/srmise/multimodelselection.py
@@ -527,7 +527,7 @@ def plot3dclassprobs(self, **kwds):
verts.append(np.concatenate([[p0], zip(xs, ys), [p1], [p0]]))
zlabels.append(i)

### Define face colors
# Define face colors
fc = np.array([len(self.classes[z]) for z in zlabels])
if class_size is "fraction":
fc = fc / float(len(self.results))
44 changes: 22 additions & 22 deletions diffpy/srmise/peakextraction.py
@@ -375,25 +375,25 @@ def readstr(self, datastring):
baselinefunctions = header[res.end() :].strip()
header = header[: res.start()]

### Instantiating baseline functions
# Instantiating baseline functions
res = re.split(r"(?m)^#+ BaselineFunction \d+\s*(?:#.*\s+)*", baselinefunctions)
for s in res[1:]:
safebf.append(BaseFunction.factory(s, safebf))

### Instantiating peak functions
# Instantiating peak functions
res = re.split(r"(?m)^#+ PeakFunction \d+\s*(?:#.*\s+)*", peakfunctions)
for s in res[1:]:
safepf.append(BaseFunction.factory(s, safepf))

### Instantiating Baseline object
# Instantiating Baseline object
if re.match(r"^None$", baselineobject):
self.baseline = None
elif re.match(r"^\d+$", baselineobject):
self.baseline = safebf[int(baselineobject)]
else:
self.baseline = Baseline.factory(baselineobject, safebf)

### Instantiating initial peaks
# Instantiating initial peaks
if re.match(r"^None$", initial_peaks):
self.initial_peaks = None
else:
@@ -402,7 +402,7 @@ def readstr(self, datastring):
for s in res[1:]:
self.initial_peaks.append(Peak.factory(s, safepf))

### Instantiating srmise metatdata
# Instantiating srmise metatdata

# pf
res = re.search(r"^pf=(.*)$", srmisemetadata, re.M)
@@ -426,10 +426,10 @@ def readstr(self, datastring):
res = re.search(r"^Range=(.*)$", srmisemetadata, re.M)
self.rng = eval(res.groups()[0].strip())

### Instantiating other metadata
# Instantiating other metadata
self.readmetadata(metadata)

### Instantiating start data
# Instantiating start data
# read actual data - x, y, dx, dy, plus effective_dy
arrays = []
if hasx:
@@ -478,7 +478,7 @@ def readstr(self, datastring):
if hasedy:
self.effective_dy = np.array(self.effective_dy)

### Instantiating results
# Instantiating results
res = re.search(r"^#+ ModelCluster\s*(?:#.*\s+)*", results, re.M)
if res:
mc = results[res.end() :].strip()
@@ -638,7 +638,7 @@ def writestr(self):
line.append("%g" % self.effective_dy[i])
lines.append(" ".join(line))

### Calculated members
# Calculated members
lines.append("##### Results")
lines.append("extraction_type=%s" % repr(self.extraction_type))

@@ -792,8 +792,8 @@ def extract_single(self, recursion_depth=1):

stepcounter = 0

############################
### Main extraction loop ###
# #########################
# Main extraction loop ###
for step in dclusters:

stepcounter += 1
@@ -839,7 +839,7 @@ def extract_single(self, recursion_depth=1):
# three clusters can become adjacent at any given step.
assert len(adjacent) <= 3

### Update cluster fits ###
# Update cluster fits ###
# 1. Refit clusters adjacent to at least one other cluster.
for a in adjacent:
mclusters[a].fit(justify=True)
@@ -922,7 +922,7 @@ def extract_single(self, recursion_depth=1):
near_peaks = Peaks([full_cluster.model[i] for i in near_peaks])
other_peaks = Peaks([full_cluster.model[i] for i in other_peaks])

### Remove contribution of peaks outside neighborhood
# Remove contribution of peaks outside neighborhood
# Define range of fitting/recursion to the interpeak range
# The adjusted error is passed unchanged. This may introduce
# a few more peaks than is justified, but they can be pruned
@@ -985,7 +985,7 @@ def extract_single(self, recursion_depth=1):
# Incorporate best peaks from recursive search.
adj_cluster.augment(rec)

### Select which model to use
# Select which model to use
full_cluster.model = other_peaks
full_cluster.replacepeaks(adj_cluster.model)
full_cluster.fit(True)
@@ -1001,9 +1001,9 @@ def extract_single(self, recursion_depth=1):
logger.debug("\n".join(msg), mclusters[step.lastcluster_idx], full_cluster)

mclusters[step.lastcluster_idx] = full_cluster
### End update cluster fits ###
# End update cluster fits ###

### Combine adjacent clusters ###
# Combine adjacent clusters ###

# Iterate in reverse order to preserve earlier indices
for idx in adjacent[-1:0:-1]:
@@ -1065,7 +1065,7 @@ def extract_single(self, recursion_depth=1):
near_peaks = Peaks([new_cluster.model[i] for i in near_peaks])
other_peaks = Peaks([new_cluster.model[i] for i in other_peaks])

### Remove contribution of peaks outside neighborhood
# Remove contribution of peaks outside neighborhood
# Define range of fitting/recursion to the interpeak range
# The adjusted error is passed unchanged. This may introduce
# a few more peaks than is justified, but they can be pruned
@@ -1075,7 +1075,7 @@ def extract_single(self, recursion_depth=1):
adj_y = y[adj_slice] - other_peaks.value(adj_x)
adj_error = dy[adj_slice]

#### Perform recursion on a version that is scaled at the
# # Perform recursion on a version that is scaled at the
# border, as well as on that is simply fit beforehand. In
# many cases these lead to nearly identical results, but
# occasionally one works much better than the other.
@@ -1194,7 +1194,7 @@ def extract_single(self, recursion_depth=1):
# Incorporate best peaks from recursive search.
adj_cluster2.augment(rec2)

### Select which model to use
# Select which model to use
new_cluster.model = other_peaks
rej_cluster = ModelCluster(new_cluster)
q1 = adj_cluster1.quality(self.error_method)
@@ -1224,16 +1224,16 @@ def extract_single(self, recursion_depth=1):
mclusters[idx - 1] = new_cluster
del mclusters[idx]

### End combine adjacent clusters loop ###
# End combine adjacent clusters loop ###

# Finally, combine clusters in dclusters
if len(adjacent) > 0:
step.combine_clusters([adjacent])

tracer.emit(*mclusters)

### End main extraction loop ###
################################
# End main extraction loop ###
# #############################

# Put initial peaks back in
mclusters[0].addexternalpeaks(ip)
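The cluster-combining section above walks `adjacent[-1:0:-1]` and deletes each merged cluster, iterating in reverse so that a removal never shifts an index that still has to be visited. A toy illustration of the idiom (generic Python, not srmise code):

```python
clusters = ["c0", "c1", "c2", "c3", "c4"]
adjacent = [1, 2, 3]  # indices of mutually adjacent clusters, ascending

# Merge each cluster into its left neighbour and delete it, last index first;
# earlier indices stay valid because only higher positions have been removed.
for idx in adjacent[-1:0:-1]:
    clusters[idx - 1] = clusters[idx - 1] + "+" + clusters[idx]
    del clusters[idx]

print(clusters)  # ['c0', 'c1+c2+c3', 'c4']
```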
4 changes: 2 additions & 2 deletions diffpy/srmise/peaks/base.py
@@ -91,13 +91,13 @@ def __init__(
raise ValueError(emsg)
BaseFunction.__init__(self, parameterdict, parformats, default_formats, metadict, base, Cache)

#### "Virtual" class methods ####
# # "Virtual" class methods ####

def scale_at(self, peak, x, scale):
emsg = "scale_at must be implemented in a PeakFunction subclass."
raise NotImplementedError(emsg)

#### Methods required by BaseFunction ####
# # Methods required by BaseFunction ####

def actualize(
self,
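The "Virtual class methods" block marks methods such as `scale_at` that every `PeakFunction` subclass has to override, with the base implementation raising `NotImplementedError`. A minimal generic sketch of that pattern (hypothetical names, not the actual PeakFunction API):

```python
class PeakFunctionBase:
    """Base class that only declares the 'virtual' method."""

    def scale_at(self, peak, x, scale):
        raise NotImplementedError("scale_at must be implemented in a subclass.")


class SimpleScalePeak(PeakFunctionBase):
    # Hypothetical subclass supplying the required override.
    def scale_at(self, peak, x, scale):
        return [p * scale for p in peak]


print(SimpleScalePeak().scale_at([1.0, 2.0], x=0.0, scale=2.0))  # [2.0, 4.0]
```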