
Commit 8772a95

fix too many leading "#" in string block (#26)
Parent: 181a54a

5 files changed (+34, −36 lines)
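
The change itself is a comment-style cleanup of the example scripts: block comments that opened with a doubled hash ("##") now use a single "#", which presumably corresponds to pycodestyle's E266 check ("too many leading '#' for block comment"). Below is a minimal sketch of the before/after pattern, echoing the first hunk of fit_initial.py; it assumes diffpy.srmise is installed so the import resolves, and is illustrative rather than a literal excerpt:

    from diffpy.srmise import PDFPeakExtraction

    ## Initialize peak extraction   <- old style, flagged as E266 by pycodestyle/flake8
    # Initialize peak extraction    <- single-hash style applied throughout this commit
    ppe = PDFPeakExtraction()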

doc/examples/fit_initial.py

+5 −6

@@ -20,7 +20,6 @@
 grid."""

 import matplotlib.pyplot as plt
-import numpy as np

 from diffpy.srmise import PDFPeakExtraction
 from diffpy.srmise.applications.plot import makeplot
@@ -30,11 +29,11 @@

 def run(plot=True):

-    ## Initialize peak extraction
+    # Initialize peak extraction
     ppe = PDFPeakExtraction()
     ppe.loadpdf("data/C60_fine_qmax21.gr")

-    ## Set up interpolated baseline.
+    # Set up interpolated baseline.
     # The FromSequence baseline creates an interpolated baseline from provided
     # r and G(r) values, either two lists or a file containing (r, G(r)) pairs.
     # The baseline has no parameters. This particular baseline was estimated
@@ -43,7 +42,7 @@ def run(plot=True):
     blf = FromSequence("data/C60baseline.dat")
     bl = blf.actualize([])

-    ## Set up fitting parameters
+    # Set up fitting parameters
     # A summary of how parameters impact fitting is given below.
     # "rng" - Same as peak extraction
     # "baseline" - Same as peak extraction
@@ -66,7 +65,7 @@ def run(plot=True):
     kwds["dg"] = 5000  # ad hoc, but gives each point equal weight in fit.
     ppe.setvars(**kwds)

-    ## Set up termination ripples
+    # Set up termination ripples
     # Peak fitting never changes the peak function, so termination ripples
     # are not applied automatically as they are in peak extraction.
     # Termination ripples require setting the underlying peak function and qmax.
@@ -95,7 +94,7 @@ def run(plot=True):
     # Perform fit.
     ppe.fit()

-    ## Save results
+    # Save results
     ppe.write("output/fit_initial.srmise")
     ppe.writepwa("output/fit_initial.pwa")


doc/examples/multimodel_known_dG1.py

+7 −8

@@ -38,46 +38,45 @@

 import diffpy.srmise.srmiselog as sml
 from diffpy.srmise import MultimodelSelection, PDFPeakExtraction
-from diffpy.srmise.applications.plot import makeplot


 def run(plot=True):

-    ## Suppress mundane output
+    # Suppress mundane output
     # When running scripts, especially involving multiple trials, it can be
     # useful to suppress many of the diffpy.srmise messages. Valid levels
     # include "debug", "info" (the default), "warning", "error", and
     # "critical." See diffpy.srmise.srmiselog for more information.
     sml.setlevel("warning")

-    ## Initialize peak extraction from saved trial
+    # Initialize peak extraction from saved trial
     ppe = PDFPeakExtraction()
     ppe.read("output/query_results.srmise")
     ppe.clearcalc()

-    ## Set up extraction parameters
+    # Set up extraction parameters
     # All parameters loaded from .srmise file.
     # Setting new values will override the previous values.
     kwds = {}
     kwds["rng"] = [10.9, 15]  # Region of PDF with some overlap.
     ppe.setvars(**kwds)

-    ## Create multimodel selection object.
+    # Create multimodel selection object.
     # The MultimodelSelection class keeps track of the results of peak
     # extraction as the assumed uncertainty dg is varied.
     ms = MultimodelSelection()
     ms.setppe(ppe)

-    ## Define range of dg values
+    # Define range of dg values
     # For the purpose of illustration use 15 evenly-spaced values of dg where
     # 50% < dg < 120% of mean experimental dG in extraction range.
     dg_mean = np.mean(ppe.dy[ppe.getrangeslice()])
     dgs = np.linspace(0.5 * dg_mean, 1.2 * dg_mean, 15)

-    ## Perform peak extraction for each of the assumed uncertainties.
+    # Perform peak extraction for each of the assumed uncertainties.
     ms.run(dgs)

-    ## Save results
+    # Save results
     # The file known_dG_models.dat saves the models generated above. The file
     # known_dG_aics.dat saves the value of the AIC of each model when evaluated
     # on a Nyquist-sampled grid using each of the dg values used to generate

doc/examples/multimodel_known_dG2.py

+8 −8

@@ -62,19 +62,19 @@ def run(plot=True):
     # Suppress mundane output
     sml.setlevel("warning")

-    ## Create multimodeling object and load diffpy.srmise results from file.
+    # Create multimodeling object and load diffpy.srmise results from file.
     ms = MultimodelSelection()
     ms.load("output/known_dG_models.dat")
     ms.loadaics("output/known_dG_aics.dat")

-    ## Use Nyquist sampling
+    # Use Nyquist sampling
     # Standard AIC analysis assumes the data have independent uncertainties.
     # Nyquist sampling minimizes correlations in the PDF, which is the closest
     # approximation to independence possible for the PDF.
     dr = np.pi / ms.ppe.qmax
     (r, y, dr2, dy) = ms.ppe.resampledata(dr)

-    ## Classify models
+    # Classify models
     # All models are placed into classes. Models in the same class
     # should be essentially identical (same peak parameters, etc.)
     # up to a small tolerance determined by comparing individual peaks. The
@@ -90,7 +90,7 @@ def run(plot=True):
     tolerance = 0.2
     ms.classify(r, tolerance)

-    ## Summarize various facts about the analysis.
+    # Summarize various facts about the analysis.
     num_models = len(ms.results)
     num_classes = len(ms.classes)
     print("------- Multimodeling Summary --------")
@@ -99,15 +99,15 @@ def run(plot=True):
     print("Range of dgs: %f-%f" % (ms.dgs[0], ms.dgs[-1]))
     print("Nyquist-sampled data points: %i" % len(r))

-    ## Get dG usable as key in analysis.
+    # Get dG usable as key in analysis.
     # The Akaike probabilities were calculated for many assumed values of the
     # experimental uncertainty dG, and each of these assumed dG is used as a
     # key when obtaining the corresponding results. Numerical precision can
     # make recalculating the exact value difficult, so the dg_key method returns
     # the key closest to its argument.
     dG = ms.dg_key(np.mean(ms.ppe.dy))

-    ## Find "best" models.
+    # Find "best" models.
     # In short, models with greatest Akaike probability. Akaike probabilities
     # can only be validly compared if they were calculated for identical data,
     # namely identical PDF values *and* uncertainties, and are only reliable
@@ -120,7 +120,7 @@ def run(plot=True):
     print("Rank Model Class Free AIC Prob File")
     for i in range(len(ms.classes)):

-        ## Generate information about best model in ith best class.
+        # Generate information about best model in ith best class.
         # The get(dG, *args, **kwds) method returns a tuple of values
         # corresponding to string arguments for the best model in best class at
         # given dG. When the corder keyword is given it returns the model from
@@ -167,7 +167,7 @@ def run(plot=True):
         # Uncomment line below to save figures.
         # plt.savefig(filename_base + ".png", format="png")

-    ## 3D plot of Akaike probabilities
+    # 3D plot of Akaike probabilities
     # This plot shows the Akaike probabilities of all classes as a function
     # of assumed uncertainty dG. This gives a rough sense of how the models
     # selected by an AIC-based analysis would vary if the experimental

doc/examples/multimodel_unknown_dG1.py

+7 −7

@@ -42,18 +42,18 @@

 def run(plot=True):

-    ## Suppress mundane output
+    # Suppress mundane output
     # When running scripts, especially involving multiple trials, it can be
     # useful to suppress many of the diffpy.srmise messages. Valid levels
     # include "debug", "info" (the default), "warning", "error", and
     # "critical." See diffpy.srmise.srmiselog for more information.
     sml.setlevel("warning")

-    ## Initialize peak extraction
+    # Initialize peak extraction
     ppe = PDFPeakExtraction()
     ppe.loadpdf("data/C60_fine_qmax21.gr")

-    ## Set up extraction parameters
+    # Set up extraction parameters
     # The FromSequence baseline interpolates (r, G(r)) values read from a
     # specified file. It has parameters. This particular baseline was
     # calculated by approximating the C60 sample as a face-centered cubic
@@ -65,22 +65,22 @@ def run(plot=True):
     kwds["cres"] = 0.05
     ppe.setvars(**kwds)

-    ## Create multimodel selection object.
+    # Create multimodel selection object.
     # The MultimodelSelection class keeps track of the results of peak
     # extraction as the assumed uncertainty dg is varied.
     ms = MultimodelSelection()
     ms.setppe(ppe)

-    ## Define range of dg values
+    # Define range of dg values
     # For the purpose of illustration use 20 evenly-spaced values of dg where
     # 1% < dg < 10% of max gr value between r=1 and 7.25.
     grmax = np.max(ppe.y[ppe.getrangeslice()])
     dgs = np.linspace(0.01 * grmax, 0.10 * grmax, 20)

-    ## Perform peak extraction for each of the assumed uncertainties.
+    # Perform peak extraction for each of the assumed uncertainties.
     ms.run(dgs)

-    ## Save results
+    # Save results
     # The file C60_models.dat saves the models generated above. The file
     # C60_aics.dat saves the value of the AIC of each model when evaluated
     # on a Nyquist-sampled grid using each of the dg values used to generate

doc/examples/multimodel_unknown_dG2.py

+7 −7

@@ -78,19 +78,19 @@ def run(plot=True):
     # Suppress mundane output
     sml.setlevel("warning")

-    ## Create multimodeling object and load diffpy.srmise results from file.
+    # Create multimodeling object and load diffpy.srmise results from file.
     ms = MultimodelSelection()
     ms.load("output/unknown_dG_models.dat")
     ms.loadaics("output/unknown_dG_aics.dat")

-    ## Use Nyquist sampling
+    # Use Nyquist sampling
     # Standard AIC analysis assumes the data have independent uncertainties.
     # Nyquist sampling minimizes correlations in the PDF, which is the closest
     # approximation to independence possible for the PDF.
     dr = np.pi / ms.ppe.qmax
     (r, y, dr2, dy) = ms.ppe.resampledata(dr)

-    ## Classify models
+    # Classify models
     # All models are placed into classes. Models in the same class
     # should be essentially identical (same peak parameters, etc.)
     # up to a small tolerance determined by comparing individual peaks. The
@@ -106,7 +106,7 @@ def run(plot=True):
     tolerance = 0.2
     ms.classify(r, tolerance)

-    ## Summarize various facts about the analysis.
+    # Summarize various facts about the analysis.
     num_models = len(ms.results)
     num_classes = len(ms.classes)
     print("------- Multimodeling Summary --------")
@@ -115,7 +115,7 @@ def run(plot=True):
     print("Range of dgs: %f-%f" % (ms.dgs[0], ms.dgs[-1]))
     print("Nyquist-sampled data points: %i" % len(r))

-    ## Find "best" models.
+    # Find "best" models.
     # In short, models with greatest Akaike probability. Akaike probabilities
     # can only be validly compared if they were calculated for identical data,
     # namely identical PDF values *and* uncertainties, and are only reliable
@@ -144,7 +144,7 @@ def run(plot=True):
     print(" Best dG Model Class Free AIC Prob File")
     for dG in best_dGs:

-        ## Generate information about best model.
+        # Generate information about best model.
         # The get(dG, *args, **kwds) method returns a tuple of values
         # corresponding to string arguments for the best model in best class at
         # given dG. When the corder keyword is given it returns the model from
@@ -191,7 +191,7 @@ def run(plot=True):
         # Uncomment line below to save figures.
         # plt.savefig(filename_base + ".png", format="png")

-    ## 3D plot of Akaike probabilities
+    # 3D plot of Akaike probabilities
     # This plot shows the Akaike probabilities of all classes as a function
     # of assumed uncertainty dG. This gives a rough sense of how the models
     # selected by an AIC-based analysis would vary if the experimental
