@@ -62,19 +62,19 @@ def run(plot=True):
# Suppress mundane output
sml.setlevel("warning")

- ## Create multimodeling object and load diffpy.srmise results from file.
+ # Create multimodeling object and load diffpy.srmise results from file.
ms = MultimodelSelection()
ms.load("output/known_dG_models.dat")
ms.loadaics("output/known_dG_aics.dat")

- ## Use Nyquist sampling
+ # Use Nyquist sampling
# Standard AIC analysis assumes the data have independent uncertainties.
# Nyquist sampling minimizes correlations in the PDF, which is the closest
# approximation to independence possible for the PDF.
dr = np.pi / ms.ppe.qmax
(r, y, dr2, dy) = ms.ppe.resampledata(dr)

- ## Classify models
+ # Classify models
# All models are placed into classes. Models in the same class
# should be essentially identical (same peak parameters, etc.)
# up to a small tolerance determined by comparing individual peaks. The
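Aside on the Nyquist spacing used in the hunk above: the grid step follows directly from the maximum momentum transfer, dr = pi/qmax. A minimal standalone sketch of that relation, where the qmax value and r range below are assumed, illustrative numbers rather than values taken from this example's data:

import numpy as np

qmax = 25.0                    # assumed maximum momentum transfer, 1/angstrom (illustrative)
dr = np.pi / qmax              # Nyquist grid spacing in r, about 0.126 angstrom here
r = np.arange(0.0, 20.0, dr)   # Nyquist-sampled r grid over an assumed 20 angstrom range
print("dr = %.4f, %i grid points" % (dr, len(r)))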
@@ -90,7 +90,7 @@ def run(plot=True):
tolerance = 0.2
ms.classify(r, tolerance)

- ## Summarize various facts about the analysis.
+ # Summarize various facts about the analysis.
num_models = len(ms.results)
num_classes = len(ms.classes)
print("------- Multimodeling Summary --------")
@@ -99,15 +99,15 @@ def run(plot=True):
print("Range of dgs: %f-%f" % (ms.dgs[0], ms.dgs[-1]))
print("Nyquist-sampled data points: %i" % len(r))

- ## Get dG usable as key in analysis.
+ # Get dG usable as key in analysis.
# The Akaike probabilities were calculated for many assumed values of the
# experimental uncertainty dG, and each of these assumed dG is used as a
# key when obtaining the corresponding results. Numerical precision can
# make recalculating the exact value difficult, so the dg_key method returns
# the key closest to its argument.
dG = ms.dg_key(np.mean(ms.ppe.dy))

- ## Find "best" models.
+ # Find "best" models.
# In short, models with greatest Akaike probability. Akaike probabilities
# can only be validly compared if they were calculated for identical data,
# namely identical PDF values *and* uncertainties, and are only reliable
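Aside on the Akaike probabilities mentioned in the hunk above: for models fit to identical data, the probabilities (Akaike weights) follow from the AIC differences relative to the best model. The standalone sketch below uses the standard textbook formula with made-up AIC values; it is not a claim about how MultimodelSelection computes or stores them internally:

import numpy as np

aics = np.array([1500.2, 1503.8, 1510.1])   # made-up AIC values for three candidate models
delta = aics - aics.min()                    # differences relative to the lowest AIC
weights = np.exp(-delta / 2.0)
weights /= weights.sum()                     # Akaike probabilities, summing to 1
print(weights)                               # the lowest-AIC model gets the largest weight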
@@ -120,7 +120,7 @@ def run(plot=True):
print("Rank Model Class Free AIC Prob File")
for i in range(len(ms.classes)):

- ## Generate information about best model in ith best class.
+ # Generate information about best model in ith best class.
# The get(dG, *args, **kwds) method returns a tuple of values
# corresponding to string arguments for the best model in best class at
# given dG. When the corder keyword is given it returns the model from
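Aside on the get(dG, *args, **kwds) pattern described in the hunk above: requesting several named quantities by string and receiving them as a tuple is a common Python idiom. The standalone sketch below illustrates only that generic idiom; the dictionary, key names, and helper function are all made up and do not reproduce MultimodelSelection's actual implementation:

# Generic lookup returning a tuple of requested fields, mirroring the
# string-argument style described in the comment above; all names are made up.
record = {"aic": 1500.2, "prob": 0.71, "nfree": 17}

def get_fields(rec, *keys):
    return tuple(rec[k] for k in keys)

print(get_fields(record, "prob", "nfree"))   # (0.71, 17)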
@@ -167,7 +167,7 @@ def run(plot=True):
# Uncomment line below to save figures.
# plt.savefig(filename_base + ".png", format="png")

- ## 3D plot of Akaike probabilities
+ # 3D plot of Akaike probabilities
# This plot shows the Akaike probabilities of all classes as a function
# of assumed uncertainty dG. This gives a rough sense of how the models
# selected by an AIC-based analysis would vary if the experimental
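Aside on the probability plot described in the hunk above: the quantity being visualized is the Akaike probability of each class as a function of the assumed uncertainty dG. The sketch below is a much simpler 2-D stand-in with made-up numbers, not the 3-D plot the script itself produces:

import matplotlib.pyplot as plt
import numpy as np

dgs = np.linspace(0.001, 0.01, 50)   # made-up range of assumed uncertainties dG
rng = np.random.default_rng(0)
probs = rng.random((3, dgs.size))    # made-up raw scores for three classes
probs /= probs.sum(axis=0)           # normalize so class probabilities sum to 1 at each dG
for k, p in enumerate(probs):
    plt.plot(dgs, p, label="class %i" % k)
plt.xlabel("assumed dG")
plt.ylabel("Akaike probability")
plt.legend()
plt.show()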