From b385474bc023bde51c65bf106419f154048c4495 Mon Sep 17 00:00:00 2001
From: stevenhua0320
Date: Tue, 30 Jul 2024 11:57:24 +0800
Subject: [PATCH] fix too many leading "#" in string block

---
 doc/examples/fit_initial.py            | 11 +++++------
 doc/examples/multimodel_known_dG1.py   | 15 +++++++--------
 doc/examples/multimodel_known_dG2.py   | 16 ++++++++--------
 doc/examples/multimodel_unknown_dG1.py | 14 +++++++-------
 doc/examples/multimodel_unknown_dG2.py | 14 +++++++-------
 5 files changed, 34 insertions(+), 36 deletions(-)

diff --git a/doc/examples/fit_initial.py b/doc/examples/fit_initial.py
index 33bf4e6..71979e8 100644
--- a/doc/examples/fit_initial.py
+++ b/doc/examples/fit_initial.py
@@ -20,7 +20,6 @@
 grid."""
 
 import matplotlib.pyplot as plt
-import numpy as np
 
 from diffpy.srmise import PDFPeakExtraction
 from diffpy.srmise.applications.plot import makeplot
@@ -30,11 +29,11 @@
 
 def run(plot=True):
 
-    ## Initialize peak extraction
+    # Initialize peak extraction
     ppe = PDFPeakExtraction()
     ppe.loadpdf("data/C60_fine_qmax21.gr")
 
-    ## Set up interpolated baseline.
+    # Set up interpolated baseline.
     # The FromSequence baseline creates an interpolated baseline from provided
     # r and G(r) values, either two lists or a file containing (r, G(r)) pairs.
     # The baseline has no parameters. This particular baseline was estimated
@@ -43,7 +42,7 @@ def run(plot=True):
     blf = FromSequence("data/C60baseline.dat")
     bl = blf.actualize([])
 
-    ## Set up fitting parameters
+    # Set up fitting parameters
     # A summary of how parameters impact fitting is given below.
     # "rng" - Same as peak extraction
     # "baseline" - Same as peak extraction
@@ -66,7 +65,7 @@ def run(plot=True):
     kwds["dg"] = 5000  # ad hoc, but gives each point equal weight in fit.
     ppe.setvars(**kwds)
 
-    ## Set up termination ripples
+    # Set up termination ripples
     # Peak fitting never changes the peak function, so termination ripples
     # are not applied automatically as they are in peak extraction.
     # Termination ripples require setting the underlying peak function and qmax.
@@ -95,7 +94,7 @@ def run(plot=True):
     # Perform fit.
     ppe.fit()
 
-    ## Save results
+    # Save results
     ppe.write("output/fit_initial.srmise")
     ppe.writepwa("output/fit_initial.pwa")
 
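Taken together, the comment blocks touched by these hunks also document a complete peak-fitting workflow. A minimal sketch of fit_initial.py, with plotting and the termination-ripple setup omitted: every call below appears in the diff above, while the FromSequence import path and the "baseline" keyword passed to setvars() are assumptions, since those lines fall outside the hunk context shown.

    # Condensed fit_initial.py workflow (assumptions noted above).
    from diffpy.srmise import PDFPeakExtraction
    from diffpy.srmise.baselines import FromSequence  # assumed import path

    ppe = PDFPeakExtraction()
    ppe.loadpdf("data/C60_fine_qmax21.gr")

    # Interpolated baseline built from (r, G(r)) pairs; it has no parameters.
    blf = FromSequence("data/C60baseline.dat")
    bl = blf.actualize([])

    # dg=5000 is ad hoc but gives each point equal weight in the fit; passing
    # the actualized baseline under the "baseline" key is an assumption here.
    ppe.setvars(baseline=bl, dg=5000)

    ppe.fit()
    ppe.write("output/fit_initial.srmise")
    ppe.writepwa("output/fit_initial.pwa")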
diff --git a/doc/examples/multimodel_known_dG1.py b/doc/examples/multimodel_known_dG1.py
index aa50c41..f0ceb1b 100644
--- a/doc/examples/multimodel_known_dG1.py
+++ b/doc/examples/multimodel_known_dG1.py
@@ -38,46 +38,45 @@
 import diffpy.srmise.srmiselog as sml
 from diffpy.srmise import MultimodelSelection, PDFPeakExtraction
-from diffpy.srmise.applications.plot import makeplot
 
 
 def run(plot=True):
 
-    ## Suppress mundane output
+    # Suppress mundane output
     # When running scripts, especially involving multiple trials, it can be
     # useful to suppress many of the diffpy.srmise messages. Valid levels
     # include "debug", "info" (the default), "warning", "error", and
     # "critical." See diffpy.srmise.srmiselog for more information.
     sml.setlevel("warning")
 
-    ## Initialize peak extraction from saved trial
+    # Initialize peak extraction from saved trial
     ppe = PDFPeakExtraction()
     ppe.read("output/query_results.srmise")
     ppe.clearcalc()
 
-    ## Set up extraction parameters
+    # Set up extraction parameters
     # All parameters loaded from .srmise file.
     # Setting new values will override the previous values.
     kwds = {}
     kwds["rng"] = [10.9, 15]  # Region of PDF with some overlap.
     ppe.setvars(**kwds)
 
-    ## Create multimodel selection object.
+    # Create multimodel selection object.
     # The MultimodelSelection class keeps track of the results of peak
     # extraction as the assumed uncertainty dg is varied.
     ms = MultimodelSelection()
     ms.setppe(ppe)
 
-    ## Define range of dg values
+    # Define range of dg values
     # For the purpose of illustration use 15 evenly-spaced values of dg where
     # 50% < dg < 120% of mean experimental dG in extraction range.
     dg_mean = np.mean(ppe.dy[ppe.getrangeslice()])
     dgs = np.linspace(0.5 * dg_mean, 1.2 * dg_mean, 15)
 
-    ## Perform peak extraction for each of the assumed uncertainties.
+    # Perform peak extraction for each of the assumed uncertainties.
     ms.run(dgs)
 
-    ## Save results
+    # Save results
     # The file known_dG_models.dat saves the models generated above. The file
     # known_dG_aics.dat saves the value of the AIC of each model when evaluated
     # on a Nyquist-sampled grid using each of the dg values used to generate
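multimodel_known_dG1.py is the model-generation half of the known-dG analysis. Read in sequence, its comments reduce to the minimal sketch below; every call appears in the diff above, and the final save step is omitted because its calls fall outside the hunks (the comments name the output files known_dG_models.dat and known_dG_aics.dat).

    import numpy as np

    import diffpy.srmise.srmiselog as sml
    from diffpy.srmise import MultimodelSelection, PDFPeakExtraction

    sml.setlevel("warning")  # suppress routine messages over many trials

    # Restart peak extraction from a saved trial and clear its old results.
    ppe = PDFPeakExtraction()
    ppe.read("output/query_results.srmise")
    ppe.clearcalc()
    ppe.setvars(rng=[10.9, 15])  # region of the PDF with some overlap

    ms = MultimodelSelection()
    ms.setppe(ppe)

    # 15 evenly-spaced dg values spanning 50%-120% of the mean experimental
    # dG in the extraction range, then one peak extraction per assumed dg.
    dg_mean = np.mean(ppe.dy[ppe.getrangeslice()])
    dgs = np.linspace(0.5 * dg_mean, 1.2 * dg_mean, 15)
    ms.run(dgs)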
diff --git a/doc/examples/multimodel_known_dG2.py b/doc/examples/multimodel_known_dG2.py
index 934e4bb..e72db46 100644
--- a/doc/examples/multimodel_known_dG2.py
+++ b/doc/examples/multimodel_known_dG2.py
@@ -62,19 +62,19 @@ def run(plot=True):
     # Suppress mundane output
     sml.setlevel("warning")
 
-    ## Create multimodeling object and load diffpy.srmise results from file.
+    # Create multimodeling object and load diffpy.srmise results from file.
     ms = MultimodelSelection()
     ms.load("output/known_dG_models.dat")
     ms.loadaics("output/known_dG_aics.dat")
 
-    ## Use Nyquist sampling
+    # Use Nyquist sampling
     # Standard AIC analysis assumes the data have independent uncertainties.
     # Nyquist sampling minimizes correlations in the PDF, which is the closest
     # approximation to independence possible for the PDF.
     dr = np.pi / ms.ppe.qmax
     (r, y, dr2, dy) = ms.ppe.resampledata(dr)
 
-    ## Classify models
+    # Classify models
     # All models are placed into classes. Models in the same class
     # should be essentially identical (same peak parameters, etc.)
     # up to a small tolerance determined by comparing individual peaks. The
@@ -90,7 +90,7 @@ def run(plot=True):
     tolerance = 0.2
     ms.classify(r, tolerance)
 
-    ## Summarize various facts about the analysis.
+    # Summarize various facts about the analysis.
     num_models = len(ms.results)
     num_classes = len(ms.classes)
     print("------- Multimodeling Summary --------")
@@ -99,7 +99,7 @@ def run(plot=True):
     print("Range of dgs: %f-%f" % (ms.dgs[0], ms.dgs[-1]))
     print("Nyquist-sampled data points: %i" % len(r))
 
-    ## Get dG usable as key in analysis.
+    # Get dG usable as key in analysis.
     # The Akaike probabilities were calculated for many assumed values of the
     # experimental uncertainty dG, and each of these assumed dG is used as a
     # key when obtaining the corresponding results. Numerical precision can
@@ -107,7 +107,7 @@ def run(plot=True):
     # the key closest to its argument.
     dG = ms.dg_key(np.mean(ms.ppe.dy))
 
-    ## Find "best" models.
+    # Find "best" models.
     # In short, models with greatest Akaike probability. Akaike probabilities
     # can only be validly compared if they were calculated for identical data,
     # namely identical PDF values *and* uncertainties, and are only reliable
@@ -120,7 +120,7 @@ def run(plot=True):
     print("Rank Model Class Free AIC Prob File")
     for i in range(len(ms.classes)):
 
-        ## Generate information about best model in ith best class.
+        # Generate information about best model in ith best class.
         # The get(dG, *args, **kwds) method returns a tuple of values
         # corresponding to string arguments for the best model in best class at
         # given dG. When the corder keyword is given it returns the model from
@@ -167,7 +167,7 @@ def run(plot=True):
     # Uncomment line below to save figures.
     # plt.savefig(filename_base + ".png", format="png")
 
-    ## 3D plot of Akaike probabilities
+    # 3D plot of Akaike probabilities
     # This plot shows the Akaike probabilities of all classes as a function
     # of assumed uncertainty dG. This gives a rough sense of how the models
     # selected by an AIC-based analysis would vary if the experimental
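multimodel_known_dG2.py is the analysis half: it reloads the saved models and AICs, resamples the data on a Nyquist grid, and groups near-identical models into classes. A minimal sketch using only the calls visible in the diff above:

    import numpy as np

    from diffpy.srmise import MultimodelSelection

    ms = MultimodelSelection()
    ms.load("output/known_dG_models.dat")
    ms.loadaics("output/known_dG_aics.dat")

    # AIC analysis assumes independent uncertainties; resampling at
    # dr = pi/qmax minimizes correlations between PDF points.
    dr = np.pi / ms.ppe.qmax
    (r, y, dr2, dy) = ms.ppe.resampledata(dr)

    # Group models that are identical up to a small tolerance into classes.
    ms.classify(r, 0.2)
    print("Models: %i, classes: %i" % (len(ms.results), len(ms.classes)))

    # Akaike probabilities are keyed by assumed dG; dg_key() returns the
    # stored key nearest its argument, avoiding precision mismatches.
    dG = ms.dg_key(np.mean(ms.ppe.dy))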
diff --git a/doc/examples/multimodel_unknown_dG1.py b/doc/examples/multimodel_unknown_dG1.py
index 3016fb3..7a6a2b8 100644
--- a/doc/examples/multimodel_unknown_dG1.py
+++ b/doc/examples/multimodel_unknown_dG1.py
@@ -42,18 +42,18 @@
 
 def run(plot=True):
 
-    ## Suppress mundane output
+    # Suppress mundane output
     # When running scripts, especially involving multiple trials, it can be
     # useful to suppress many of the diffpy.srmise messages. Valid levels
     # include "debug", "info" (the default), "warning", "error", and
     # "critical." See diffpy.srmise.srmiselog for more information.
     sml.setlevel("warning")
 
-    ## Initialize peak extraction
+    # Initialize peak extraction
     ppe = PDFPeakExtraction()
     ppe.loadpdf("data/C60_fine_qmax21.gr")
 
-    ## Set up extraction parameters
+    # Set up extraction parameters
     # The FromSequence baseline interpolates (r, G(r)) values read from a
     # specified file. It has no parameters. This particular baseline was
     # calculated by approximating the C60 sample as a face-centered cubic
@@ -65,22 +65,22 @@ def run(plot=True):
     kwds["cres"] = 0.05
     ppe.setvars(**kwds)
 
-    ## Create multimodel selection object.
+    # Create multimodel selection object.
     # The MultimodelSelection class keeps track of the results of peak
     # extraction as the assumed uncertainty dg is varied.
     ms = MultimodelSelection()
     ms.setppe(ppe)
 
-    ## Define range of dg values
+    # Define range of dg values
     # For the purpose of illustration use 20 evenly-spaced values of dg where
     # 1% < dg < 10% of max gr value between r=1 and 7.25.
     grmax = np.max(ppe.y[ppe.getrangeslice()])
     dgs = np.linspace(0.01 * grmax, 0.10 * grmax, 20)
 
-    ## Perform peak extraction for each of the assumed uncertainties.
+    # Perform peak extraction for each of the assumed uncertainties.
     ms.run(dgs)
 
-    ## Save results
+    # Save results
     # The file C60_models.dat saves the models generated above. The file
     # C60_aics.dat saves the value of the AIC of each model when evaluated
     # on a Nyquist-sampled grid using each of the dg values used to generate
diff --git a/doc/examples/multimodel_unknown_dG2.py b/doc/examples/multimodel_unknown_dG2.py
index 1bd793d..9a4b24a 100644
--- a/doc/examples/multimodel_unknown_dG2.py
+++ b/doc/examples/multimodel_unknown_dG2.py
@@ -78,19 +78,19 @@ def run(plot=True):
     # Suppress mundane output
     sml.setlevel("warning")
 
-    ## Create multimodeling object and load diffpy.srmise results from file.
+    # Create multimodeling object and load diffpy.srmise results from file.
     ms = MultimodelSelection()
     ms.load("output/unknown_dG_models.dat")
     ms.loadaics("output/unknown_dG_aics.dat")
 
-    ## Use Nyquist sampling
+    # Use Nyquist sampling
     # Standard AIC analysis assumes the data have independent uncertainties.
     # Nyquist sampling minimizes correlations in the PDF, which is the closest
     # approximation to independence possible for the PDF.
     dr = np.pi / ms.ppe.qmax
     (r, y, dr2, dy) = ms.ppe.resampledata(dr)
 
-    ## Classify models
+    # Classify models
     # All models are placed into classes. Models in the same class
     # should be essentially identical (same peak parameters, etc.)
     # up to a small tolerance determined by comparing individual peaks. The
@@ -106,7 +106,7 @@ def run(plot=True):
     tolerance = 0.2
     ms.classify(r, tolerance)
 
-    ## Summarize various facts about the analysis.
+    # Summarize various facts about the analysis.
     num_models = len(ms.results)
     num_classes = len(ms.classes)
     print("------- Multimodeling Summary --------")
@@ -115,7 +115,7 @@ def run(plot=True):
     print("Range of dgs: %f-%f" % (ms.dgs[0], ms.dgs[-1]))
     print("Nyquist-sampled data points: %i" % len(r))
 
-    ## Find "best" models.
+    # Find "best" models.
     # In short, models with greatest Akaike probability. Akaike probabilities
     # can only be validly compared if they were calculated for identical data,
     # namely identical PDF values *and* uncertainties, and are only reliable
@@ -144,7 +144,7 @@ def run(plot=True):
     print(" Best dG Model Class Free AIC Prob File")
     for dG in best_dGs:
 
-        ## Generate information about best model.
+        # Generate information about best model.
         # The get(dG, *args, **kwds) method returns a tuple of values
         # corresponding to string arguments for the best model in best class at
         # given dG. When the corder keyword is given it returns the model from
@@ -191,7 +191,7 @@ def run(plot=True):
     # Uncomment line below to save figures.
     # plt.savefig(filename_base + ".png", format="png")
 
-    ## 3D plot of Akaike probabilities
+    # 3D plot of Akaike probabilities
     # This plot shows the Akaike probabilities of all classes as a function
     # of assumed uncertainty dG. This gives a rough sense of how the models
     # selected by an AIC-based analysis would vary if the experimental
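The two unknown-dG examples follow the same generate/analyze split as the known-dG pair; the substantive difference is how the dg grid is chosen when no reliable experimental uncertainty is available. Assuming a ppe and ms set up as in the sketch after multimodel_known_dG1.py, the unknown-dG variant anchors dg to the scale of the data instead:

    # 20 evenly-spaced dg values spanning 1%-10% of the maximum G(r) in the
    # fitting range (the extraction also uses a FromSequence baseline and
    # cres=0.05, per the diff above).
    grmax = np.max(ppe.y[ppe.getrangeslice()])
    dgs = np.linspace(0.01 * grmax, 0.10 * grmax, 20)
    ms.run(dgs)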