@@ -18,7 +18,7 @@ class SNMFOptimizer:
     ----------
     source_matrix : ndarray
         The original, unmodified data to be decomposed and, later, compared against.
-        Shape is (length_of_signal, number_of_conditions).
+        Shape is (length_of_signal, number_of_signals).
     stretch : ndarray
         The best guess (or, while running, the current guess) for the stretching
         factor matrix.
@@ -47,14 +47,14 @@ class SNMFOptimizer:
         The number of components to extract from source_matrix. Must be provided when and only when
         Y0 is not provided.
     random_state : int
-        The seed for the initial guesses at the matrices (A, X, and Y) created by
+        The seed for the initial guesses at the matrices (stretch, components, and weights) created by
         the decomposition.
     num_updates : int
-        The total number of times that any of (A, X, and Y) have had their values changed.
+        The total number of times that any of (stretch, components, and weights) have had their values changed.
         If not terminated by other means, this value is used to stop when reaching max_iter.
     objective_function : float
         The value corresponding to the minimization of the difference between the source_matrix and the
-        products of A, X, and Y. For full details see the sNMF paper. Smaller corresponds to
+        products of (stretch, components, and weights). For full details see the sNMF paper. Smaller corresponds to
         better agreement and is desirable.
     objective_difference : float
         The change in the objective function value since the last update. A negative value
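For orientation, here is a minimal shape sketch of the renamed matrices; the sizes are invented for illustration, and the stretching interpolation itself is omitted (with stretch fixed at 1, the reconstruction the objective function compares against reduces to a plain matrix product):

import numpy as np

signal_length, n_signals, n_components = 1000, 12, 3

source_matrix = np.random.rand(signal_length, n_signals)  # data to decompose
components = np.random.rand(signal_length, n_components)  # formerly X
weights = np.random.rand(n_components, n_signals)         # formerly Y
stretch = np.ones((n_components, n_signals))              # formerly A

# With no stretching applied, the misfit measured by the objective is:
residuals = components @ weights - source_matrix          # (signal_length, n_signals)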
@@ -67,8 +67,8 @@ def __init__(
         init_weights=None,
         init_components=None,
         init_stretch=None,
-        rho=1e12,
-        eta=610,
+        rho=0,
+        eta=0,
         max_iter=500,
         tol=5e-7,
         n_components=None,
@@ -80,35 +80,36 @@ def __init__(
         ----------
         source_matrix : ndarray
             The data to be decomposed. Shape is (length_of_signal, number_of_conditions).
-        init_weights : ndarray
+        init_weights : ndarray Optional Default = rng.beta(a=2.5, b=1.5, size=(n_components, n_signals))
             The initial guesses for the component weights at each stretching condition.
-            Shape is (number_of_components, number_of_conditions). Must provide exactly one
+            Shape is (number_of_components, number_of_signals). Must provide exactly one
             of this or n_components.
-        init_components : ndarray
+        init_components : ndarray Optional Default = rng.random((self.signal_length, self.n_components))
             The initial guesses for the intensities of each component per
             row/sample/angle. Shape is (length_of_signal, number_of_components).
-        init_stretch : ndarray
+        init_stretch : ndarray Optional Default = np.ones((self.n_components, self.n_signals)) + self._rng.normal(
+            0, 1e-3, size=(self.n_components, self.n_signals))
             The initial guesses for the stretching factor for each component, at each
-            condition. Shape is (number_of_components, number_of_conditions).
-        rho : float
+            condition (for each signal). Shape is (number_of_components, number_of_signals).
+        rho : float Optional Default = 0
             The stretching factor that influences the decomposition. Zero corresponds to no
             stretching present. Relatively insensitive and typically adjusted in powers of 10.
-        eta : float
+        eta : int Optional Default = 0
             The sparsity factor that influences the decomposition. Should be set to zero for
             non-sparse data such as PDF. Can be used to improve results for sparse data such
             as XRD, but due to instability, should be used only after first selecting the
             best value for rho. Suggested adjustment is by powers of 2.
-        max_iter : int
+        max_iter : int Optional Default = 500
             The maximum number of times to update each of A, X, and Y before stopping
             the optimization.
-        tol : float
+        tol : float Optional Default = 5e-7
             The convergence threshold. This is the minimum fractional improvement in the
             objective function to allow without terminating the optimization. Note that
             a minimum of 20 updates are run before this parameter is checked.
-        n_components : int
+        n_components : int Optional Default = None
             The number of components to extract from source_matrix. Must be provided when and only when
             Y0 is not provided.
-        random_state : int
+        random_state : int Optional Default = None
             The seed for the initial guesses at the matrices (A, X, and Y) created by
             the decomposition.
         """
@@ -157,10 +158,10 @@ def __init__(
         self.weights = np.maximum(0, self.weights)

         # Second-order difference operator: each row applies [1, -2, 1] to consecutive entries
-        self.spline_smooth_operator = 0.25 * diags(
+        self._spline_smooth_operator = 0.25 * diags(
             [1, -2, 1], offsets=[0, 1, 2], shape=(self.n_signals - 2, self.n_signals)
         )
-        self.spline_smooth_penalty = self.spline_smooth_operator.T @ self.spline_smooth_operator
+        self._spline_smooth_penalty = self._spline_smooth_operator.T @ self._spline_smooth_operator

         # Set up residual matrix, objective function, and history
         self.residuals = self.get_residual_matrix()
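The operator built here is easy to inspect in isolation; a standalone sketch with n_signals = 6 (size chosen only for readability):

import numpy as np
from scipy.sparse import diags

n_signals = 6
op = 0.25 * diags([1, -2, 1], offsets=[0, 1, 2], shape=(n_signals - 2, n_signals))
print(op.toarray())
# Each row computes 0.25 * (x[i] - 2 * x[i+1] + x[i+2]), a discrete second
# derivative, so the penalty op.T @ op favors smoothly varying stretch factors.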
@@ -173,7 +174,7 @@ def __init__(
         self.grad_components = np.zeros_like(self.components)  # Gradient of X (zeros for now)
         self._prev_grad_components = np.zeros_like(self.components)  # Previous gradient of X (zeros for now)

-        regularization_term = 0.5 * rho * np.linalg.norm(self.spline_smooth_operator @ self.stretch.T, "fro") ** 2
+        regularization_term = 0.5 * rho * np.linalg.norm(self._spline_smooth_operator @ self.stretch.T, "fro") ** 2
         sparsity_term = eta * np.sum(np.sqrt(self.components))  # Square root penalty
         print(
             f"Start, Objective function: {self.objective_function:.5e}"
@@ -185,7 +186,7 @@ def __init__(
         self.optimize_loop()
         # Print diagnostics
         regularization_term = (
-            0.5 * rho * np.linalg.norm(self.spline_smooth_operator @ self.stretch.T, "fro") ** 2
+            0.5 * rho * np.linalg.norm(self._spline_smooth_operator @ self.stretch.T, "fro") ** 2
         )
         sparsity_term = eta * np.sum(np.sqrt(self.components))  # Square root penalty
         print(
@@ -333,7 +334,7 @@ def get_objective_function(self, residuals=None, stretch=None):
         if stretch is None:
             stretch = self.stretch
         residual_term = 0.5 * np.linalg.norm(residuals, "fro") ** 2
-        regularization_term = 0.5 * self.rho * np.linalg.norm(self.spline_smooth_operator @ stretch.T, "fro") ** 2
+        regularization_term = 0.5 * self.rho * np.linalg.norm(self._spline_smooth_operator @ stretch.T, "fro") ** 2
         sparsity_term = self.eta * np.sum(np.sqrt(self.components))  # Square root penalty
         # Final objective function value
         function = residual_term + regularization_term + sparsity_term
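The objective assembled in this method can be written as a standalone function; a dense-NumPy sketch in which the function name and the `op` argument are introduced here for illustration (the terms mirror the diff exactly):

import numpy as np

def objective(residuals, stretch, components, op, rho, eta):
    residual_term = 0.5 * np.linalg.norm(residuals, "fro") ** 2
    regularization_term = 0.5 * rho * np.linalg.norm(op @ stretch.T, "fro") ** 2
    sparsity_term = eta * np.sum(np.sqrt(components))  # square-root penalty
    return residual_term + regularization_term + sparsity_term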
@@ -652,7 +653,7 @@ def regularize_function(self, stretch=None):
         )
         der_reshaped = np.asarray(tiled_derivative).reshape((self.n_signals, self.n_components), order="F")
         func_grad = (
-            der_reshaped.T + self.rho * stretch @ self.spline_smooth_operator.T @ self.spline_smooth_operator
+            der_reshaped.T + self.rho * stretch @ self._spline_smooth_operator.T @ self._spline_smooth_operator
         )

         return reg_func, func_grad
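The `rho * stretch @ op.T @ op` term is the analytic gradient of the regularization 0.5 * rho * ||op @ stretch.T||_F**2 with respect to stretch; a quick finite-difference sanity check, using a dense stand-in for the sparse operator and invented sizes:

import numpy as np

rng = np.random.default_rng(1)
n_components, n_signals, rho = 2, 6, 1e3
op = 0.25 * np.diff(np.eye(n_signals), n=2, axis=0)  # dense second-difference operator
stretch = rng.random((n_components, n_signals))

def f(s):
    return 0.5 * rho * np.linalg.norm(op @ s.T, "fro") ** 2

analytic = rho * stretch @ op.T @ op
eps = 1e-4
numeric = np.zeros_like(stretch)
for i in range(n_components):
    for j in range(n_signals):
        d = np.zeros_like(stretch)
        d[i, j] = eps
        numeric[i, j] = (f(stretch + d) - f(stretch - d)) / (2 * eps)

print(np.allclose(analytic, numeric))  # expect True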