
Commit a3fde98

Update OrbitFG.py with comments.
1 parent 9f85413 commit a3fde98


OrbitFG.py

Lines changed: 48 additions & 7 deletions
@@ -5,6 +5,12 @@
 import matplotlib.pyplot as plt
 from math import pi, sqrt, ceil, atan2
 
+
+'''
+To create a sparse matrix, you have to create three "parallel" arrays,
+one with row indices, one with column indices, and one with the actual
+values to put in the matrix. This is a helper function to make this process easier.
+'''
 def dense_2_sp_lists(M: np.array, tl_row : int, tl_col: int, row_vec=True):
     '''
     This function takes in a dense matrix and turns it into a flat array.
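(Aside on the pattern this new docstring describes: below is a minimal, standalone sketch of building a scipy.sparse matrix from three parallel arrays. The helper name dense_block_to_triplets and the example blocks are made up for illustration; this is not the repository's dense_2_sp_lists.)

import numpy as np
import scipy.sparse as sp

def dense_block_to_triplets(M, tl_row, tl_col):
    # Record, for every entry of the dense block M, which row/column it lands in
    # inside the big matrix, offset by the block's top-left corner.
    rows, cols = np.meshgrid(np.arange(M.shape[0]), np.arange(M.shape[1]), indexing='ij')
    return rows.ravel() + tl_row, cols.ravel() + tl_col, M.ravel()

# Place a 2x2 block at (0, 0) and another at (2, 2), then assemble the sparse matrix.
r1, c1, d1 = dense_block_to_triplets(np.array([[1., 2.], [3., 4.]]), 0, 0)
r2, c2, d2 = dense_block_to_triplets(np.eye(2), 2, 2)
row_l = np.concatenate([r1, r2])
col_l = np.concatenate([c1, c2])
data_l = np.concatenate([d1, d2])
print(sp.csr_matrix((data_l, (row_l, col_l))).toarray())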
@@ -44,15 +50,27 @@ def dense_2_sp_lists(M: np.array, tl_row : int, tl_col: int, row_vec=True):
 '''
 L's columns will be 3*N big, where N is the number of timesteps
 Rows will be organized dynamics first, then measurements:
-
 '''
 class satelliteModelBatch:
+    '''
+    A class to perform the factor graph optimization. It stores
+    all the measurements in the class, has several "helper" functions, and the
+    "opt" function that does the actual optimization using the helper functions
+    '''
+
     def __init__(self, meas: np.array, R, Q,
                  dt: float = 5,
                  x0:np.array = np.array([0, 2E7, 4500, 0])):
+        '''
+        Get the measurements, dt (for running the dynamics),
+        the R & Q matrices, and the initial location and initialize
+        the hidden variables
+        '''
         self.N = len(meas)
         # self.N = 10
         self.dt = dt
+        # To do more accurate dynamics, if dt is large, use multiple, smaller
+        # timesteps instead
         if self.dt > 1:
             self.prop_dt = self.dt / ceil(self.dt)
             self.n_timesteps = int(ceil(self.dt))
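(For intuition on the substepping comment added above: dt is split into ceil(dt) equal substeps of length dt/ceil(dt), so the substeps always recompose the full interval. The else branch for dt <= 1 is not shown in this hunk, so the one below is an assumption.)

from math import ceil

for dt in (0.5, 2.5, 5.0):
    if dt > 1:
        prop_dt, n_timesteps = dt / ceil(dt), int(ceil(dt))
    else:
        prop_dt, n_timesteps = dt, 1  # assumed behaviour for small dt; not shown in this hunk
    assert abs(prop_dt * n_timesteps - dt) < 1e-12  # substeps recompose the full dt
    print(dt, '->', n_timesteps, 'substeps of', prop_dt, 's each')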
@@ -77,6 +95,10 @@ def create_init_state(self):
             self.states[i] = self.prop_one_timestep(self.states[i-1])
 
     def prop_one_timestep(self, state):
+        '''
+        This runs the actual dynamics, i.e. given a value for x (really x_k), find
+        what x should be (by dynamics) at the next timestep (self.dt forward in time) -- x_{k+1}
+        '''
         going_out = state.copy()
         for _ in range(self.n_timesteps):
             dist = la.norm(going_out[:2])
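(The body of the propagation loop is outside this hunk. As a rough, generic illustration of what one substep of point-mass orbital dynamics looks like, here is a simple Euler step; the [x, y, vx, vy] state ordering, the gravitational constant, and the integration scheme are assumptions, not the code actually in OrbitFG.py.)

import numpy as np
import numpy.linalg as la

MU_EARTH = 3.986004418e14  # m^3 / s^2, assumed standard gravitational parameter

def euler_substep(state, dt):
    # state assumed to be [x, y, vx, vy]; gravity pulls the satellite toward the origin
    pos, vel = state[:2], state[2:]
    dist = la.norm(pos)
    acc = -MU_EARTH * pos / dist**3
    return np.concatenate([pos + vel * dt, vel + acc * dt])

print(euler_substep(np.array([0., 2E7, 4500., 0.]), 1.0))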
@@ -89,7 +111,9 @@ def prop_one_timestep(self, state):
         return going_out
 
 
-
+    '''
+    Helper functions to tell what rows you are indexing into in the 'L' matrix
+    '''
     def state_idx(self, i: int) -> int:
         return 4*i
 
@@ -123,9 +147,9 @@ def F_mat(self,state):
         Takes in a current state and finds the derivative of the
         propagate forward one step function (prop_one_timestep)
 
-        Inputs: state -- a 3-vector (np.array)
+        Inputs: state -- a 4-vector (np.array)
 
-        Returns a 3x3 np.array
+        Returns a 4x4 np.array
         '''
         F = np.eye(4)
         curr_state = state.copy()
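(Because F_mat is documented as the derivative of prop_one_timestep, it can be spot-checked numerically. The snippet below is a hypothetical check, assuming model is an already-constructed satelliteModelBatch; it only uses the two methods shown above.)

x = np.array([0., 2E7, 4500., 0.])
F = model.F_mat(x)
dx = 1e-3
for j in range(4):
    step = np.zeros(4)
    step[j] = dx
    numeric_col = (model.prop_one_timestep(x + step) - model.prop_one_timestep(x)) / dx
    print('column', j, 'max abs error:', np.abs(numeric_col - F[:, j]).max())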
@@ -151,6 +175,12 @@ def F_mat(self,state):
         return F
 
     def create_L(self):
+        '''
+        This creates the big matrix (L) given the current state of the whole system
+        '''
+        # First, determine how many non zero entries (nnz_entries) will be in the
+        # sparse matrix. Then create the 3 parallel arrays that will be used to
+        # form this matrix
         H_size = 4
         F_size=16 # Should be state size**2
         nnz_entries = 2*F_size*(self.N-1) + H_size*self.N
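(A quick sanity check of the non-zero count computed above, e.g. for N = 3 timesteps. The 2*F_size factor presumably reflects each dynamics constraint touching two consecutive 4-state blocks, plus H_size entries per measurement; that reading is an inference from the formula, not stated in the code.)

N = 3
H_size, F_size = 4, 16
nnz_entries = 2 * F_size * (N - 1) + H_size * N
print(nnz_entries)  # 2*16*2 + 4*3 = 76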
@@ -180,6 +210,11 @@ def create_L(self):
         return sp.csr_matrix((data_l,(row_l,col_l)))
 
     def create_y(self, state_vec=None):
+        '''
+        Compute the residual vector. Can compute it for the
+        current state (pass in "None") or for a new state that you
+        want to test without setting the internal state.
+        '''
         if state_vec is not None:
             state_data = self.vec_to_data(state_vec)
         else:
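(The "test a state without committing it" option in this docstring pairs naturally with add_delta, which appears below. A hypothetical usage, assuming model is a constructed satelliteModelBatch:)

delta_x = np.zeros(4 * model.N)                   # a trial step (all zeros here)
y_now = model.create_y()                          # residuals at the current internal state
y_try = model.create_y(model.add_delta(delta_x))  # residuals if delta_x were applied; internal state untouched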
@@ -211,7 +246,7 @@ def add_delta(self,delta_x: np.array = None) -> np.array:
         This takes the current state and adds on delta_x.
         It DOES NOT modify any internal state
 
-        Inputs: delta_x, a self.N X 3 vector (np.array)
+        Inputs: delta_x, a self.N X 4 vector (np.array)
         Returns: a full state vector of the same size
         '''
         going_out = np.zeros(self.N*4)
@@ -238,7 +273,7 @@ def opt(self):
         Create the Jacobian matrix (L) and the residual vector (y) for
         the current state. Find the best linear approximation to minimize y
         and move in that direction. Repeat until convergence.
-        (This is a Gauss-Newton optimization procedure)
+        (This is a damped Gauss-Newton optimization procedure)
         '''
         finished=False
         num_iters=0
@@ -257,6 +292,7 @@ def opt(self):
             Lty = L.T.dot(y)
             delta_x = spla.spsolve(M,Lty)
             scale = 1
+            # Damp the Gauss-Newton step if it doesn't do what the linearization predicts
             scale_good = False
             while not scale_good:
                 next_y = self.create_y(self.add_delta(delta_x*scale))
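(The comment added above is the "damped" part of the Gauss-Newton loop. The exact acceptance test lives in lines not shown in this hunk; a generic backtracking version of the same idea might look like the sketch below, reusing create_y and add_delta.)

def damped_step(model, y, delta_x, min_scale=1e-6):
    # Shrink the step until the true residual norm actually decreases.
    scale, curr_cost = 1.0, float(y @ y)
    while scale > min_scale:
        next_y = model.create_y(model.add_delta(delta_x * scale))
        if float(next_y @ next_y) < curr_cost:
            return scale, next_y
        scale /= 2.0
    return scale, y  # give up: no scale improved the residual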
@@ -274,8 +310,13 @@ def opt(self):
             self.update_state(delta_x*scale)
             print('iteration',num_iters,'delta_x length was',la.norm(delta_x*scale), 'scale was',scale)
             finished = la.norm(delta_x)<12 or num_iters > 100
-
+
+
 def test_Jacobian(batch_uni, col, dx = .001):
+    '''
+    This function is useful for debugging. If things don't work, try to
+    figure out where the derivative matrix is wrong.
+    '''
     curr_y = batch_uni.create_y()
     orig_state = batch_uni.add_delta()
     delta_x = np.zeros(orig_state.shape)
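(The remainder of test_Jacobian is outside this hunk. One hypothetical way to finish the finite-difference check: perturb a single state entry by dx and compare the change in the residual against the matching column of create_L(). Whether the match is exact or up to a sign depends on how the residual is defined, which is not shown here.)

delta_x[col] = dx
new_y = batch_uni.create_y(batch_uni.add_delta(delta_x))
numeric_col = (new_y - curr_y) / dx
analytic_col = batch_uni.create_L()[:, col].toarray().ravel()
print('max |numeric - analytic|:', np.abs(numeric_col - analytic_col).max())
print('max |numeric + analytic|:', np.abs(numeric_col + analytic_col).max())
# One of the two should be near zero, depending on the residual sign convention.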
