Skip to content

Commit 5f8c3f8

Browse files
authored
Remove unused code and fix JSDoc (#219)
* Removes unused code and fixes JSDoc. * Adds types to JSDoc.
1 parent bcb9a4a commit 5f8c3f8

File tree

1 file changed

+13
-12
lines changed

1 file changed

+13
-12
lines changed

mnist/index.js

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -104,10 +104,22 @@ function createDenseModel() {
104104
return model;
105105
}
106106

107+
/**
108+
* This callback type is used by the `train` function for insertion into
109+
* the model.fit callback loop.
110+
*
111+
* @callback onIterationCallback
112+
* @param {string} eventType Selector for which type of event to fire on.
113+
* @param {number} batchOrEpochNumber The current epoch / batch number
114+
* @param {tf.Logs} logs Logs to append to
115+
*/
116+
107117
/**
108118
* Compile and train the given model.
109119
*
110-
* @param {*} model The model to
120+
* @param {tf.Model} model The model to train.
121+
* @param {onIterationCallback} onIteration A callback to execute every 10
122+
* batches & epoch end.
111123
*/
112124
async function train(model, onIteration) {
113125
ui.logStatus('Training model...');
@@ -117,17 +129,6 @@ async function train(model, onIteration) {
117129
// training so that we can decrease our training loss and increase our
118130
// classification accuracy.
119131

120-
// The learning rate defines the magnitude by which we update our weights each
121-
// training step. The higher the value, the faster our loss values converge,
122-
// but also the more likely we are to overshoot optimal parameters
123-
// when making an update. A learning rate that is too low will take too long
124-
// to find optimal (or good enough) weight parameters while a learning rate
125-
// that is too high may overshoot optimal parameters. Learning rate is one of
126-
// the most important hyperparameters to set correctly. Finding the right
127-
// value takes practice and is often best found empirically by trying many
128-
// values.
129-
const LEARNING_RATE = 0.01;
130-
131132
// We are using rmsprop as our optimizer.
132133
// An optimizer is an iterative method for minimizing a loss function.
133134
// It tries to find the minimum of our loss function with respect to the

0 commit comments

Comments
 (0)