@@ -104,10 +104,22 @@ function createDenseModel() {
   return model;
 }
 
+/**
+ * This callback type is used by the `train` function for insertion into
+ * the model.fit callback loop.
+ *
+ * @callback onIterationCallback
+ * @param {string} eventType Selector for which type of event to fire on.
+ * @param {number} batchOrEpochNumber The current epoch / batch number.
+ * @param {tf.Logs} logs Logs to append to.
+ */
+
 /**
  * Compile and train the given model.
  *
- * @param {*} model The model to
+ * @param {tf.Model} model The model to train.
+ * @param {onIterationCallback} onIteration A callback to execute every 10
+ *     batches & epoch end.
  */
 async function train(model, onIteration) {
   ui.logStatus('Training model...');
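
// Not part of the diff above: a minimal sketch of a callback matching the
// documented `onIterationCallback` type. The event-type strings and the use
// of `logs.loss` / `logs.acc` mirror tf.Model.fit callback hooks, but the
// exact values `train` passes through are an assumption here.
function exampleOnIteration(eventType, batchOrEpochNumber, logs) {
  if (eventType === 'onBatchEnd') {
    console.log(`batch ${batchOrEpochNumber}: loss = ${logs.loss.toFixed(4)}`);
  } else if (eventType === 'onEpochEnd') {
    console.log(`epoch ${batchOrEpochNumber}: accuracy = ${logs.acc}`);
  }
}
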
@@ -117,17 +129,6 @@ async function train(model, onIteration) {
   // training so that we can decrease our training loss and increase our
   // classification accuracy.
 
-  // The learning rate defines the magnitude by which we update our weights each
-  // training step. The higher the value, the faster our loss values converge,
-  // but also the more likely we are to overshoot optimal parameters
-  // when making an update. A learning rate that is too low will take too long
-  // to find optimal (or good enough) weight parameters while a learning rate
-  // that is too high may overshoot optimal parameters. Learning rate is one of
-  // the most important hyperparameters to set correctly. Finding the right
-  // value takes practice and is often best found empirically by trying many
-  // values.
-  const LEARNING_RATE = 0.01;
-
   // We are using rmsprop as our optimizer.
   // An optimizer is an iterative method for minimizing a loss function.
   // It tries to find the minimum of our loss function with respect to the
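
// Not part of the diff above: with the hard-coded LEARNING_RATE removed, the
// compile step can pass the optimizer by name and rely on tf.js defaults for
// rmsprop (an explicit rate would be `tf.train.rmsprop(0.01)`). The loss and
// metric shown are assumptions for a one-hot MNIST-style classifier.
model.compile({
  optimizer: 'rmsprop',
  loss: 'categoricalCrossentropy',
  metrics: ['accuracy'],
});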