1 parent 2248095 commit 8fd6d67
site/en/tutorials/eager/automatic_differentiation.ipynb
@@ -268,10 +268,8 @@
     "source": [
     "x = tf.ones((2, 2))\n",
     " \n",
-    "# TODO(b/78880779): Remove the 'persistent=True' argument and use\n",
     "# a single t.gradient() call when the bug is resolved.\n",
     "with tf.GradientTape(persistent=True) as t:\n",
-    "  # TODO(ashankar): Explain with \"watch\" argument better?\n",
     "  t.watch(x)\n",
     "  y = tf.reduce_sum(x)\n",
     "  z = tf.multiply(y, y)\n",
@@ -315,8 +313,6 @@
     },
     "outputs": [],
-    "# TODO(ashankar): Should we use the persistent tape here instead? Follow up on Tom and Alex's discussion\n",
-    "\n",
     "x = tf.constant(1.0) # Convert the Python 1.0 to a Tensor object\n",
     "\n",
     "with tf.GradientTape() as t:\n",