@@ -82,12 +82,10 @@ def _resource_apply_dense(self, grad, var, apply_state=None):
         def _apply():
             if "apply_state" in self._optimizer._dense_apply_args:
                 train_op = self._optimizer._resource_apply_dense(
-                    accum_gradient.read_value(), var, apply_state=apply_state
+                    accum_gradient, var, apply_state=apply_state
                 )
             else:
-                train_op = self._optimizer._resource_apply_dense(
-                    accum_gradient.read_value(), var
-                )
+                train_op = self._optimizer._resource_apply_dense(accum_gradient, var)
             reset_op = accum_gradient.assign(
                 tf.zeros_like(accum_gradient),
                 use_locking=self._use_locking,
@@ -108,14 +106,14 @@ def _resource_apply_sparse(self, grad: types.TensorLike, var, indices, apply_state=None):
         def _apply():
             if "apply_state" in self._optimizer._sparse_apply_args:
                 train_op = self._optimizer._resource_apply_sparse(
-                    accum_gradient.read_value(),
+                    accum_gradient,
                     var,
                     indices,
                     apply_state=apply_state,
                 )
             else:
                 train_op = self._optimizer._resource_apply_sparse(
-                    accum_gradient.read_value(), var, indices
+                    accum_gradient, var, indices
                 )
             reset_op = accum_gradient.assign(
                 tf.zeros_like(accum_gradient),