Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_training_ops.py: 11%

1687 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

24def apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None): 

25 r"""Update '*var' according to the AdaMax algorithm. 

26 

27 m_t <- beta1 * m_{t-1} + (1 - beta1) * g 

28 v_t <- max(beta2 * v_{t-1}, abs(g)) 

29 variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) 

30 

31 Args: 

32 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

33 Should be from a Variable(). 

34 m: A mutable `Tensor`. Must have the same type as `var`. 

35 Should be from a Variable(). 

36 v: A mutable `Tensor`. Must have the same type as `var`. 

37 Should be from a Variable(). 

38 beta1_power: A `Tensor`. Must have the same type as `var`. 

39 Must be a scalar. 

40 lr: A `Tensor`. Must have the same type as `var`. 

41 Scaling factor. Must be a scalar. 

42 beta1: A `Tensor`. Must have the same type as `var`. 

43 Momentum factor. Must be a scalar. 

44 beta2: A `Tensor`. Must have the same type as `var`. 

45 Momentum factor. Must be a scalar. 

46 epsilon: A `Tensor`. Must have the same type as `var`. 

47 Ridge term. Must be a scalar. 

48 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

49 use_locking: An optional `bool`. Defaults to `False`. 

50 If `True`, updating of the var, m, and v tensors will be protected 

51 by a lock; otherwise the behavior is undefined, but may exhibit less 

52 contention. 

53 name: A name for the operation (optional). 

54 

55 Returns: 

56 A mutable `Tensor`. Has the same type as `var`. 

57 """ 

58 _ctx = _context._context or _context.context() 

59 tld = _ctx._thread_local_data 

60 if tld.is_eager: 

61 raise RuntimeError("apply_ada_max op does not support eager execution. Arg 'out' is a ref.") 

62 # Add nodes to the TensorFlow graph. 

63 if use_locking is None: 

64 use_locking = False 

65 use_locking = _execute.make_bool(use_locking, "use_locking") 

66 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

67 "ApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power, lr=lr, 

68 beta1=beta1, beta2=beta2, epsilon=epsilon, grad=grad, 

69 use_locking=use_locking, name=name) 

70 _result = _outputs[:] 

71 if _execute.must_record_gradient(): 

72 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

73 _op._get_attr_bool("use_locking")) 

74 _inputs_flat = _op.inputs 

75 _execute.record_gradient( 

76 "ApplyAdaMax", _inputs_flat, _attrs, _result) 

77 _result, = _result 

78 return _result 

79 

80ApplyAdaMax = tf_export("raw_ops.ApplyAdaMax")(_ops.to_raw_op(apply_ada_max)) 

81 

82 

83def apply_ada_max_eager_fallback(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking, name, ctx): 

84 raise RuntimeError("apply_ada_max op does not support eager execution. Arg 'out' is a ref.") 
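
# Illustrative NumPy sketch of the AdaMax recurrence quoted in the docstring
# above (beta1_power plays the role of beta1^t). The helper name and plain
# NumPy types are assumed; this is not the C++ kernel behind ApplyAdaMax.
import numpy as np

def _ada_max_update_sketch(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad):
    m = beta1 * m + (1.0 - beta1) * grad           # m_t
    v = np.maximum(beta2 * v, np.abs(grad))        # v_t
    var = var - lr / (1.0 - beta1_power) * m / (v + epsilon)
    return var, m, v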

85 

86def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None): 

87 r"""Update '*var' according to the adadelta scheme. 

88 

89 accum = rho() * accum + (1 - rho()) * grad.square(); 

90 update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; 

91 update_accum = rho() * update_accum + (1 - rho()) * update.square(); 

92 var -= update; 

93 

94 Args: 

95 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

96 Should be from a Variable(). 

97 accum: A mutable `Tensor`. Must have the same type as `var`. 

98 Should be from a Variable(). 

99 accum_update: A mutable `Tensor`. Must have the same type as `var`. 

100 Should be from a Variable(). 

101 lr: A `Tensor`. Must have the same type as `var`. 

102 Scaling factor. Must be a scalar. 

103 rho: A `Tensor`. Must have the same type as `var`. 

104 Decay factor. Must be a scalar. 

105 epsilon: A `Tensor`. Must have the same type as `var`. 

106 Constant factor. Must be a scalar. 

107 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

108 use_locking: An optional `bool`. Defaults to `False`. 

109 If True, updating of the var, accum and update_accum tensors will be protected by 

110 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

111 name: A name for the operation (optional). 

112 

113 Returns: 

114 A mutable `Tensor`. Has the same type as `var`. 

115 """ 

116 _ctx = _context._context or _context.context() 

117 tld = _ctx._thread_local_data 

118 if tld.is_eager: 

119 raise RuntimeError("apply_adadelta op does not support eager execution. Arg 'out' is a ref.") 

120 # Add nodes to the TensorFlow graph. 

121 if use_locking is None: 

122 use_locking = False 

123 use_locking = _execute.make_bool(use_locking, "use_locking") 

124 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

125 "ApplyAdadelta", var=var, accum=accum, accum_update=accum_update, 

126 lr=lr, rho=rho, epsilon=epsilon, grad=grad, 

127 use_locking=use_locking, name=name) 

128 _result = _outputs[:] 

129 if _execute.must_record_gradient(): 

130 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

131 _op._get_attr_bool("use_locking")) 

132 _inputs_flat = _op.inputs 

133 _execute.record_gradient( 

134 "ApplyAdadelta", _inputs_flat, _attrs, _result) 

135 _result, = _result 

136 return _result 

137 

138ApplyAdadelta = tf_export("raw_ops.ApplyAdadelta")(_ops.to_raw_op(apply_adadelta)) 

139 

140 

141def apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, use_locking, name, ctx): 

142 raise RuntimeError("apply_adadelta op does not support eager execution. Arg 'out' is a ref.") 
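
# Illustrative NumPy sketch of the adadelta recurrence exactly as quoted in
# the docstring above, with accum_update standing in for `update_accum`.
# Note the quoted formulas do not reference `lr`; this sketch follows the
# text as written and is not the C++ kernel behind ApplyAdadelta.
import numpy as np

def _adadelta_update_sketch(var, accum, accum_update, rho, epsilon, grad):
    accum = rho * accum + (1.0 - rho) * np.square(grad)
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    accum_update = rho * accum_update + (1.0 - rho) * np.square(update)
    var = var - update
    return var, accum, accum_update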

143 

144def apply_adagrad(var, accum, lr, grad, use_locking=False, update_slots=True, name=None): 

145 r"""Update '*var' according to the adagrad scheme. 

146 

147 accum += grad * grad 

148 var -= lr * grad * (1 / sqrt(accum)) 

149 

150 Args: 

151 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

152 Should be from a Variable(). 

153 accum: A mutable `Tensor`. Must have the same type as `var`. 

154 Should be from a Variable(). 

155 lr: A `Tensor`. Must have the same type as `var`. 

156 Scaling factor. Must be a scalar. 

157 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

158 use_locking: An optional `bool`. Defaults to `False`. 

159 If `True`, updating of the var and accum tensors will be protected 

160 by a lock; otherwise the behavior is undefined, but may exhibit less 

161 contention. 

162 update_slots: An optional `bool`. Defaults to `True`. 

163 name: A name for the operation (optional). 

164 

165 Returns: 

166 A mutable `Tensor`. Has the same type as `var`. 

167 """ 

168 _ctx = _context._context or _context.context() 

169 tld = _ctx._thread_local_data 

170 if tld.is_eager: 

171 raise RuntimeError("apply_adagrad op does not support eager execution. Arg 'out' is a ref.") 

172 # Add nodes to the TensorFlow graph. 

173 if use_locking is None: 

174 use_locking = False 

175 use_locking = _execute.make_bool(use_locking, "use_locking") 

176 if update_slots is None: 

177 update_slots = True 

178 update_slots = _execute.make_bool(update_slots, "update_slots") 

179 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

180 "ApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, 

181 use_locking=use_locking, update_slots=update_slots, 

182 name=name) 

183 _result = _outputs[:] 

184 if _execute.must_record_gradient(): 

185 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

186 _op._get_attr_bool("use_locking"), "update_slots", 

187 _op._get_attr_bool("update_slots")) 

188 _inputs_flat = _op.inputs 

189 _execute.record_gradient( 

190 "ApplyAdagrad", _inputs_flat, _attrs, _result) 

191 _result, = _result 

192 return _result 

193 

194ApplyAdagrad = tf_export("raw_ops.ApplyAdagrad")(_ops.to_raw_op(apply_adagrad)) 

195 

196 

197def apply_adagrad_eager_fallback(var, accum, lr, grad, use_locking, update_slots, name, ctx): 

198 raise RuntimeError("apply_adagrad op does not support eager execution. Arg 'out' is a ref.") 
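
# Illustrative NumPy sketch of the two-line adagrad rule quoted in the
# docstring above (the `update_slots` attr is not modelled); assumed helper,
# not the C++ kernel behind ApplyAdagrad.
import numpy as np

def _adagrad_update_sketch(var, accum, lr, grad):
    accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
    return var, accum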

199 

200def apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None): 

201 r"""Update '*var' according to the proximal adagrad scheme. 

202 

203 Args: 

204 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

205 Should be from a Variable(). 

206 gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`. 

207 Should be from a Variable(). 

208 gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`. 

209 Should be from a Variable(). 

210 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

211 lr: A `Tensor`. Must have the same type as `var`. 

212 Scaling factor. Must be a scalar. 

213 l1: A `Tensor`. Must have the same type as `var`. 

214 L1 regularization. Must be a scalar. 

215 l2: A `Tensor`. Must have the same type as `var`. 

216 L2 regularization. Must be a scalar. 

217 global_step: A `Tensor` of type `int64`. 

218 Training step number. Must be a scalar. 

219 use_locking: An optional `bool`. Defaults to `False`. 

220 If True, updating of the var and accum tensors will be protected by 

221 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

222 name: A name for the operation (optional). 

223 

224 Returns: 

225 A mutable `Tensor`. Has the same type as `var`. 

226 """ 

227 _ctx = _context._context or _context.context() 

228 tld = _ctx._thread_local_data 

229 if tld.is_eager: 

230 raise RuntimeError("apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") 

231 # Add nodes to the TensorFlow graph. 

232 if use_locking is None: 

233 use_locking = False 

234 use_locking = _execute.make_bool(use_locking, "use_locking") 

235 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

236 "ApplyAdagradDA", var=var, gradient_accumulator=gradient_accumulator, 

237 gradient_squared_accumulator=gradient_squared_accumulator, 

238 grad=grad, lr=lr, l1=l1, l2=l2, 

239 global_step=global_step, use_locking=use_locking, 

240 name=name) 

241 _result = _outputs[:] 

242 if _execute.must_record_gradient(): 

243 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

244 _op._get_attr_bool("use_locking")) 

245 _inputs_flat = _op.inputs 

246 _execute.record_gradient( 

247 "ApplyAdagradDA", _inputs_flat, _attrs, _result) 

248 _result, = _result 

249 return _result 

250 

251ApplyAdagradDA = tf_export("raw_ops.ApplyAdagradDA")(_ops.to_raw_op(apply_adagrad_da)) 

252 

253 

254def apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking, name, ctx): 

255 raise RuntimeError("apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") 

256 

257def apply_adagrad_v2(var, accum, lr, epsilon, grad, use_locking=False, update_slots=True, name=None): 

258 r"""Update '*var' according to the adagrad scheme. 

259 

260 accum += grad * grad 

261 var -= lr * grad * (1 / sqrt(accum)) 

262 

263 Args: 

264 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

265 Should be from a Variable(). 

266 accum: A mutable `Tensor`. Must have the same type as `var`. 

267 Should be from a Variable(). 

268 lr: A `Tensor`. Must have the same type as `var`. 

269 Scaling factor. Must be a scalar. 

270 epsilon: A `Tensor`. Must have the same type as `var`. 

271 Constant factor. Must be a scalar. 

272 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

273 use_locking: An optional `bool`. Defaults to `False`. 

274 If `True`, updating of the var and accum tensors will be protected 

275 by a lock; otherwise the behavior is undefined, but may exhibit less 

276 contention. 

277 update_slots: An optional `bool`. Defaults to `True`. 

278 name: A name for the operation (optional). 

279 

280 Returns: 

281 A mutable `Tensor`. Has the same type as `var`. 

282 """ 

283 _ctx = _context._context or _context.context() 

284 tld = _ctx._thread_local_data 

285 if tld.is_eager: 

286 raise RuntimeError("apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") 

287 # Add nodes to the TensorFlow graph. 

288 if use_locking is None: 

289 use_locking = False 

290 use_locking = _execute.make_bool(use_locking, "use_locking") 

291 if update_slots is None: 

292 update_slots = True 

293 update_slots = _execute.make_bool(update_slots, "update_slots") 

294 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

295 "ApplyAdagradV2", var=var, accum=accum, lr=lr, epsilon=epsilon, 

296 grad=grad, use_locking=use_locking, 

297 update_slots=update_slots, name=name) 

298 _result = _outputs[:] 

299 if _execute.must_record_gradient(): 

300 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

301 _op._get_attr_bool("use_locking"), "update_slots", 

302 _op._get_attr_bool("update_slots")) 

303 _inputs_flat = _op.inputs 

304 _execute.record_gradient( 

305 "ApplyAdagradV2", _inputs_flat, _attrs, _result) 

306 _result, = _result 

307 return _result 

308 

309ApplyAdagradV2 = tf_export("raw_ops.ApplyAdagradV2")(_ops.to_raw_op(apply_adagrad_v2)) 

310 

311 

312def apply_adagrad_v2_eager_fallback(var, accum, lr, epsilon, grad, use_locking, update_slots, name, ctx): 

313 raise RuntimeError("apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") 
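
# Illustrative NumPy sketch for ApplyAdagradV2. The generated docstring above
# repeats the V1 formula, so the placement of `epsilon` here (added to the
# denominator for numerical stability) is an assumption, not a statement
# about the C++ kernel.
import numpy as np

def _adagrad_v2_update_sketch(var, accum, lr, epsilon, grad):
    accum = accum + grad * grad
    var = var - lr * grad / (np.sqrt(accum) + epsilon)  # assumed epsilon placement
    return var, accum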

314 

315def apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None): 

316 r"""Update '*var' according to the Adam algorithm. 

317 

318 $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ 

319 $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ 

320 $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ 

321 $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ 

322 

323 Args: 

324 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

325 Should be from a Variable(). 

326 m: A mutable `Tensor`. Must have the same type as `var`. 

327 Should be from a Variable(). 

328 v: A mutable `Tensor`. Must have the same type as `var`. 

329 Should be from a Variable(). 

330 beta1_power: A `Tensor`. Must have the same type as `var`. 

331 Must be a scalar. 

332 beta2_power: A `Tensor`. Must have the same type as `var`. 

333 Must be a scalar. 

334 lr: A `Tensor`. Must have the same type as `var`. 

335 Scaling factor. Must be a scalar. 

336 beta1: A `Tensor`. Must have the same type as `var`. 

337 Momentum factor. Must be a scalar. 

338 beta2: A `Tensor`. Must have the same type as `var`. 

339 Momentum factor. Must be a scalar. 

340 epsilon: A `Tensor`. Must have the same type as `var`. 

341 Ridge term. Must be a scalar. 

342 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

343 use_locking: An optional `bool`. Defaults to `False`. 

344 If `True`, updating of the var, m, and v tensors will be protected 

345 by a lock; otherwise the behavior is undefined, but may exhibit less 

346 contention. 

347 use_nesterov: An optional `bool`. Defaults to `False`. 

348 If `True`, uses the nesterov update. 

349 name: A name for the operation (optional). 

350 

351 Returns: 

352 A mutable `Tensor`. Has the same type as `var`. 

353 """ 

354 _ctx = _context._context or _context.context() 

355 tld = _ctx._thread_local_data 

356 if tld.is_eager: 

357 raise RuntimeError("apply_adam op does not support eager execution. Arg 'out' is a ref.") 

358 # Add nodes to the TensorFlow graph. 

359 if use_locking is None: 

360 use_locking = False 

361 use_locking = _execute.make_bool(use_locking, "use_locking") 

362 if use_nesterov is None: 

363 use_nesterov = False 

364 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

365 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

366 "ApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power, 

367 beta2_power=beta2_power, lr=lr, beta1=beta1, beta2=beta2, 

368 epsilon=epsilon, grad=grad, use_locking=use_locking, 

369 use_nesterov=use_nesterov, name=name) 

370 _result = _outputs[:] 

371 if _execute.must_record_gradient(): 

372 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

373 _op._get_attr_bool("use_locking"), "use_nesterov", 

374 _op._get_attr_bool("use_nesterov")) 

375 _inputs_flat = _op.inputs 

376 _execute.record_gradient( 

377 "ApplyAdam", _inputs_flat, _attrs, _result) 

378 _result, = _result 

379 return _result 

380 

381ApplyAdam = tf_export("raw_ops.ApplyAdam")(_ops.to_raw_op(apply_adam)) 

382 

383 

384def apply_adam_eager_fallback(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking, use_nesterov, name, ctx): 

385 raise RuntimeError("apply_adam op does not support eager execution. Arg 'out' is a ref.") 
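
# Illustrative NumPy sketch of the Adam update quoted in the docstring above,
# including the Nesterov branch (beta1_power/beta2_power stand for beta1^t
# and beta2^t). Assumed helper, not the C++ kernel behind ApplyAdam.
import numpy as np

def _adam_update_sketch(var, m, v, beta1_power, beta2_power, lr,
                        beta1, beta2, epsilon, grad, use_nesterov=False):
    lr_t = lr * np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)
    m = beta1 * m + (1.0 - beta1) * grad
    v = beta2 * v + (1.0 - beta2) * np.square(grad)
    if use_nesterov:
        var = var - (m * beta1 + grad * (1.0 - beta1)) * lr_t / (np.sqrt(v) + epsilon)
    else:
        var = var - m * lr_t / (np.sqrt(v) + epsilon)
    return var, m, v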

386 

387def apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None): 

388 r"""Update '*var' according to the AddSign update. 

389 

390 m_t <- beta1 * m_{t-1} + (1 - beta1) * g 

391 update <- (alpha + sign_decay * sign(g) * sign(m)) * g

392 variable <- variable - lr_t * update 

393 

394 Args: 

395 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

396 Should be from a Variable(). 

397 m: A mutable `Tensor`. Must have the same type as `var`. 

398 Should be from a Variable(). 

399 lr: A `Tensor`. Must have the same type as `var`. 

400 Scaling factor. Must be a scalar. 

401 alpha: A `Tensor`. Must have the same type as `var`. Must be a scalar. 

402 sign_decay: A `Tensor`. Must have the same type as `var`. 

403 Must be a scalar. 

404 beta: A `Tensor`. Must have the same type as `var`. Must be a scalar. 

405 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

406 use_locking: An optional `bool`. Defaults to `False`. 

407 If `True`, updating of the var and m tensors is 

408 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

409 contention. 

410 name: A name for the operation (optional). 

411 

412 Returns: 

413 A mutable `Tensor`. Has the same type as `var`. 

414 """ 

415 _ctx = _context._context or _context.context() 

416 tld = _ctx._thread_local_data 

417 if tld.is_eager: 

418 raise RuntimeError("apply_add_sign op does not support eager execution. Arg 'out' is a ref.") 

419 # Add nodes to the TensorFlow graph. 

420 if use_locking is None: 

421 use_locking = False 

422 use_locking = _execute.make_bool(use_locking, "use_locking") 

423 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

424 "ApplyAddSign", var=var, m=m, lr=lr, alpha=alpha, 

425 sign_decay=sign_decay, beta=beta, grad=grad, 

426 use_locking=use_locking, name=name) 

427 _result = _outputs[:] 

428 if _execute.must_record_gradient(): 

429 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

430 _op._get_attr_bool("use_locking")) 

431 _inputs_flat = _op.inputs 

432 _execute.record_gradient( 

433 "ApplyAddSign", _inputs_flat, _attrs, _result) 

434 _result, = _result 

435 return _result 

436 

437ApplyAddSign = tf_export("raw_ops.ApplyAddSign")(_ops.to_raw_op(apply_add_sign)) 

438 

439 

440def apply_add_sign_eager_fallback(var, m, lr, alpha, sign_decay, beta, grad, use_locking, name, ctx): 

441 raise RuntimeError("apply_add_sign op does not support eager execution. Arg 'out' is a ref.") 
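
# Illustrative NumPy sketch of the AddSign update quoted in the docstring
# above (the docstring writes beta1 for the decay the op exposes as `beta`).
# Assumed helper, not the C++ kernel behind ApplyAddSign.
import numpy as np

def _add_sign_update_sketch(var, m, lr, alpha, sign_decay, beta, grad):
    m = beta * m + (1.0 - beta) * grad
    update = (alpha + sign_decay * np.sign(grad) * np.sign(m)) * grad
    var = var - lr * update
    return var, m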

442 

443def apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None): 

444 r"""Update '*var' according to the centered RMSProp algorithm. 

445 

446 The centered RMSProp algorithm uses an estimate of the centered second moment 

447 (i.e., the variance) for normalization, as opposed to regular RMSProp, which 

448 uses the (uncentered) second moment. This often helps with training, but is 

449 slightly more expensive in terms of computation and memory. 

450 

451 Note that in the dense implementation of this algorithm, mg, ms, and mom will

452 update even if the grad is zero, but in the sparse implementation, mg, ms,

453 and mom will not update in iterations during which the grad is zero.

454 

455 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

456 mean_grad = decay * mean_grad + (1-decay) * gradient 

457 

458 Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) 

459 

460 mg <- rho * mg_{t-1} + (1-rho) * grad 

461 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

462 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) 

463 var <- var - mom 

464 

465 Args: 

466 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

467 Should be from a Variable(). 

468 mg: A mutable `Tensor`. Must have the same type as `var`. 

469 Should be from a Variable(). 

470 ms: A mutable `Tensor`. Must have the same type as `var`. 

471 Should be from a Variable(). 

472 mom: A mutable `Tensor`. Must have the same type as `var`. 

473 Should be from a Variable(). 

474 lr: A `Tensor`. Must have the same type as `var`. 

475 Scaling factor. Must be a scalar. 

476 rho: A `Tensor`. Must have the same type as `var`. 

477 Decay rate. Must be a scalar. 

478 momentum: A `Tensor`. Must have the same type as `var`. 

479 Momentum Scale. Must be a scalar. 

480 epsilon: A `Tensor`. Must have the same type as `var`. 

481 Ridge term. Must be a scalar. 

482 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

483 use_locking: An optional `bool`. Defaults to `False`. 

484 If `True`, updating of the var, mg, ms, and mom tensors is 

485 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

486 contention. 

487 name: A name for the operation (optional). 

488 

489 Returns: 

490 A mutable `Tensor`. Has the same type as `var`. 

491 """ 

492 _ctx = _context._context or _context.context() 

493 tld = _ctx._thread_local_data 

494 if tld.is_eager: 

495 raise RuntimeError("apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.") 

496 # Add nodes to the TensorFlow graph. 

497 if use_locking is None: 

498 use_locking = False 

499 use_locking = _execute.make_bool(use_locking, "use_locking") 

500 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

501 "ApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr, 

502 rho=rho, momentum=momentum, epsilon=epsilon, 

503 grad=grad, use_locking=use_locking, name=name) 

504 _result = _outputs[:] 

505 if _execute.must_record_gradient(): 

506 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

507 _op._get_attr_bool("use_locking")) 

508 _inputs_flat = _op.inputs 

509 _execute.record_gradient( 

510 "ApplyCenteredRMSProp", _inputs_flat, _attrs, _result) 

511 _result, = _result 

512 return _result 

513 

514ApplyCenteredRMSProp = tf_export("raw_ops.ApplyCenteredRMSProp")(_ops.to_raw_op(apply_centered_rms_prop)) 

515 

516 

517def apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking, name, ctx): 

518 raise RuntimeError("apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.") 
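
# Illustrative NumPy sketch of the centered RMSProp recurrence quoted above:
# the centered second moment (ms - mg**2) replaces the raw second moment in
# the denominator. Assumed helper, not the C++ kernel behind
# ApplyCenteredRMSProp.
import numpy as np

def _centered_rms_prop_update_sketch(var, mg, ms, mom, lr, rho, momentum,
                                     epsilon, grad):
    mg = rho * mg + (1.0 - rho) * grad
    ms = rho * ms + (1.0 - rho) * grad * grad
    mom = momentum * mom + lr * grad / np.sqrt(ms - mg * mg + epsilon)
    var = var - mom
    return var, mg, ms, mom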

519 

520def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

521 r"""Update '*var' according to the Ftrl-proximal scheme. 

522 

523 accum_new = accum + grad * grad 

524 linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

525 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

526 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

527 accum = accum_new 

528 

529 Args: 

530 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

531 Should be from a Variable(). 

532 accum: A mutable `Tensor`. Must have the same type as `var`. 

533 Should be from a Variable(). 

534 linear: A mutable `Tensor`. Must have the same type as `var`. 

535 Should be from a Variable(). 

536 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

537 lr: A `Tensor`. Must have the same type as `var`. 

538 Scaling factor. Must be a scalar. 

539 l1: A `Tensor`. Must have the same type as `var`. 

540 L1 regularization. Must be a scalar. 

541 l2: A `Tensor`. Must have the same type as `var`. 

542 L2 regularization. Must be a scalar. 

543 lr_power: A `Tensor`. Must have the same type as `var`. 

544 Scaling factor. Must be a scalar. 

545 use_locking: An optional `bool`. Defaults to `False`. 

546 If `True`, updating of the var and accum tensors will be protected 

547 by a lock; otherwise the behavior is undefined, but may exhibit less 

548 contention. 

549 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

550 name: A name for the operation (optional). 

551 

552 Returns: 

553 A mutable `Tensor`. Has the same type as `var`. 

554 """ 

555 _ctx = _context._context or _context.context() 

556 tld = _ctx._thread_local_data 

557 if tld.is_eager: 

558 raise RuntimeError("apply_ftrl op does not support eager execution. Arg 'out' is a ref.") 

559 # Add nodes to the TensorFlow graph. 

560 if use_locking is None: 

561 use_locking = False 

562 use_locking = _execute.make_bool(use_locking, "use_locking") 

563 if multiply_linear_by_lr is None: 

564 multiply_linear_by_lr = False 

565 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

566 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

567 "ApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, lr=lr, 

568 l1=l1, l2=l2, lr_power=lr_power, use_locking=use_locking, 

569 multiply_linear_by_lr=multiply_linear_by_lr, name=name) 

570 _result = _outputs[:] 

571 if _execute.must_record_gradient(): 

572 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

573 _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", 

574 _op._get_attr_bool("multiply_linear_by_lr")) 

575 _inputs_flat = _op.inputs 

576 _execute.record_gradient( 

577 "ApplyFtrl", _inputs_flat, _attrs, _result) 

578 _result, = _result 

579 return _result 

580 

581ApplyFtrl = tf_export("raw_ops.ApplyFtrl")(_ops.to_raw_op(apply_ftrl)) 

582 

583 

584def apply_ftrl_eager_fallback(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

585 raise RuntimeError("apply_ftrl op does not support eager execution. Arg 'out' is a ref.") 
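
# Illustrative NumPy sketch of the FTRL-proximal step quoted in the docstring
# above, applied elementwise and with `multiply_linear_by_lr` left at its
# False default. Assumed helper, not the C++ kernel behind ApplyFtrl.
import numpy as np

def _ftrl_update_sketch(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + grad * grad
    linear = linear + grad - (accum_new ** -lr_power - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    return var, accum_new, linear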

586 

587def apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

588 r"""Update '*var' according to the Ftrl-proximal scheme. 

589 

590 grad_with_shrinkage = grad + 2 * l2_shrinkage * var 

591 accum_new = accum + grad * grad 

592 linear += grad_with_shrinkage - 

593 (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

594 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

595 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

596 accum = accum_new 

597 

598 Args: 

599 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

600 Should be from a Variable(). 

601 accum: A mutable `Tensor`. Must have the same type as `var`. 

602 Should be from a Variable(). 

603 linear: A mutable `Tensor`. Must have the same type as `var`. 

604 Should be from a Variable(). 

605 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

606 lr: A `Tensor`. Must have the same type as `var`. 

607 Scaling factor. Must be a scalar. 

608 l1: A `Tensor`. Must have the same type as `var`. 

609 L1 regularization. Must be a scalar. 

610 l2: A `Tensor`. Must have the same type as `var`. 

611 L2 regularization. Must be a scalar.

612 l2_shrinkage: A `Tensor`. Must have the same type as `var`. 

613 lr_power: A `Tensor`. Must have the same type as `var`. 

614 Scaling factor. Must be a scalar. 

615 use_locking: An optional `bool`. Defaults to `False`. 

616 If `True`, updating of the var and accum tensors will be protected 

617 by a lock; otherwise the behavior is undefined, but may exhibit less 

618 contention. 

619 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

620 name: A name for the operation (optional). 

621 

622 Returns: 

623 A mutable `Tensor`. Has the same type as `var`. 

624 """ 

625 _ctx = _context._context or _context.context() 

626 tld = _ctx._thread_local_data 

627 if tld.is_eager: 

628 raise RuntimeError("apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") 

629 # Add nodes to the TensorFlow graph. 

630 if use_locking is None: 

631 use_locking = False 

632 use_locking = _execute.make_bool(use_locking, "use_locking") 

633 if multiply_linear_by_lr is None: 

634 multiply_linear_by_lr = False 

635 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

636 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

637 "ApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, lr=lr, 

638 l1=l1, l2=l2, l2_shrinkage=l2_shrinkage, 

639 lr_power=lr_power, use_locking=use_locking, 

640 multiply_linear_by_lr=multiply_linear_by_lr, name=name) 

641 _result = _outputs[:] 

642 if _execute.must_record_gradient(): 

643 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

644 _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", 

645 _op._get_attr_bool("multiply_linear_by_lr")) 

646 _inputs_flat = _op.inputs 

647 _execute.record_gradient( 

648 "ApplyFtrlV2", _inputs_flat, _attrs, _result) 

649 _result, = _result 

650 return _result 

651 

652ApplyFtrlV2 = tf_export("raw_ops.ApplyFtrlV2")(_ops.to_raw_op(apply_ftrl_v2)) 

653 

654 

655def apply_ftrl_v2_eager_fallback(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

656 raise RuntimeError("apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") 
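
# Illustrative NumPy sketch of the FtrlV2 step quoted above: identical to the
# ApplyFtrl sketch except that the l2_shrinkage term is folded into the
# linear update. Assumed helper, not the C++ kernel behind ApplyFtrlV2.
import numpy as np

def _ftrl_v2_update_sketch(var, accum, linear, grad, lr, l1, l2, l2_shrinkage,
                           lr_power):
    grad_with_shrinkage = grad + 2.0 * l2_shrinkage * var
    accum_new = accum + grad * grad
    linear = linear + grad_with_shrinkage - (accum_new ** -lr_power
                                             - accum ** -lr_power) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic, 0.0)
    return var, accum_new, linear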

657 

658def apply_gradient_descent(var, alpha, delta, use_locking=False, name=None): 

659 r"""Update '*var' by subtracting 'alpha' * 'delta' from it. 

660 

661 Args: 

662 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

663 Should be from a Variable(). 

664 alpha: A `Tensor`. Must have the same type as `var`. 

665 Scaling factor. Must be a scalar. 

666 delta: A `Tensor`. Must have the same type as `var`. The change. 

667 use_locking: An optional `bool`. Defaults to `False`. 

668 If `True`, the subtraction will be protected by a lock; 

669 otherwise the behavior is undefined, but may exhibit less contention. 

670 name: A name for the operation (optional). 

671 

672 Returns: 

673 A mutable `Tensor`. Has the same type as `var`. 

674 """ 

675 _ctx = _context._context or _context.context() 

676 tld = _ctx._thread_local_data 

677 if tld.is_eager: 

678 raise RuntimeError("apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 

679 # Add nodes to the TensorFlow graph. 

680 if use_locking is None: 

681 use_locking = False 

682 use_locking = _execute.make_bool(use_locking, "use_locking") 

683 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

684 "ApplyGradientDescent", var=var, alpha=alpha, delta=delta, 

685 use_locking=use_locking, name=name) 

686 _result = _outputs[:] 

687 if _execute.must_record_gradient(): 

688 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

689 _op._get_attr_bool("use_locking")) 

690 _inputs_flat = _op.inputs 

691 _execute.record_gradient( 

692 "ApplyGradientDescent", _inputs_flat, _attrs, _result) 

693 _result, = _result 

694 return _result 

695 

696ApplyGradientDescent = tf_export("raw_ops.ApplyGradientDescent")(_ops.to_raw_op(apply_gradient_descent)) 

697 

698 

699def apply_gradient_descent_eager_fallback(var, alpha, delta, use_locking, name, ctx): 

700 raise RuntimeError("apply_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 
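
# Illustrative sketch: the whole ApplyGradientDescent step is the single
# subtraction described above (works on NumPy arrays or plain floats).
def _gradient_descent_update_sketch(var, alpha, delta):
    return var - alpha * delta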

701 

702def apply_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None): 

703 r"""Update '*var' according to the momentum scheme. 

704 

705 Set use_nesterov = True if you want to use Nesterov momentum. 

706 

707 accum = accum * momentum + grad 

708 var -= lr * accum 

709 

710 Args: 

711 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

712 Should be from a Variable(). 

713 accum: A mutable `Tensor`. Must have the same type as `var`. 

714 Should be from a Variable(). 

715 lr: A `Tensor`. Must have the same type as `var`. 

716 Scaling factor. Must be a scalar. 

717 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

718 momentum: A `Tensor`. Must have the same type as `var`. 

719 Momentum. Must be a scalar. 

720 use_locking: An optional `bool`. Defaults to `False`. 

721 If `True`, updating of the var and accum tensors will be protected 

722 by a lock; otherwise the behavior is undefined, but may exhibit less 

723 contention. 

724 use_nesterov: An optional `bool`. Defaults to `False`. 

725 If `True`, the tensor passed to compute grad will be 

726 var - lr * momentum * accum, so in the end, the var you get is actually 

727 var - lr * momentum * accum. 

728 name: A name for the operation (optional). 

729 

730 Returns: 

731 A mutable `Tensor`. Has the same type as `var`. 

732 """ 

733 _ctx = _context._context or _context.context() 

734 tld = _ctx._thread_local_data 

735 if tld.is_eager: 

736 raise RuntimeError("apply_momentum op does not support eager execution. Arg 'out' is a ref.") 

737 # Add nodes to the TensorFlow graph. 

738 if use_locking is None: 

739 use_locking = False 

740 use_locking = _execute.make_bool(use_locking, "use_locking") 

741 if use_nesterov is None: 

742 use_nesterov = False 

743 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

744 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

745 "ApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, 

746 momentum=momentum, use_locking=use_locking, 

747 use_nesterov=use_nesterov, name=name) 

748 _result = _outputs[:] 

749 if _execute.must_record_gradient(): 

750 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

751 _op._get_attr_bool("use_locking"), "use_nesterov", 

752 _op._get_attr_bool("use_nesterov")) 

753 _inputs_flat = _op.inputs 

754 _execute.record_gradient( 

755 "ApplyMomentum", _inputs_flat, _attrs, _result) 

756 _result, = _result 

757 return _result 

758 

759ApplyMomentum = tf_export("raw_ops.ApplyMomentum")(_ops.to_raw_op(apply_momentum)) 

760 

761 

762def apply_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking, use_nesterov, name, ctx): 

763 raise RuntimeError("apply_momentum op does not support eager execution. Arg 'out' is a ref.") 
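
# Illustrative sketch of the plain (use_nesterov=False) momentum rule quoted
# in the docstring above; the Nesterov branch is not modelled. Assumed
# helper, not the C++ kernel behind ApplyMomentum.
def _momentum_update_sketch(var, accum, lr, grad, momentum):
    accum = accum * momentum + grad
    var = var - lr * accum
    return var, accum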

764 

765def apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None): 

766 r"""Update '*var' according to the PowerSign update.

767 

768 m_t <- beta1 * m_{t-1} + (1 - beta1) * g 

769 update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g 

770 variable <- variable - lr_t * update 

771 

772 Args: 

773 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

774 Should be from a Variable(). 

775 m: A mutable `Tensor`. Must have the same type as `var`. 

776 Should be from a Variable(). 

777 lr: A `Tensor`. Must have the same type as `var`. 

778 Scaling factor. Must be a scalar. 

779 logbase: A `Tensor`. Must have the same type as `var`. Must be a scalar. 

780 sign_decay: A `Tensor`. Must have the same type as `var`. 

781 Must be a scalar. 

782 beta: A `Tensor`. Must have the same type as `var`. Must be a scalar. 

783 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

784 use_locking: An optional `bool`. Defaults to `False`. 

785 If `True`, updating of the var and m tensors is 

786 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

787 contention. 

788 name: A name for the operation (optional). 

789 

790 Returns: 

791 A mutable `Tensor`. Has the same type as `var`. 

792 """ 

793 _ctx = _context._context or _context.context() 

794 tld = _ctx._thread_local_data 

795 if tld.is_eager: 

796 raise RuntimeError("apply_power_sign op does not support eager execution. Arg 'out' is a ref.") 

797 # Add nodes to the TensorFlow graph. 

798 if use_locking is None: 

799 use_locking = False 

800 use_locking = _execute.make_bool(use_locking, "use_locking") 

801 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

802 "ApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase, 

803 sign_decay=sign_decay, beta=beta, grad=grad, 

804 use_locking=use_locking, name=name) 

805 _result = _outputs[:] 

806 if _execute.must_record_gradient(): 

807 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

808 _op._get_attr_bool("use_locking")) 

809 _inputs_flat = _op.inputs 

810 _execute.record_gradient( 

811 "ApplyPowerSign", _inputs_flat, _attrs, _result) 

812 _result, = _result 

813 return _result 

814 

815ApplyPowerSign = tf_export("raw_ops.ApplyPowerSign")(_ops.to_raw_op(apply_power_sign)) 

816 

817 

818def apply_power_sign_eager_fallback(var, m, lr, logbase, sign_decay, beta, grad, use_locking, name, ctx): 

819 raise RuntimeError("apply_power_sign op does not support eager execution. Arg 'out' is a ref.") 
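
# Illustrative NumPy sketch of the PowerSign update quoted in the docstring
# above (the docstring writes beta1 for the decay the op exposes as `beta`).
# Assumed helper, not the C++ kernel behind ApplyPowerSign.
import numpy as np

def _power_sign_update_sketch(var, m, lr, logbase, sign_decay, beta, grad):
    m = beta * m + (1.0 - beta) * grad
    update = np.exp(logbase * sign_decay * np.sign(grad) * np.sign(m)) * grad
    var = var - lr * update
    return var, m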

820 

821def apply_proximal_adagrad(var, accum, lr, l1, l2, grad, use_locking=False, name=None): 

822 r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. 

823 

824 accum += grad * grad 

825 prox_v = var - lr * grad * (1 / sqrt(accum)) 

826 var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} 

827 

828 Args: 

829 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

830 Should be from a Variable(). 

831 accum: A mutable `Tensor`. Must have the same type as `var`. 

832 Should be from a Variable(). 

833 lr: A `Tensor`. Must have the same type as `var`. 

834 Scaling factor. Must be a scalar. 

835 l1: A `Tensor`. Must have the same type as `var`. 

836 L1 regularization. Must be a scalar. 

837 l2: A `Tensor`. Must have the same type as `var`. 

838 L2 regularization. Must be a scalar. 

839 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

840 use_locking: An optional `bool`. Defaults to `False`. 

841 If True, updating of the var and accum tensors will be protected by 

842 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

843 name: A name for the operation (optional). 

844 

845 Returns: 

846 A mutable `Tensor`. Has the same type as `var`. 

847 """ 

848 _ctx = _context._context or _context.context() 

849 tld = _ctx._thread_local_data 

850 if tld.is_eager: 

851 raise RuntimeError("apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") 

852 # Add nodes to the TensorFlow graph. 

853 if use_locking is None: 

854 use_locking = False 

855 use_locking = _execute.make_bool(use_locking, "use_locking") 

856 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

857 "ApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, l2=l2, 

858 grad=grad, use_locking=use_locking, name=name) 

859 _result = _outputs[:] 

860 if _execute.must_record_gradient(): 

861 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

862 _op._get_attr_bool("use_locking")) 

863 _inputs_flat = _op.inputs 

864 _execute.record_gradient( 

865 "ApplyProximalAdagrad", _inputs_flat, _attrs, _result) 

866 _result, = _result 

867 return _result 

868 

869ApplyProximalAdagrad = tf_export("raw_ops.ApplyProximalAdagrad")(_ops.to_raw_op(apply_proximal_adagrad)) 

870 

871 

872def apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, use_locking, name, ctx): 

873 raise RuntimeError("apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") 
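
# Illustrative NumPy sketch of the FOBOS-with-Adagrad step quoted above: an
# adagrad step followed by an L1 soft-threshold and L2 shrink of the result.
# Assumed helper, not the C++ kernel behind ApplyProximalAdagrad.
import numpy as np

def _proximal_adagrad_update_sketch(var, accum, lr, l1, l2, grad):
    accum = accum + grad * grad
    prox_v = var - lr * grad / np.sqrt(accum)
    var = np.sign(prox_v) / (1.0 + lr * l2) * np.maximum(np.abs(prox_v) - lr * l1, 0.0)
    return var, accum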

874 

875def apply_proximal_gradient_descent(var, alpha, l1, l2, delta, use_locking=False, name=None): 

876 r"""Update '*var' as FOBOS algorithm with fixed learning rate. 

877 

878 prox_v = var - alpha * delta 

879 var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} 

880 

881 Args: 

882 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

883 Should be from a Variable(). 

884 alpha: A `Tensor`. Must have the same type as `var`. 

885 Scaling factor. Must be a scalar. 

886 l1: A `Tensor`. Must have the same type as `var`. 

887 L1 regularization. Must be a scalar. 

888 l2: A `Tensor`. Must have the same type as `var`. 

889 L2 regularization. Must be a scalar. 

890 delta: A `Tensor`. Must have the same type as `var`. The change. 

891 use_locking: An optional `bool`. Defaults to `False`. 

892 If True, the subtraction will be protected by a lock; 

893 otherwise the behavior is undefined, but may exhibit less contention. 

894 name: A name for the operation (optional). 

895 

896 Returns: 

897 A mutable `Tensor`. Has the same type as `var`. 

898 """ 

899 _ctx = _context._context or _context.context() 

900 tld = _ctx._thread_local_data 

901 if tld.is_eager: 

902 raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 

903 # Add nodes to the TensorFlow graph. 

904 if use_locking is None: 

905 use_locking = False 

906 use_locking = _execute.make_bool(use_locking, "use_locking") 

907 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

908 "ApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, l2=l2, 

909 delta=delta, use_locking=use_locking, 

910 name=name) 

911 _result = _outputs[:] 

912 if _execute.must_record_gradient(): 

913 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

914 _op._get_attr_bool("use_locking")) 

915 _inputs_flat = _op.inputs 

916 _execute.record_gradient( 

917 "ApplyProximalGradientDescent", _inputs_flat, _attrs, _result) 

918 _result, = _result 

919 return _result 

920 

921ApplyProximalGradientDescent = tf_export("raw_ops.ApplyProximalGradientDescent")(_ops.to_raw_op(apply_proximal_gradient_descent)) 

922 

923 

924def apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, delta, use_locking, name, ctx): 

925 raise RuntimeError("apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 
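
# Illustrative NumPy sketch of the fixed-learning-rate FOBOS step quoted in
# the docstring above. Assumed helper, not the C++ kernel behind
# ApplyProximalGradientDescent.
import numpy as np

def _proximal_gradient_descent_update_sketch(var, alpha, l1, l2, delta):
    prox_v = var - alpha * delta
    return np.sign(prox_v) / (1.0 + alpha * l2) * np.maximum(np.abs(prox_v) - alpha * l1, 0.0)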

926 

927def apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None): 

928 r"""Update '*var' according to the RMSProp algorithm. 

929 

930 Note that in the dense implementation of this algorithm, ms and mom will

931 update even if the grad is zero, but in the sparse implementation, ms

932 and mom will not update in iterations during which the grad is zero.

933 

934 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

935 Delta = learning_rate * gradient / sqrt(mean_square + epsilon) 

936 

937 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

938 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) 

939 var <- var - mom 

940 

941 Args: 

942 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

943 Should be from a Variable(). 

944 ms: A mutable `Tensor`. Must have the same type as `var`. 

945 Should be from a Variable(). 

946 mom: A mutable `Tensor`. Must have the same type as `var`. 

947 Should be from a Variable(). 

948 lr: A `Tensor`. Must have the same type as `var`. 

949 Scaling factor. Must be a scalar. 

950 rho: A `Tensor`. Must have the same type as `var`. 

951 Decay rate. Must be a scalar. 

952 momentum: A `Tensor`. Must have the same type as `var`. 

953 epsilon: A `Tensor`. Must have the same type as `var`. 

954 Ridge term. Must be a scalar. 

955 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

956 use_locking: An optional `bool`. Defaults to `False`. 

957 If `True`, updating of the var, ms, and mom tensors is protected 

958 by a lock; otherwise the behavior is undefined, but may exhibit less 

959 contention. 

960 name: A name for the operation (optional). 

961 

962 Returns: 

963 A mutable `Tensor`. Has the same type as `var`. 

964 """ 

965 _ctx = _context._context or _context.context() 

966 tld = _ctx._thread_local_data 

967 if tld.is_eager: 

968 raise RuntimeError("apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") 

969 # Add nodes to the TensorFlow graph. 

970 if use_locking is None: 

971 use_locking = False 

972 use_locking = _execute.make_bool(use_locking, "use_locking") 

973 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

974 "ApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, 

975 momentum=momentum, epsilon=epsilon, grad=grad, 

976 use_locking=use_locking, name=name) 

977 _result = _outputs[:] 

978 if _execute.must_record_gradient(): 

979 _attrs = ("T", _op._get_attr_type("T"), "use_locking", 

980 _op._get_attr_bool("use_locking")) 

981 _inputs_flat = _op.inputs 

982 _execute.record_gradient( 

983 "ApplyRMSProp", _inputs_flat, _attrs, _result) 

984 _result, = _result 

985 return _result 

986 

987ApplyRMSProp = tf_export("raw_ops.ApplyRMSProp")(_ops.to_raw_op(apply_rms_prop)) 

988 

989 

990def apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking, name, ctx): 

991 raise RuntimeError("apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") 
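
# Illustrative NumPy sketch of the RMSProp recurrence quoted in the docstring
# above. Assumed helper, not the C++ kernel behind ApplyRMSProp.
import numpy as np

def _rms_prop_update_sketch(var, ms, mom, lr, rho, momentum, epsilon, grad):
    ms = rho * ms + (1.0 - rho) * grad * grad
    mom = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
    var = var - mom
    return var, ms, mom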

992 

993def resource_apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None): 

994 r"""Update '*var' according to the AdaMax algorithm. 

995 

996 m_t <- beta1 * m_{t-1} + (1 - beta1) * g 

997 v_t <- max(beta2 * v_{t-1}, abs(g)) 

998 variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) 

999 

1000 Args: 

1001 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1002 m: A `Tensor` of type `resource`. Should be from a Variable(). 

1003 v: A `Tensor` of type `resource`. Should be from a Variable(). 

1004 beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1005 Must be a scalar. 

1006 lr: A `Tensor`. Must have the same type as `beta1_power`. 

1007 Scaling factor. Must be a scalar. 

1008 beta1: A `Tensor`. Must have the same type as `beta1_power`. 

1009 Momentum factor. Must be a scalar. 

1010 beta2: A `Tensor`. Must have the same type as `beta1_power`. 

1011 Momentum factor. Must be a scalar. 

1012 epsilon: A `Tensor`. Must have the same type as `beta1_power`. 

1013 Ridge term. Must be a scalar. 

1014 grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient. 

1015 use_locking: An optional `bool`. Defaults to `False`. 

1016 If `True`, updating of the var, m, and v tensors will be protected 

1017 by a lock; otherwise the behavior is undefined, but may exhibit less 

1018 contention. 

1019 name: A name for the operation (optional). 

1020 

1021 Returns: 

1022 The created Operation. 

1023 """ 

1024 _ctx = _context._context or _context.context() 

1025 tld = _ctx._thread_local_data 

1026 if tld.is_eager: 

1027 try: 

1028 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1029 _ctx, "ResourceApplyAdaMax", name, var, m, v, beta1_power, lr, beta1, 

1030 beta2, epsilon, grad, "use_locking", use_locking) 

1031 return _result 

1032 except _core._NotOkStatusException as e: 

1033 _ops.raise_from_not_ok_status(e, name) 

1034 except _core._FallbackException: 

1035 pass 

1036 try: 

1037 return resource_apply_ada_max_eager_fallback( 

1038 var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, 

1039 use_locking=use_locking, name=name, ctx=_ctx) 

1040 except _core._SymbolicException: 

1041 pass # Add nodes to the TensorFlow graph. 

1042 # Add nodes to the TensorFlow graph. 

1043 if use_locking is None: 

1044 use_locking = False 

1045 use_locking = _execute.make_bool(use_locking, "use_locking") 

1046 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1047 "ResourceApplyAdaMax", var=var, m=m, v=v, beta1_power=beta1_power, 

1048 lr=lr, beta1=beta1, beta2=beta2, 

1049 epsilon=epsilon, grad=grad, 

1050 use_locking=use_locking, name=name) 

1051 return _op 

1052ResourceApplyAdaMax = tf_export("raw_ops.ResourceApplyAdaMax")(_ops.to_raw_op(resource_apply_ada_max)) 

1053 

1054 

1055def resource_apply_ada_max_eager_fallback(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad, use_locking, name, ctx): 

1056 if use_locking is None: 

1057 use_locking = False 

1058 use_locking = _execute.make_bool(use_locking, "use_locking") 

1059 _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1060 (beta1_power, lr, beta1, beta2, epsilon, grad) = _inputs_T 

1061 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1062 m = _ops.convert_to_tensor(m, _dtypes.resource) 

1063 v = _ops.convert_to_tensor(v, _dtypes.resource) 

1064 _inputs_flat = [var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad] 

1065 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1066 _result = _execute.execute(b"ResourceApplyAdaMax", 0, inputs=_inputs_flat, 

1067 attrs=_attrs, ctx=ctx, name=name) 

1068 _result = None 

1069 return _result 
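
# Illustrative usage sketch: unlike the ref-variable ApplyAdaMax wrapper
# earlier in this file, the resource variant can run eagerly against
# tf.Variable handles. The values and the wrapper function below are assumed
# for illustration only; the call goes through the public tf.raw_ops
# endpoint exported above.
def _resource_apply_ada_max_usage_example():
    import tensorflow as tf

    var = tf.Variable([1.0, 2.0])
    m = tf.Variable([0.0, 0.0])
    v = tf.Variable([0.0, 0.0])
    tf.raw_ops.ResourceApplyAdaMax(
        var=var.handle, m=m.handle, v=v.handle,
        beta1_power=tf.constant(0.9), lr=tf.constant(0.001),
        beta1=tf.constant(0.9), beta2=tf.constant(0.999),
        epsilon=tf.constant(1e-7), grad=tf.constant([0.1, -0.2]))
    return var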

1070 

1071 

1072def resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, use_locking=False, name=None): 

1073 r"""Update '*var' according to the adadelta scheme. 

1074 

1075 accum = rho() * accum + (1 - rho()) * grad.square(); 

1076 update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; 

1077 update_accum = rho() * update_accum + (1 - rho()) * update.square(); 

1078 var -= update; 

1079 

1080 Args: 

1081 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1082 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1083 accum_update: A `Tensor` of type `resource`. Should be from a Variable(). 

1084 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1085 Scaling factor. Must be a scalar. 

1086 rho: A `Tensor`. Must have the same type as `lr`. 

1087 Decay factor. Must be a scalar. 

1088 epsilon: A `Tensor`. Must have the same type as `lr`. 

1089 Constant factor. Must be a scalar. 

1090 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1091 use_locking: An optional `bool`. Defaults to `False`. 

1092 If True, updating of the var, accum and update_accum tensors will be protected by 

1093 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

1094 name: A name for the operation (optional). 

1095 

1096 Returns: 

1097 The created Operation. 

1098 """ 

1099 _ctx = _context._context or _context.context() 

1100 tld = _ctx._thread_local_data 

1101 if tld.is_eager: 

1102 try: 

1103 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1104 _ctx, "ResourceApplyAdadelta", name, var, accum, accum_update, lr, 

1105 rho, epsilon, grad, "use_locking", use_locking) 

1106 return _result 

1107 except _core._NotOkStatusException as e: 

1108 _ops.raise_from_not_ok_status(e, name) 

1109 except _core._FallbackException: 

1110 pass 

1111 try: 

1112 return resource_apply_adadelta_eager_fallback( 

1113 var, accum, accum_update, lr, rho, epsilon, grad, 

1114 use_locking=use_locking, name=name, ctx=_ctx) 

1115 except _core._SymbolicException: 

1116 pass # Add nodes to the TensorFlow graph. 

1117 # Add nodes to the TensorFlow graph. 

1118 if use_locking is None: 

1119 use_locking = False 

1120 use_locking = _execute.make_bool(use_locking, "use_locking") 

1121 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1122 "ResourceApplyAdadelta", var=var, accum=accum, 

1123 accum_update=accum_update, lr=lr, rho=rho, 

1124 epsilon=epsilon, grad=grad, 

1125 use_locking=use_locking, name=name) 

1126 return _op 

1127ResourceApplyAdadelta = tf_export("raw_ops.ResourceApplyAdadelta")(_ops.to_raw_op(resource_apply_adadelta)) 

1128 

1129 

1130def resource_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, use_locking, name, ctx): 

1131 if use_locking is None: 

1132 use_locking = False 

1133 use_locking = _execute.make_bool(use_locking, "use_locking") 

1134 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1135 (lr, rho, epsilon, grad) = _inputs_T 

1136 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1137 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

1138 accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource) 

1139 _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad] 

1140 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1141 _result = _execute.execute(b"ResourceApplyAdadelta", 0, inputs=_inputs_flat, 

1142 attrs=_attrs, ctx=ctx, name=name) 

1143 _result = None 

1144 return _result 

1145 

1146 

1147def resource_apply_adagrad(var, accum, lr, grad, use_locking=False, update_slots=True, name=None): 

1148 r"""Update '*var' according to the adagrad scheme. 

1149 

1150 accum += grad * grad 

1151 var -= lr * grad * (1 / sqrt(accum)) 

1152 

1153 Args: 

1154 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1155 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1156 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1157 Scaling factor. Must be a scalar. 

1158 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1159 use_locking: An optional `bool`. Defaults to `False`. 

1160 If `True`, updating of the var and accum tensors will be protected 

1161 by a lock; otherwise the behavior is undefined, but may exhibit less 

1162 contention. 

1163 update_slots: An optional `bool`. Defaults to `True`. 

1164 name: A name for the operation (optional). 

1165 

1166 Returns: 

1167 The created Operation. 

1168 """ 

1169 _ctx = _context._context or _context.context() 

1170 tld = _ctx._thread_local_data 

1171 if tld.is_eager: 

1172 try: 

1173 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1174 _ctx, "ResourceApplyAdagrad", name, var, accum, lr, grad, 

1175 "use_locking", use_locking, "update_slots", update_slots) 

1176 return _result 

1177 except _core._NotOkStatusException as e: 

1178 _ops.raise_from_not_ok_status(e, name) 

1179 except _core._FallbackException: 

1180 pass 

1181 try: 

1182 return resource_apply_adagrad_eager_fallback( 

1183 var, accum, lr, grad, use_locking=use_locking, 

1184 update_slots=update_slots, name=name, ctx=_ctx) 

1185 except _core._SymbolicException: 

1186 pass # Add nodes to the TensorFlow graph. 

1187 # Add nodes to the TensorFlow graph. 

1188 if use_locking is None: 

1189 use_locking = False 

1190 use_locking = _execute.make_bool(use_locking, "use_locking") 

1191 if update_slots is None: 

1192 update_slots = True 

1193 update_slots = _execute.make_bool(update_slots, "update_slots") 

1194 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1195 "ResourceApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, 

1196 use_locking=use_locking, 

1197 update_slots=update_slots, name=name) 

1198 return _op 

1199ResourceApplyAdagrad = tf_export("raw_ops.ResourceApplyAdagrad")(_ops.to_raw_op(resource_apply_adagrad)) 
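
# NOTE: illustration only, not generated code. A plain-NumPy transcription of the update rule
# documented above (accum += grad * grad; var -= lr * grad / sqrt(accum)); a readable
# restatement of the documented formula, not the kernel implementation.
import numpy as np

def adagrad_reference(var, accum, lr, grad):
    accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
    return var, accum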

1200 

1201 

1202def resource_apply_adagrad_eager_fallback(var, accum, lr, grad, use_locking, update_slots, name, ctx): 

1203 if use_locking is None: 

1204 use_locking = False 

1205 use_locking = _execute.make_bool(use_locking, "use_locking") 

1206 if update_slots is None: 

1207 update_slots = True 

1208 update_slots = _execute.make_bool(update_slots, "update_slots") 

1209 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1210 (lr, grad) = _inputs_T 

1211 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1212 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

1213 _inputs_flat = [var, accum, lr, grad] 

1214 _attrs = ("T", _attr_T, "use_locking", use_locking, "update_slots", 

1215 update_slots) 

1216 _result = _execute.execute(b"ResourceApplyAdagrad", 0, inputs=_inputs_flat, 

1217 attrs=_attrs, ctx=ctx, name=name) 

1218 _result = None 

1219 return _result 

1220 

1221 

1222def resource_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking=False, name=None): 

1223 r"""Update '*var' according to the proximal adagrad scheme. 

1224 

1225 Args: 

1226 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1227 gradient_accumulator: A `Tensor` of type `resource`. 

1228 Should be from a Variable(). 

1229 gradient_squared_accumulator: A `Tensor` of type `resource`. 

1230 Should be from a Variable(). 

1231 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1232 The gradient. 

1233 lr: A `Tensor`. Must have the same type as `grad`. 

1234 Scaling factor. Must be a scalar. 

1235 l1: A `Tensor`. Must have the same type as `grad`. 

1236 L1 regularization. Must be a scalar. 

1237 l2: A `Tensor`. Must have the same type as `grad`. 

1238 L2 regularization. Must be a scalar. 

1239 global_step: A `Tensor` of type `int64`. 

1240 Training step number. Must be a scalar. 

1241 use_locking: An optional `bool`. Defaults to `False`. 

1242 If True, updating of the var and accum tensors will be protected by 

1243 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

1244 name: A name for the operation (optional). 

1245 

1246 Returns: 

1247 The created Operation. 

1248 """ 

1249 _ctx = _context._context or _context.context() 

1250 tld = _ctx._thread_local_data 

1251 if tld.is_eager: 

1252 try: 

1253 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1254 _ctx, "ResourceApplyAdagradDA", name, var, gradient_accumulator, 

1255 gradient_squared_accumulator, grad, lr, l1, l2, global_step, 

1256 "use_locking", use_locking) 

1257 return _result 

1258 except _core._NotOkStatusException as e: 

1259 _ops.raise_from_not_ok_status(e, name) 

1260 except _core._FallbackException: 

1261 pass 

1262 try: 

1263 return resource_apply_adagrad_da_eager_fallback( 

1264 var, gradient_accumulator, gradient_squared_accumulator, grad, lr, 

1265 l1, l2, global_step, use_locking=use_locking, name=name, ctx=_ctx) 

1266 except _core._SymbolicException: 

1267 pass # Add nodes to the TensorFlow graph. 

1268 # Add nodes to the TensorFlow graph. 

1269 if use_locking is None: 

1270 use_locking = False 

1271 use_locking = _execute.make_bool(use_locking, "use_locking") 

1272 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1273 "ResourceApplyAdagradDA", var=var, 

1274 gradient_accumulator=gradient_accumulator, 

1275 gradient_squared_accumulator=gradient_squared_accumulator, 

1276 grad=grad, lr=lr, l1=l1, l2=l2, 

1277 global_step=global_step, 

1278 use_locking=use_locking, name=name) 

1279 return _op 

1280ResourceApplyAdagradDA = tf_export("raw_ops.ResourceApplyAdagradDA")(_ops.to_raw_op(resource_apply_adagrad_da)) 

1281 

1282 

1283def resource_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step, use_locking, name, ctx): 

1284 if use_locking is None: 

1285 use_locking = False 

1286 use_locking = _execute.make_bool(use_locking, "use_locking") 

1287 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1288 (grad, lr, l1, l2) = _inputs_T 

1289 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1290 gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource) 

1291 gradient_squared_accumulator = _ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource) 

1292 global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) 

1293 _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step] 

1294 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1295 _result = _execute.execute(b"ResourceApplyAdagradDA", 0, 

1296 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1297 name=name) 

1298 _result = None 

1299 return _result 

1300 

1301 

1302def resource_apply_adagrad_v2(var, accum, lr, epsilon, grad, use_locking=False, update_slots=True, name=None): 

1303 r"""Update '*var' according to the adagrad scheme. 

1304 

1305 accum += grad * grad 

1306 var -= lr * grad * (1 / (sqrt(accum) + epsilon)) 

1307 

1308 Args: 

1309 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1310 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1311 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1312 Scaling factor. Must be a scalar. 

1313 epsilon: A `Tensor`. Must have the same type as `lr`. 

1314 Constant factor. Must be a scalar. 

1315 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1316 use_locking: An optional `bool`. Defaults to `False`. 

1317 If `True`, updating of the var and accum tensors will be protected 

1318 by a lock; otherwise the behavior is undefined, but may exhibit less 

1319 contention. 

1320 update_slots: An optional `bool`. Defaults to `True`. 

1321 name: A name for the operation (optional). 

1322 

1323 Returns: 

1324 The created Operation. 

1325 """ 

1326 _ctx = _context._context or _context.context() 

1327 tld = _ctx._thread_local_data 

1328 if tld.is_eager: 

1329 try: 

1330 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1331 _ctx, "ResourceApplyAdagradV2", name, var, accum, lr, epsilon, grad, 

1332 "use_locking", use_locking, "update_slots", update_slots) 

1333 return _result 

1334 except _core._NotOkStatusException as e: 

1335 _ops.raise_from_not_ok_status(e, name) 

1336 except _core._FallbackException: 

1337 pass 

1338 try: 

1339 return resource_apply_adagrad_v2_eager_fallback( 

1340 var, accum, lr, epsilon, grad, use_locking=use_locking, 

1341 update_slots=update_slots, name=name, ctx=_ctx) 

1342 except _core._SymbolicException: 

1343 pass # Add nodes to the TensorFlow graph. 

1344 # Add nodes to the TensorFlow graph. 

1345 if use_locking is None: 

1346 use_locking = False 

1347 use_locking = _execute.make_bool(use_locking, "use_locking") 

1348 if update_slots is None: 

1349 update_slots = True 

1350 update_slots = _execute.make_bool(update_slots, "update_slots") 

1351 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1352 "ResourceApplyAdagradV2", var=var, accum=accum, lr=lr, 

1353 epsilon=epsilon, grad=grad, 

1354 use_locking=use_locking, 

1355 update_slots=update_slots, name=name) 

1356 return _op 

1357ResourceApplyAdagradV2 = tf_export("raw_ops.ResourceApplyAdagradV2")(_ops.to_raw_op(resource_apply_adagrad_v2)) 
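
# NOTE: illustration only, not generated code. Per the formulas above, AdagradV2 differs from
# Adagrad only in adding epsilon to the denominator after the square root. A NumPy restatement
# of the documented rule:
import numpy as np

def adagrad_v2_reference(var, accum, lr, epsilon, grad):
    accum = accum + grad * grad
    var = var - lr * grad / (np.sqrt(accum) + epsilon)
    return var, accum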

1358 

1359 

1360def resource_apply_adagrad_v2_eager_fallback(var, accum, lr, epsilon, grad, use_locking, update_slots, name, ctx): 

1361 if use_locking is None: 

1362 use_locking = False 

1363 use_locking = _execute.make_bool(use_locking, "use_locking") 

1364 if update_slots is None: 

1365 update_slots = True 

1366 update_slots = _execute.make_bool(update_slots, "update_slots") 

1367 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1368 (lr, epsilon, grad) = _inputs_T 

1369 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1370 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

1371 _inputs_flat = [var, accum, lr, epsilon, grad] 

1372 _attrs = ("T", _attr_T, "use_locking", use_locking, "update_slots", 

1373 update_slots) 

1374 _result = _execute.execute(b"ResourceApplyAdagradV2", 0, 

1375 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1376 name=name) 

1377 _result = None 

1378 return _result 

1379 

1380 

1381def resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, use_nesterov=False, name=None): 

1382 r"""Update '*var' according to the Adam algorithm. 

1383 

1384 $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ 

1385 $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ 

1386 $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ 

1387 $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\ \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ 

1388 

1389 Args: 

1390 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1391 m: A `Tensor` of type `resource`. Should be from a Variable(). 

1392 v: A `Tensor` of type `resource`. Should be from a Variable(). 

1393 beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1394 Must be a scalar. 

1395 beta2_power: A `Tensor`. Must have the same type as `beta1_power`. 

1396 Must be a scalar. 

1397 lr: A `Tensor`. Must have the same type as `beta1_power`. 

1398 Scaling factor. Must be a scalar. 

1399 beta1: A `Tensor`. Must have the same type as `beta1_power`. 

1400 Momentum factor. Must be a scalar. 

1401 beta2: A `Tensor`. Must have the same type as `beta1_power`. 

1402 Momentum factor. Must be a scalar. 

1403 epsilon: A `Tensor`. Must have the same type as `beta1_power`. 

1404 Ridge term. Must be a scalar. 

1405 grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient. 

1406 use_locking: An optional `bool`. Defaults to `False`. 

1407 If `True`, updating of the var, m, and v tensors will be protected 

1408 by a lock; otherwise the behavior is undefined, but may exhibit less 

1409 contention. 

1410 use_nesterov: An optional `bool`. Defaults to `False`. 

1411 If `True`, uses the nesterov update. 

1412 name: A name for the operation (optional). 

1413 

1414 Returns: 

1415 The created Operation. 

1416 """ 

1417 _ctx = _context._context or _context.context() 

1418 tld = _ctx._thread_local_data 

1419 if tld.is_eager: 

1420 try: 

1421 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1422 _ctx, "ResourceApplyAdam", name, var, m, v, beta1_power, beta2_power, 

1423 lr, beta1, beta2, epsilon, grad, "use_locking", use_locking, 

1424 "use_nesterov", use_nesterov) 

1425 return _result 

1426 except _core._NotOkStatusException as e: 

1427 _ops.raise_from_not_ok_status(e, name) 

1428 except _core._FallbackException: 

1429 pass 

1430 try: 

1431 return resource_apply_adam_eager_fallback( 

1432 var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, 

1433 grad, use_locking=use_locking, use_nesterov=use_nesterov, name=name, 

1434 ctx=_ctx) 

1435 except _core._SymbolicException: 

1436 pass # Add nodes to the TensorFlow graph. 

1437 # Add nodes to the TensorFlow graph. 

1438 if use_locking is None: 

1439 use_locking = False 

1440 use_locking = _execute.make_bool(use_locking, "use_locking") 

1441 if use_nesterov is None: 

1442 use_nesterov = False 

1443 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

1444 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1445 "ResourceApplyAdam", var=var, m=m, v=v, beta1_power=beta1_power, 

1446 beta2_power=beta2_power, lr=lr, beta1=beta1, 

1447 beta2=beta2, epsilon=epsilon, grad=grad, 

1448 use_locking=use_locking, 

1449 use_nesterov=use_nesterov, name=name) 

1450 return _op 

1451ResourceApplyAdam = tf_export("raw_ops.ResourceApplyAdam")(_ops.to_raw_op(resource_apply_adam)) 
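
# NOTE: illustration only, not generated code. A NumPy transcription of the Adam update
# documented above; beta1_power and beta2_power stand for beta1**t and beta2**t. This mirrors
# the documented formulas only, not the kernel implementation.
import numpy as np

def adam_reference(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
                   use_nesterov=False):
    lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    if use_nesterov:
        var = var - lr_t * (m * beta1 + (1 - beta1) * grad) / (np.sqrt(v) + epsilon)
    else:
        var = var - lr_t * m / (np.sqrt(v) + epsilon)
    return var, m, v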

1452 

1453 

1454def resource_apply_adam_eager_fallback(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking, use_nesterov, name, ctx): 

1455 if use_locking is None: 

1456 use_locking = False 

1457 use_locking = _execute.make_bool(use_locking, "use_locking") 

1458 if use_nesterov is None: 

1459 use_nesterov = False 

1460 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

1461 _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1462 (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T 

1463 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1464 m = _ops.convert_to_tensor(m, _dtypes.resource) 

1465 v = _ops.convert_to_tensor(v, _dtypes.resource) 

1466 _inputs_flat = [var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad] 

1467 _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov", 

1468 use_nesterov) 

1469 _result = _execute.execute(b"ResourceApplyAdam", 0, inputs=_inputs_flat, 

1470 attrs=_attrs, ctx=ctx, name=name) 

1471 _result = None 

1472 return _result 

1473 

1474 

1475def resource_apply_adam_with_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking=False, name=None): 

1476 r"""Update '*var' according to the Adam algorithm. 

1477 

1478 $$\text{lr}_t := \mathrm{lr} \cdot \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ 

1479 $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ 

1480 $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ 

1481 $$\hat{v}_t := \max(\hat{v}_{t-1}, v_t)$$ 

1482 $$\text{variable} := \text{variable} - \text{lr}_t \cdot m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ 

1483 

1484 Args: 

1485 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1486 m: A `Tensor` of type `resource`. Should be from a Variable(). 

1487 v: A `Tensor` of type `resource`. Should be from a Variable(). 

1488 vhat: A `Tensor` of type `resource`. Should be from a Variable(). 

1489 beta1_power: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1490 Must be a scalar. 

1491 beta2_power: A `Tensor`. Must have the same type as `beta1_power`. 

1492 Must be a scalar. 

1493 lr: A `Tensor`. Must have the same type as `beta1_power`. 

1494 Scaling factor. Must be a scalar. 

1495 beta1: A `Tensor`. Must have the same type as `beta1_power`. 

1496 Momentum factor. Must be a scalar. 

1497 beta2: A `Tensor`. Must have the same type as `beta1_power`. 

1498 Momentum factor. Must be a scalar. 

1499 epsilon: A `Tensor`. Must have the same type as `beta1_power`. 

1500 Ridge term. Must be a scalar. 

1501 grad: A `Tensor`. Must have the same type as `beta1_power`. The gradient. 

1502 use_locking: An optional `bool`. Defaults to `False`. 

1503 If `True`, updating of the var, m, and v tensors will be protected 

1504 by a lock; otherwise the behavior is undefined, but may exhibit less 

1505 contention. 

1506 name: A name for the operation (optional). 

1507 

1508 Returns: 

1509 The created Operation. 

1510 """ 

1511 _ctx = _context._context or _context.context() 

1512 tld = _ctx._thread_local_data 

1513 if tld.is_eager: 

1514 try: 

1515 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1516 _ctx, "ResourceApplyAdamWithAmsgrad", name, var, m, v, vhat, 

1517 beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, 

1518 "use_locking", use_locking) 

1519 return _result 

1520 except _core._NotOkStatusException as e: 

1521 _ops.raise_from_not_ok_status(e, name) 

1522 except _core._FallbackException: 

1523 pass 

1524 try: 

1525 return resource_apply_adam_with_amsgrad_eager_fallback( 

1526 var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, 

1527 epsilon, grad, use_locking=use_locking, name=name, ctx=_ctx) 

1528 except _core._SymbolicException: 

1529 pass # Add nodes to the TensorFlow graph. 

1530 # Add nodes to the TensorFlow graph. 

1531 if use_locking is None: 

1532 use_locking = False 

1533 use_locking = _execute.make_bool(use_locking, "use_locking") 

1534 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1535 "ResourceApplyAdamWithAmsgrad", var=var, m=m, v=v, vhat=vhat, 

1536 beta1_power=beta1_power, 

1537 beta2_power=beta2_power, lr=lr, 

1538 beta1=beta1, beta2=beta2, 

1539 epsilon=epsilon, grad=grad, 

1540 use_locking=use_locking, name=name) 

1541 return _op 

1542ResourceApplyAdamWithAmsgrad = tf_export("raw_ops.ResourceApplyAdamWithAmsgrad")(_ops.to_raw_op(resource_apply_adam_with_amsgrad)) 
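
# NOTE: illustration only, not generated code. A NumPy transcription of the AMSGrad variant
# documented above; the only difference from plain Adam is the running maximum vhat used in
# the denominator. Restatement of the documented formulas, not the kernel implementation.
import numpy as np

def amsgrad_reference(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
    lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    vhat = np.maximum(vhat, v)
    var = var - lr_t * m / (np.sqrt(vhat) + epsilon)
    return var, m, v, vhat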

1543 

1544 

1545def resource_apply_adam_with_amsgrad_eager_fallback(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking, name, ctx): 

1546 if use_locking is None: 

1547 use_locking = False 

1548 use_locking = _execute.make_bool(use_locking, "use_locking") 

1549 _attr_T, _inputs_T = _execute.args_to_matching_eager([beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1550 (beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad) = _inputs_T 

1551 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1552 m = _ops.convert_to_tensor(m, _dtypes.resource) 

1553 v = _ops.convert_to_tensor(v, _dtypes.resource) 

1554 vhat = _ops.convert_to_tensor(vhat, _dtypes.resource) 

1555 _inputs_flat = [var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad] 

1556 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1557 _result = _execute.execute(b"ResourceApplyAdamWithAmsgrad", 0, 

1558 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1559 name=name) 

1560 _result = None 

1561 return _result 

1562 

1563 

1564def resource_apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad, use_locking=False, name=None): 

1565 r"""Update '*var' according to the AddSign update. 

1566 

1567 m_t <- beta * m_{t-1} + (1 - beta) * g 

1568 update <- (alpha + sign_decay * sign(g) * sign(m_t)) * g 

1569 variable <- variable - lr_t * update 

1570 

1571 Args: 

1572 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1573 m: A `Tensor` of type `resource`. Should be from a Variable(). 

1574 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1575 Scaling factor. Must be a scalar. 

1576 alpha: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

1577 sign_decay: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

1578 beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

1579 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1580 use_locking: An optional `bool`. Defaults to `False`. 

1581 If `True`, updating of the var and m tensors is 

1582 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

1583 contention. 

1584 name: A name for the operation (optional). 

1585 

1586 Returns: 

1587 The created Operation. 

1588 """ 

1589 _ctx = _context._context or _context.context() 

1590 tld = _ctx._thread_local_data 

1591 if tld.is_eager: 

1592 try: 

1593 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1594 _ctx, "ResourceApplyAddSign", name, var, m, lr, alpha, sign_decay, 

1595 beta, grad, "use_locking", use_locking) 

1596 return _result 

1597 except _core._NotOkStatusException as e: 

1598 _ops.raise_from_not_ok_status(e, name) 

1599 except _core._FallbackException: 

1600 pass 

1601 try: 

1602 return resource_apply_add_sign_eager_fallback( 

1603 var, m, lr, alpha, sign_decay, beta, grad, use_locking=use_locking, 

1604 name=name, ctx=_ctx) 

1605 except _core._SymbolicException: 

1606 pass # Add nodes to the TensorFlow graph. 

1607 # Add nodes to the TensorFlow graph. 

1608 if use_locking is None: 

1609 use_locking = False 

1610 use_locking = _execute.make_bool(use_locking, "use_locking") 

1611 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1612 "ResourceApplyAddSign", var=var, m=m, lr=lr, alpha=alpha, 

1613 sign_decay=sign_decay, beta=beta, grad=grad, 

1614 use_locking=use_locking, name=name) 

1615 return _op 

1616ResourceApplyAddSign = tf_export("raw_ops.ResourceApplyAddSign")(_ops.to_raw_op(resource_apply_add_sign)) 
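
# NOTE: illustration only, not generated code. A NumPy transcription of the AddSign update
# documented above, with the decay factor written as `beta` to match the op's argument name.
# Restatement of the documented formula only.
import numpy as np

def add_sign_reference(var, m, lr, alpha, sign_decay, beta, grad):
    m = beta * m + (1 - beta) * grad
    update = (alpha + sign_decay * np.sign(grad) * np.sign(m)) * grad
    var = var - lr * update
    return var, m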

1617 

1618 

1619def resource_apply_add_sign_eager_fallback(var, m, lr, alpha, sign_decay, beta, grad, use_locking, name, ctx): 

1620 if use_locking is None: 

1621 use_locking = False 

1622 use_locking = _execute.make_bool(use_locking, "use_locking") 

1623 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, alpha, sign_decay, beta, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1624 (lr, alpha, sign_decay, beta, grad) = _inputs_T 

1625 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1626 m = _ops.convert_to_tensor(m, _dtypes.resource) 

1627 _inputs_flat = [var, m, lr, alpha, sign_decay, beta, grad] 

1628 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1629 _result = _execute.execute(b"ResourceApplyAddSign", 0, inputs=_inputs_flat, 

1630 attrs=_attrs, ctx=ctx, name=name) 

1631 _result = None 

1632 return _result 

1633 

1634 

1635def resource_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None): 

1636 r"""Update '*var' according to the centered RMSProp algorithm. 

1637 

1638 The centered RMSProp algorithm uses an estimate of the centered second moment 

1639 (i.e., the variance) for normalization, as opposed to regular RMSProp, which 

1640 uses the (uncentered) second moment. This often helps with training, but is 

1641 slightly more expensive in terms of computation and memory. 

1642 

1643 Note that in the dense implementation of this algorithm (this op), mg, ms, and mom will 

1644 update even if the grad is zero; in the sparse implementation, mg, ms, 

1645 and mom will not update in iterations during which the grad is zero. 

1646 

1647 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

1648 mean_grad = decay * mean_grad + (1-decay) * gradient 

1649 

1650 Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) 

1651 

1652 mg <- rho * mg_{t-1} + (1-rho) * grad 

1653 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

1654 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) 

1655 var <- var - mom 

1656 

1657 Args: 

1658 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1659 mg: A `Tensor` of type `resource`. Should be from a Variable(). 

1660 ms: A `Tensor` of type `resource`. Should be from a Variable(). 

1661 mom: A `Tensor` of type `resource`. Should be from a Variable(). 

1662 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1663 Scaling factor. Must be a scalar. 

1664 rho: A `Tensor`. Must have the same type as `lr`. 

1665 Decay rate. Must be a scalar. 

1666 momentum: A `Tensor`. Must have the same type as `lr`. 

1667 Momentum Scale. Must be a scalar. 

1668 epsilon: A `Tensor`. Must have the same type as `lr`. 

1669 Ridge term. Must be a scalar. 

1670 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1671 use_locking: An optional `bool`. Defaults to `False`. 

1672 If `True`, updating of the var, mg, ms, and mom tensors is 

1673 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

1674 contention. 

1675 name: A name for the operation (optional). 

1676 

1677 Returns: 

1678 The created Operation. 

1679 """ 

1680 _ctx = _context._context or _context.context() 

1681 tld = _ctx._thread_local_data 

1682 if tld.is_eager: 

1683 try: 

1684 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1685 _ctx, "ResourceApplyCenteredRMSProp", name, var, mg, ms, mom, lr, rho, 

1686 momentum, epsilon, grad, "use_locking", use_locking) 

1687 return _result 

1688 except _core._NotOkStatusException as e: 

1689 _ops.raise_from_not_ok_status(e, name) 

1690 except _core._FallbackException: 

1691 pass 

1692 try: 

1693 return resource_apply_centered_rms_prop_eager_fallback( 

1694 var, mg, ms, mom, lr, rho, momentum, epsilon, grad, 

1695 use_locking=use_locking, name=name, ctx=_ctx) 

1696 except _core._SymbolicException: 

1697 pass # Add nodes to the TensorFlow graph. 

1698 # Add nodes to the TensorFlow graph. 

1699 if use_locking is None: 

1700 use_locking = False 

1701 use_locking = _execute.make_bool(use_locking, "use_locking") 

1702 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1703 "ResourceApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr, 

1704 rho=rho, momentum=momentum, 

1705 epsilon=epsilon, grad=grad, 

1706 use_locking=use_locking, name=name) 

1707 return _op 

1708ResourceApplyCenteredRMSProp = tf_export("raw_ops.ResourceApplyCenteredRMSProp")(_ops.to_raw_op(resource_apply_centered_rms_prop)) 
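
# NOTE: illustration only, not generated code. A NumPy transcription of the centered RMSProp
# update documented above (mg tracks the mean gradient, ms the mean square). Restatement of
# the documented formulas, not the kernel implementation.
import numpy as np

def centered_rmsprop_reference(var, mg, ms, mom, lr, rho, momentum, epsilon, grad):
    mg = rho * mg + (1 - rho) * grad
    ms = rho * ms + (1 - rho) * grad * grad
    mom = momentum * mom + lr * grad / np.sqrt(ms - mg * mg + epsilon)
    var = var - mom
    return var, mg, ms, mom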

1709 

1710 

1711def resource_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, use_locking, name, ctx): 

1712 if use_locking is None: 

1713 use_locking = False 

1714 use_locking = _execute.make_bool(use_locking, "use_locking") 

1715 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1716 (lr, rho, momentum, epsilon, grad) = _inputs_T 

1717 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1718 mg = _ops.convert_to_tensor(mg, _dtypes.resource) 

1719 ms = _ops.convert_to_tensor(ms, _dtypes.resource) 

1720 mom = _ops.convert_to_tensor(mom, _dtypes.resource) 

1721 _inputs_flat = [var, mg, ms, mom, lr, rho, momentum, epsilon, grad] 

1722 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1723 _result = _execute.execute(b"ResourceApplyCenteredRMSProp", 0, 

1724 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1725 name=name) 

1726 _result = None 

1727 return _result 

1728 

1729 

1730def resource_apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

1731 r"""Update '*var' according to the Ftrl-proximal scheme. 

1732 

1733 accum_new = accum + grad * grad 

1734 linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

1735 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

1736 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

1737 accum = accum_new 

1738 

1739 Args: 

1740 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1741 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1742 linear: A `Tensor` of type `resource`. Should be from a Variable(). 

1743 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1744 The gradient. 

1745 lr: A `Tensor`. Must have the same type as `grad`. 

1746 Scaling factor. Must be a scalar. 

1747 l1: A `Tensor`. Must have the same type as `grad`. 

1748 L1 regularization. Must be a scalar. 

1749 l2: A `Tensor`. Must have the same type as `grad`. 

1750 L2 regularization. Must be a scalar. 

1751 lr_power: A `Tensor`. Must have the same type as `grad`. 

1752 Scaling factor. Must be a scalar. 

1753 use_locking: An optional `bool`. Defaults to `False`. 

1754 If `True`, updating of the var and accum tensors will be protected 

1755 by a lock; otherwise the behavior is undefined, but may exhibit less 

1756 contention. 

1757 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

1758 name: A name for the operation (optional). 

1759 

1760 Returns: 

1761 The created Operation. 

1762 """ 

1763 _ctx = _context._context or _context.context() 

1764 tld = _ctx._thread_local_data 

1765 if tld.is_eager: 

1766 try: 

1767 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1768 _ctx, "ResourceApplyFtrl", name, var, accum, linear, grad, lr, l1, l2, 

1769 lr_power, "use_locking", use_locking, "multiply_linear_by_lr", 

1770 multiply_linear_by_lr) 

1771 return _result 

1772 except _core._NotOkStatusException as e: 

1773 _ops.raise_from_not_ok_status(e, name) 

1774 except _core._FallbackException: 

1775 pass 

1776 try: 

1777 return resource_apply_ftrl_eager_fallback( 

1778 var, accum, linear, grad, lr, l1, l2, lr_power, 

1779 use_locking=use_locking, 

1780 multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) 

1781 except _core._SymbolicException: 

1782 pass # Add nodes to the TensorFlow graph. 

1783 # Add nodes to the TensorFlow graph. 

1784 if use_locking is None: 

1785 use_locking = False 

1786 use_locking = _execute.make_bool(use_locking, "use_locking") 

1787 if multiply_linear_by_lr is None: 

1788 multiply_linear_by_lr = False 

1789 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

1790 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1791 "ResourceApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, 

1792 lr=lr, l1=l1, l2=l2, lr_power=lr_power, 

1793 use_locking=use_locking, 

1794 multiply_linear_by_lr=multiply_linear_by_lr, 

1795 name=name) 

1796 return _op 

1797ResourceApplyFtrl = tf_export("raw_ops.ResourceApplyFtrl")(_ops.to_raw_op(resource_apply_ftrl)) 
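
# NOTE: illustration only, not generated code. A NumPy transcription of the FTRL-proximal
# update documented above. With the usual lr_power = -0.5, accum_new ** (-lr_power) is simply
# sqrt(accum_new). Restatement of the documented formulas only.
import numpy as np

def ftrl_reference(var, accum, linear, grad, lr, l1, l2, lr_power):
    accum_new = accum + grad * grad
    linear = linear + grad - (accum_new ** (-lr_power) - accum ** (-lr_power)) / lr * var
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2 * l2
    var = np.where(np.abs(linear) > l1,
                   (np.sign(linear) * l1 - linear) / quadratic,
                   0.0)
    return var, accum_new, linear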

1798 

1799 

1800def resource_apply_ftrl_eager_fallback(var, accum, linear, grad, lr, l1, l2, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

1801 if use_locking is None: 

1802 use_locking = False 

1803 use_locking = _execute.make_bool(use_locking, "use_locking") 

1804 if multiply_linear_by_lr is None: 

1805 multiply_linear_by_lr = False 

1806 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

1807 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1808 (grad, lr, l1, l2, lr_power) = _inputs_T 

1809 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1810 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

1811 linear = _ops.convert_to_tensor(linear, _dtypes.resource) 

1812 _inputs_flat = [var, accum, linear, grad, lr, l1, l2, lr_power] 

1813 _attrs = ("T", _attr_T, "use_locking", use_locking, "multiply_linear_by_lr", 

1814 multiply_linear_by_lr) 

1815 _result = _execute.execute(b"ResourceApplyFtrl", 0, inputs=_inputs_flat, 

1816 attrs=_attrs, ctx=ctx, name=name) 

1817 _result = None 

1818 return _result 

1819 

1820 

1821def resource_apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

1822 r"""Update '*var' according to the Ftrl-proximal scheme. 

1823 

1824 accum_new = accum + grad * grad 

1825 grad_with_shrinkage = grad + 2 * l2_shrinkage * var 

1826 linear += grad_with_shrinkage - 

1827 (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

1828 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

1829 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

1830 accum = accum_new 

1831 

1832 Args: 

1833 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1834 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1835 linear: A `Tensor` of type `resource`. Should be from a Variable(). 

1836 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1837 The gradient. 

1838 lr: A `Tensor`. Must have the same type as `grad`. 

1839 Scaling factor. Must be a scalar. 

1840 l1: A `Tensor`. Must have the same type as `grad`. 

1841 L1 regularization. Must be a scalar. 

1842 l2: A `Tensor`. Must have the same type as `grad`. 

1843 L2 regularization. Must be a scalar. 

1844 l2_shrinkage: A `Tensor`. Must have the same type as `grad`. L2 shrinkage regularization. Must be a scalar. 

1845 lr_power: A `Tensor`. Must have the same type as `grad`. 

1846 Scaling factor. Must be a scalar. 

1847 use_locking: An optional `bool`. Defaults to `False`. 

1848 If `True`, updating of the var and accum tensors will be protected 

1849 by a lock; otherwise the behavior is undefined, but may exhibit less 

1850 contention. 

1851 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

1852 name: A name for the operation (optional). 

1853 

1854 Returns: 

1855 The created Operation. 

1856 """ 

1857 _ctx = _context._context or _context.context() 

1858 tld = _ctx._thread_local_data 

1859 if tld.is_eager: 

1860 try: 

1861 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1862 _ctx, "ResourceApplyFtrlV2", name, var, accum, linear, grad, lr, l1, 

1863 l2, l2_shrinkage, lr_power, "use_locking", use_locking, 

1864 "multiply_linear_by_lr", multiply_linear_by_lr) 

1865 return _result 

1866 except _core._NotOkStatusException as e: 

1867 _ops.raise_from_not_ok_status(e, name) 

1868 except _core._FallbackException: 

1869 pass 

1870 try: 

1871 return resource_apply_ftrl_v2_eager_fallback( 

1872 var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, 

1873 use_locking=use_locking, 

1874 multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) 

1875 except _core._SymbolicException: 

1876 pass # Add nodes to the TensorFlow graph. 

1877 # Add nodes to the TensorFlow graph. 

1878 if use_locking is None: 

1879 use_locking = False 

1880 use_locking = _execute.make_bool(use_locking, "use_locking") 

1881 if multiply_linear_by_lr is None: 

1882 multiply_linear_by_lr = False 

1883 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

1884 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1885 "ResourceApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, 

1886 lr=lr, l1=l1, l2=l2, l2_shrinkage=l2_shrinkage, 

1887 lr_power=lr_power, use_locking=use_locking, 

1888 multiply_linear_by_lr=multiply_linear_by_lr, 

1889 name=name) 

1890 return _op 

1891ResourceApplyFtrlV2 = tf_export("raw_ops.ResourceApplyFtrlV2")(_ops.to_raw_op(resource_apply_ftrl_v2)) 

1892 

1893 

1894def resource_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

1895 if use_locking is None: 

1896 use_locking = False 

1897 use_locking = _execute.make_bool(use_locking, "use_locking") 

1898 if multiply_linear_by_lr is None: 

1899 multiply_linear_by_lr = False 

1900 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

1901 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, l2_shrinkage, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1902 (grad, lr, l1, l2, l2_shrinkage, lr_power) = _inputs_T 

1903 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1904 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

1905 linear = _ops.convert_to_tensor(linear, _dtypes.resource) 

1906 _inputs_flat = [var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power] 

1907 _attrs = ("T", _attr_T, "use_locking", use_locking, "multiply_linear_by_lr", 

1908 multiply_linear_by_lr) 

1909 _result = _execute.execute(b"ResourceApplyFtrlV2", 0, inputs=_inputs_flat, 

1910 attrs=_attrs, ctx=ctx, name=name) 

1911 _result = None 

1912 return _result 

1913 

1914 

1915def resource_apply_gradient_descent(var, alpha, delta, use_locking=False, name=None): 

1916 r"""Update '*var' by subtracting 'alpha' * 'delta' from it. 

1917 

1918 Args: 

1919 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1920 alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1921 Scaling factor. Must be a scalar. 

1922 delta: A `Tensor`. Must have the same type as `alpha`. The change. 

1923 use_locking: An optional `bool`. Defaults to `False`. 

1924 If `True`, the subtraction will be protected by a lock; 

1925 otherwise the behavior is undefined, but may exhibit less contention. 

1926 name: A name for the operation (optional). 

1927 

1928 Returns: 

1929 The created Operation. 

1930 """ 

1931 _ctx = _context._context or _context.context() 

1932 tld = _ctx._thread_local_data 

1933 if tld.is_eager: 

1934 try: 

1935 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1936 _ctx, "ResourceApplyGradientDescent", name, var, alpha, delta, 

1937 "use_locking", use_locking) 

1938 return _result 

1939 except _core._NotOkStatusException as e: 

1940 _ops.raise_from_not_ok_status(e, name) 

1941 except _core._FallbackException: 

1942 pass 

1943 try: 

1944 return resource_apply_gradient_descent_eager_fallback( 

1945 var, alpha, delta, use_locking=use_locking, name=name, ctx=_ctx) 

1946 except _core._SymbolicException: 

1947 pass # Add nodes to the TensorFlow graph. 

1948 # Add nodes to the TensorFlow graph. 

1949 if use_locking is None: 

1950 use_locking = False 

1951 use_locking = _execute.make_bool(use_locking, "use_locking") 

1952 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1953 "ResourceApplyGradientDescent", var=var, alpha=alpha, delta=delta, 

1954 use_locking=use_locking, name=name) 

1955 return _op 

1956ResourceApplyGradientDescent = tf_export("raw_ops.ResourceApplyGradientDescent")(_ops.to_raw_op(resource_apply_gradient_descent)) 
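
# NOTE: illustration only, not generated code. The wrapper above is exported as
# tf.raw_ops.ResourceApplyGradientDescent; the op subtracts alpha * delta from the variable in
# place. Values below are illustrative assumptions.
import tensorflow as tf

v = tf.Variable([1.0, 2.0, 3.0])
tf.raw_ops.ResourceApplyGradientDescent(
    var=v.handle, alpha=tf.constant(0.1), delta=tf.constant([1.0, 1.0, 1.0]))
# v.numpy() is now approximately [0.9, 1.9, 2.9]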

1957 

1958 

1959def resource_apply_gradient_descent_eager_fallback(var, alpha, delta, use_locking, name, ctx): 

1960 if use_locking is None: 

1961 use_locking = False 

1962 use_locking = _execute.make_bool(use_locking, "use_locking") 

1963 _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, delta], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1964 (alpha, delta) = _inputs_T 

1965 var = _ops.convert_to_tensor(var, _dtypes.resource) 

1966 _inputs_flat = [var, alpha, delta] 

1967 _attrs = ("T", _attr_T, "use_locking", use_locking) 

1968 _result = _execute.execute(b"ResourceApplyGradientDescent", 0, 

1969 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1970 name=name) 

1971 _result = None 

1972 return _result 

1973 

1974 

1975def resource_apply_keras_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None): 

1976 r"""Update '*var' according to the momentum scheme. 

1977 

1978 Set use_nesterov = True if you want to use Nesterov momentum. 

1979 

1980 accum = accum * momentum - lr * grad 

1981 var += accum 

1982 

1983 Args: 

1984 var: A `Tensor` of type `resource`. Should be from a Variable(). 

1985 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

1986 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

1987 Scaling factor. Must be a scalar. 

1988 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

1989 momentum: A `Tensor`. Must have the same type as `lr`. 

1990 Momentum. Must be a scalar. 

1991 use_locking: An optional `bool`. Defaults to `False`. 

1992 If `True`, updating of the var and accum tensors will be protected 

1993 by a lock; otherwise the behavior is undefined, but may exhibit less 

1994 contention. 

1995 use_nesterov: An optional `bool`. Defaults to `False`. 

1996 If `True`, the tensor passed to compute grad will be 

1997 var + momentum * accum, so in the end, the var you get is actually 

1998 var + momentum * accum. 

1999 name: A name for the operation (optional). 

2000 

2001 Returns: 

2002 The created Operation. 

2003 """ 

2004 _ctx = _context._context or _context.context() 

2005 tld = _ctx._thread_local_data 

2006 if tld.is_eager: 

2007 try: 

2008 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2009 _ctx, "ResourceApplyKerasMomentum", name, var, accum, lr, grad, 

2010 momentum, "use_locking", use_locking, "use_nesterov", use_nesterov) 

2011 return _result 

2012 except _core._NotOkStatusException as e: 

2013 _ops.raise_from_not_ok_status(e, name) 

2014 except _core._FallbackException: 

2015 pass 

2016 try: 

2017 return resource_apply_keras_momentum_eager_fallback( 

2018 var, accum, lr, grad, momentum, use_locking=use_locking, 

2019 use_nesterov=use_nesterov, name=name, ctx=_ctx) 

2020 except _core._SymbolicException: 

2021 pass # Add nodes to the TensorFlow graph. 

2022 # Add nodes to the TensorFlow graph. 

2023 if use_locking is None: 

2024 use_locking = False 

2025 use_locking = _execute.make_bool(use_locking, "use_locking") 

2026 if use_nesterov is None: 

2027 use_nesterov = False 

2028 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

2029 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2030 "ResourceApplyKerasMomentum", var=var, accum=accum, lr=lr, grad=grad, 

2031 momentum=momentum, 

2032 use_locking=use_locking, 

2033 use_nesterov=use_nesterov, name=name) 

2034 return _op 

2035ResourceApplyKerasMomentum = tf_export("raw_ops.ResourceApplyKerasMomentum")(_ops.to_raw_op(resource_apply_keras_momentum)) 
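
# NOTE: illustration only, not generated code. A NumPy transcription of the documented update
# (accum = accum * momentum - lr * grad; var += accum). The use_nesterov=True variant
# additionally evaluates the look-ahead point described in the docstring above and is not
# restated here.
import numpy as np

def keras_momentum_reference(var, accum, lr, grad, momentum):
    accum = accum * momentum - lr * grad
    var = var + accum
    return var, accum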

2036 

2037 

2038def resource_apply_keras_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking, use_nesterov, name, ctx): 

2039 if use_locking is None: 

2040 use_locking = False 

2041 use_locking = _execute.make_bool(use_locking, "use_locking") 

2042 if use_nesterov is None: 

2043 use_nesterov = False 

2044 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

2045 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2046 (lr, grad, momentum) = _inputs_T 

2047 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2048 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2049 _inputs_flat = [var, accum, lr, grad, momentum] 

2050 _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov", 

2051 use_nesterov) 

2052 _result = _execute.execute(b"ResourceApplyKerasMomentum", 0, 

2053 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2054 name=name) 

2055 _result = None 

2056 return _result 

2057 

2058 

2059def resource_apply_momentum(var, accum, lr, grad, momentum, use_locking=False, use_nesterov=False, name=None): 

2060 r"""Update '*var' according to the momentum scheme. 

2061 

2062 Set use_nesterov = True if you want to use Nesterov momentum. 

2063 

2064 accum = accum * momentum + grad 

2065 var -= lr * accum 

2066 

2067 Args: 

2068 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2069 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2070 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2071 Scaling factor. Must be a scalar. 

2072 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2073 momentum: A `Tensor`. Must have the same type as `lr`. 

2074 Momentum. Must be a scalar. 

2075 use_locking: An optional `bool`. Defaults to `False`. 

2076 If `True`, updating of the var and accum tensors will be protected 

2077 by a lock; otherwise the behavior is undefined, but may exhibit less 

2078 contention. 

2079 use_nesterov: An optional `bool`. Defaults to `False`. 

2080 If `True`, the tensor passed to compute grad will be 

2081 var - lr * momentum * accum, so in the end, the var you get is actually 

2082 var - lr * momentum * accum. 

2083 name: A name for the operation (optional). 

2084 

2085 Returns: 

2086 The created Operation. 

2087 """ 

2088 _ctx = _context._context or _context.context() 

2089 tld = _ctx._thread_local_data 

2090 if tld.is_eager: 

2091 try: 

2092 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2093 _ctx, "ResourceApplyMomentum", name, var, accum, lr, grad, momentum, 

2094 "use_locking", use_locking, "use_nesterov", use_nesterov) 

2095 return _result 

2096 except _core._NotOkStatusException as e: 

2097 _ops.raise_from_not_ok_status(e, name) 

2098 except _core._FallbackException: 

2099 pass 

2100 try: 

2101 return resource_apply_momentum_eager_fallback( 

2102 var, accum, lr, grad, momentum, use_locking=use_locking, 

2103 use_nesterov=use_nesterov, name=name, ctx=_ctx) 

2104 except _core._SymbolicException: 

2105 pass # Add nodes to the TensorFlow graph. 

2106 # Add nodes to the TensorFlow graph. 

2107 if use_locking is None: 

2108 use_locking = False 

2109 use_locking = _execute.make_bool(use_locking, "use_locking") 

2110 if use_nesterov is None: 

2111 use_nesterov = False 

2112 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

2113 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2114 "ResourceApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, 

2115 momentum=momentum, use_locking=use_locking, 

2116 use_nesterov=use_nesterov, name=name) 

2117 return _op 

2118ResourceApplyMomentum = tf_export("raw_ops.ResourceApplyMomentum")(_ops.to_raw_op(resource_apply_momentum)) 
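
# NOTE: illustration only, not generated code. A NumPy transcription of the documented update.
# Contrast with ResourceApplyKerasMomentum above: here the learning rate is applied when the
# accumulator is folded into the variable, not when the gradient is accumulated.
import numpy as np

def momentum_reference(var, accum, lr, grad, momentum):
    accum = accum * momentum + grad
    var = var - lr * accum
    return var, accum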

2119 

2120 

2121def resource_apply_momentum_eager_fallback(var, accum, lr, grad, momentum, use_locking, use_nesterov, name, ctx): 

2122 if use_locking is None: 

2123 use_locking = False 

2124 use_locking = _execute.make_bool(use_locking, "use_locking") 

2125 if use_nesterov is None: 

2126 use_nesterov = False 

2127 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

2128 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2129 (lr, grad, momentum) = _inputs_T 

2130 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2131 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2132 _inputs_flat = [var, accum, lr, grad, momentum] 

2133 _attrs = ("T", _attr_T, "use_locking", use_locking, "use_nesterov", 

2134 use_nesterov) 

2135 _result = _execute.execute(b"ResourceApplyMomentum", 0, inputs=_inputs_flat, 

2136 attrs=_attrs, ctx=ctx, name=name) 

2137 _result = None 

2138 return _result 

2139 

2140 

2141def resource_apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad, use_locking=False, name=None): 

2142 r"""Update '*var' according to the AddSign update. 

2143 

2144 m_t <- beta1 * m_{t-1} + (1 - beta1) * g 

2145 update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g 

2146 variable <- variable - lr_t * update 

2147 

2148 Args: 

2149 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2150 m: A `Tensor` of type `resource`. Should be from a Variable(). 

2151 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2152 Scaling factor. Must be a scalar. 

2153 logbase: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

2154 sign_decay: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

2155 beta: A `Tensor`. Must have the same type as `lr`. Must be a scalar. 

2156 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2157 use_locking: An optional `bool`. Defaults to `False`. 

2158 If `True`, updating of the var and m tensors is 

2159 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

2160 contention. 

2161 name: A name for the operation (optional). 

2162 

2163 Returns: 

2164 The created Operation. 

2165 """ 

2166 _ctx = _context._context or _context.context() 

2167 tld = _ctx._thread_local_data 

2168 if tld.is_eager: 

2169 try: 

2170 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2171 _ctx, "ResourceApplyPowerSign", name, var, m, lr, logbase, sign_decay, 

2172 beta, grad, "use_locking", use_locking) 

2173 return _result 

2174 except _core._NotOkStatusException as e: 

2175 _ops.raise_from_not_ok_status(e, name) 

2176 except _core._FallbackException: 

2177 pass 

2178 try: 

2179 return resource_apply_power_sign_eager_fallback( 

2180 var, m, lr, logbase, sign_decay, beta, grad, 

2181 use_locking=use_locking, name=name, ctx=_ctx) 

2182 except _core._SymbolicException: 

2183 pass # Add nodes to the TensorFlow graph. 

2184 # Add nodes to the TensorFlow graph. 

2185 if use_locking is None: 

2186 use_locking = False 

2187 use_locking = _execute.make_bool(use_locking, "use_locking") 

2188 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2189 "ResourceApplyPowerSign", var=var, m=m, lr=lr, logbase=logbase, 

2190 sign_decay=sign_decay, beta=beta, grad=grad, 

2191 use_locking=use_locking, name=name) 

2192 return _op 

2193ResourceApplyPowerSign = tf_export("raw_ops.ResourceApplyPowerSign")(_ops.to_raw_op(resource_apply_power_sign)) 
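# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Plain-Python, scalar rendering of the PowerSign update computed by
# ResourceApplyPowerSign, following the formulas in the docstring above.
# The helper name and the use of Python floats instead of resource tensors
# are assumptions made purely for illustration.
import math

def _power_sign_step(var, m, grad, lr, logbase, sign_decay, beta):
    m = beta * m + (1.0 - beta) * grad              # m_t <- beta1*m_{t-1} + (1-beta1)*g
    sign_g = (grad > 0) - (grad < 0)
    sign_m = (m > 0) - (m < 0)
    update = math.exp(logbase * sign_decay * sign_g * sign_m) * grad
    return var - lr * update, m                     # variable <- variable - lr_t * update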

2194 

2195 

2196def resource_apply_power_sign_eager_fallback(var, m, lr, logbase, sign_decay, beta, grad, use_locking, name, ctx): 

2197 if use_locking is None: 

2198 use_locking = False 

2199 use_locking = _execute.make_bool(use_locking, "use_locking") 

2200 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, logbase, sign_decay, beta, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2201 (lr, logbase, sign_decay, beta, grad) = _inputs_T 

2202 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2203 m = _ops.convert_to_tensor(m, _dtypes.resource) 

2204 _inputs_flat = [var, m, lr, logbase, sign_decay, beta, grad] 

2205 _attrs = ("T", _attr_T, "use_locking", use_locking) 

2206 _result = _execute.execute(b"ResourceApplyPowerSign", 0, 

2207 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2208 name=name) 

2209 _result = None 

2210 return _result 

2211 

2212 

2213def resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, use_locking=False, name=None): 

2214 r"""Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. 

2215 

2216 accum += grad * grad 

2217 prox_v = var - lr * grad * (1 / sqrt(accum)) 

2218 var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} 

2219 

2220 Args: 

2221 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2222 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2223 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2224 Scaling factor. Must be a scalar. 

2225 l1: A `Tensor`. Must have the same type as `lr`. 

2226 L1 regularization. Must be a scalar. 

2227 l2: A `Tensor`. Must have the same type as `lr`. 

2228 L2 regularization. Must be a scalar. 

2229 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2230 use_locking: An optional `bool`. Defaults to `False`. 

2231 If True, updating of the var and accum tensors will be protected by 

2232 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

2233 name: A name for the operation (optional). 

2234 

2235 Returns: 

2236 The created Operation. 

2237 """ 

2238 _ctx = _context._context or _context.context() 

2239 tld = _ctx._thread_local_data 

2240 if tld.is_eager: 

2241 try: 

2242 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2243 _ctx, "ResourceApplyProximalAdagrad", name, var, accum, lr, l1, l2, 

2244 grad, "use_locking", use_locking) 

2245 return _result 

2246 except _core._NotOkStatusException as e: 

2247 _ops.raise_from_not_ok_status(e, name) 

2248 except _core._FallbackException: 

2249 pass 

2250 try: 

2251 return resource_apply_proximal_adagrad_eager_fallback( 

2252 var, accum, lr, l1, l2, grad, use_locking=use_locking, name=name, 

2253 ctx=_ctx) 

2254 except _core._SymbolicException: 

2255 pass # Add nodes to the TensorFlow graph. 

2256 # Add nodes to the TensorFlow graph. 

2257 if use_locking is None: 

2258 use_locking = False 

2259 use_locking = _execute.make_bool(use_locking, "use_locking") 

2260 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2261 "ResourceApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, 

2262 l2=l2, grad=grad, 

2263 use_locking=use_locking, name=name) 

2264 return _op 

2265ResourceApplyProximalAdagrad = tf_export("raw_ops.ResourceApplyProximalAdagrad")(_ops.to_raw_op(resource_apply_proximal_adagrad)) 
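# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Scalar Python rendering of the FOBOS-with-Adagrad-learning-rate update
# described above for ResourceApplyProximalAdagrad. The helper name and plain
# floats are illustrative assumptions.
import math

def _proximal_adagrad_step(var, accum, grad, lr, l1, l2):
    accum = accum + grad * grad                     # accum += grad * grad
    prox_v = var - lr * grad / math.sqrt(accum)     # prox_v = var - lr * grad * (1/sqrt(accum))
    sign = (prox_v > 0) - (prox_v < 0)
    # var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1, 0}
    var = sign / (1.0 + lr * l2) * max(abs(prox_v) - lr * l1, 0.0)
    return var, accum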

2266 

2267 

2268def resource_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, use_locking, name, ctx): 

2269 if use_locking is None: 

2270 use_locking = False 

2271 use_locking = _execute.make_bool(use_locking, "use_locking") 

2272 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2273 (lr, l1, l2, grad) = _inputs_T 

2274 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2275 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2276 _inputs_flat = [var, accum, lr, l1, l2, grad] 

2277 _attrs = ("T", _attr_T, "use_locking", use_locking) 

2278 _result = _execute.execute(b"ResourceApplyProximalAdagrad", 0, 

2279 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2280 name=name) 

2281 _result = None 

2282 return _result 

2283 

2284 

2285def resource_apply_proximal_gradient_descent(var, alpha, l1, l2, delta, use_locking=False, name=None): 

2286 r"""Update '*var' as FOBOS algorithm with fixed learning rate. 

2287 

2288 prox_v = var - alpha * delta 

2289 var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} 

2290 

2291 Args: 

2292 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2293 alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2294 Scaling factor. Must be a scalar. 

2295 l1: A `Tensor`. Must have the same type as `alpha`. 

2296 L1 regularization. Must be a scalar. 

2297 l2: A `Tensor`. Must have the same type as `alpha`. 

2298 L2 regularization. Must be a scalar. 

2299 delta: A `Tensor`. Must have the same type as `alpha`. The change. 

2300 use_locking: An optional `bool`. Defaults to `False`. 

2301 If True, the subtraction will be protected by a lock; 

2302 otherwise the behavior is undefined, but may exhibit less contention. 

2303 name: A name for the operation (optional). 

2304 

2305 Returns: 

2306 The created Operation. 

2307 """ 

2308 _ctx = _context._context or _context.context() 

2309 tld = _ctx._thread_local_data 

2310 if tld.is_eager: 

2311 try: 

2312 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2313 _ctx, "ResourceApplyProximalGradientDescent", name, var, alpha, l1, 

2314 l2, delta, "use_locking", use_locking) 

2315 return _result 

2316 except _core._NotOkStatusException as e: 

2317 _ops.raise_from_not_ok_status(e, name) 

2318 except _core._FallbackException: 

2319 pass 

2320 try: 

2321 return resource_apply_proximal_gradient_descent_eager_fallback( 

2322 var, alpha, l1, l2, delta, use_locking=use_locking, name=name, 

2323 ctx=_ctx) 

2324 except _core._SymbolicException: 

2325 pass # Add nodes to the TensorFlow graph. 

2326 # Add nodes to the TensorFlow graph. 

2327 if use_locking is None: 

2328 use_locking = False 

2329 use_locking = _execute.make_bool(use_locking, "use_locking") 

2330 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2331 "ResourceApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, 

2332 l2=l2, delta=delta, 

2333 use_locking=use_locking, 

2334 name=name) 

2335 return _op 

2336ResourceApplyProximalGradientDescent = tf_export("raw_ops.ResourceApplyProximalGradientDescent")(_ops.to_raw_op(resource_apply_proximal_gradient_descent)) 
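# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Scalar Python rendering of the fixed-learning-rate FOBOS update described
# above for ResourceApplyProximalGradientDescent. Names are illustrative only.
def _proximal_gradient_descent_step(var, delta, alpha, l1, l2):
    prox_v = var - alpha * delta                    # prox_v = var - alpha * delta
    sign = (prox_v > 0) - (prox_v < 0)
    # var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1, 0}
    return sign / (1.0 + alpha * l2) * max(abs(prox_v) - alpha * l1, 0.0)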

2337 

2338 

2339def resource_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, delta, use_locking, name, ctx): 

2340 if use_locking is None: 

2341 use_locking = False 

2342 use_locking = _execute.make_bool(use_locking, "use_locking") 

2343 _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, delta], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2344 (alpha, l1, l2, delta) = _inputs_T 

2345 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2346 _inputs_flat = [var, alpha, l1, l2, delta] 

2347 _attrs = ("T", _attr_T, "use_locking", use_locking) 

2348 _result = _execute.execute(b"ResourceApplyProximalGradientDescent", 0, 

2349 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2350 name=name) 

2351 _result = None 

2352 return _result 

2353 

2354 

2355def resource_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking=False, name=None): 

2356 r"""Update '*var' according to the RMSProp algorithm. 

2357 

2358 Note that in the dense implementation of this algorithm (which this op 

2359 performs), ms and mom will update even if the grad is zero; in the sparse 

2360 implementation, ms and mom will not update in iterations during which the grad is zero. 

2361 

2362 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

2363 Delta = learning_rate * gradient / sqrt(mean_square + epsilon) 

2364 

2365 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

2366 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) 

2367 var <- var - mom 

2368 

2369 Args: 

2370 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2371 ms: A `Tensor` of type `resource`. Should be from a Variable(). 

2372 mom: A `Tensor` of type `resource`. Should be from a Variable(). 

2373 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2374 Scaling factor. Must be a scalar. 

2375 rho: A `Tensor`. Must have the same type as `lr`. 

2376 Decay rate. Must be a scalar. 

2377 momentum: A `Tensor`. Must have the same type as `lr`. 

2378 epsilon: A `Tensor`. Must have the same type as `lr`. 

2379 Ridge term. Must be a scalar. 

2380 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2381 use_locking: An optional `bool`. Defaults to `False`. 

2382 If `True`, updating of the var, ms, and mom tensors is protected 

2383 by a lock; otherwise the behavior is undefined, but may exhibit less 

2384 contention. 

2385 name: A name for the operation (optional). 

2386 

2387 Returns: 

2388 The created Operation. 

2389 """ 

2390 _ctx = _context._context or _context.context() 

2391 tld = _ctx._thread_local_data 

2392 if tld.is_eager: 

2393 try: 

2394 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2395 _ctx, "ResourceApplyRMSProp", name, var, ms, mom, lr, rho, momentum, 

2396 epsilon, grad, "use_locking", use_locking) 

2397 return _result 

2398 except _core._NotOkStatusException as e: 

2399 _ops.raise_from_not_ok_status(e, name) 

2400 except _core._FallbackException: 

2401 pass 

2402 try: 

2403 return resource_apply_rms_prop_eager_fallback( 

2404 var, ms, mom, lr, rho, momentum, epsilon, grad, 

2405 use_locking=use_locking, name=name, ctx=_ctx) 

2406 except _core._SymbolicException: 

2407 pass # Add nodes to the TensorFlow graph. 

2408 # Add nodes to the TensorFlow graph. 

2409 if use_locking is None: 

2410 use_locking = False 

2411 use_locking = _execute.make_bool(use_locking, "use_locking") 

2412 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2413 "ResourceApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, 

2414 momentum=momentum, epsilon=epsilon, grad=grad, 

2415 use_locking=use_locking, name=name) 

2416 return _op 

2417ResourceApplyRMSProp = tf_export("raw_ops.ResourceApplyRMSProp")(_ops.to_raw_op(resource_apply_rms_prop)) 
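# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Scalar Python rendering of the RMSProp update described above for
# ResourceApplyRMSProp. The helper name and plain floats are illustrative
# assumptions.
import math

def _rms_prop_step(var, ms, mom, grad, lr, rho, momentum, epsilon):
    ms = rho * ms + (1.0 - rho) * grad * grad                  # ms <- rho*ms + (1-rho)*grad*grad
    mom = momentum * mom + lr * grad / math.sqrt(ms + epsilon) # mom <- momentum*mom + lr*grad/sqrt(ms+eps)
    return var - mom, ms, mom                                  # var <- var - mom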

2418 

2419 

2420def resource_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, use_locking, name, ctx): 

2421 if use_locking is None: 

2422 use_locking = False 

2423 use_locking = _execute.make_bool(use_locking, "use_locking") 

2424 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2425 (lr, rho, momentum, epsilon, grad) = _inputs_T 

2426 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2427 ms = _ops.convert_to_tensor(ms, _dtypes.resource) 

2428 mom = _ops.convert_to_tensor(mom, _dtypes.resource) 

2429 _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad] 

2430 _attrs = ("T", _attr_T, "use_locking", use_locking) 

2431 _result = _execute.execute(b"ResourceApplyRMSProp", 0, inputs=_inputs_flat, 

2432 attrs=_attrs, ctx=ctx, name=name) 

2433 _result = None 

2434 return _result 

2435 

2436 

2437def resource_sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None): 

2438 r"""var: Should be from a Variable(). 

2439 

2440 Args: 

2441 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2442 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2443 accum_update: A `Tensor` of type `resource`. 

2444 Should be from a Variable(). 

2445 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2446 Learning rate. Must be a scalar. 

2447 rho: A `Tensor`. Must have the same type as `lr`. 

2448 Decay factor. Must be a scalar. 

2449 epsilon: A `Tensor`. Must have the same type as `lr`. 

2450 Constant factor. Must be a scalar. 

2451 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2452 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2453 A vector of indices into the first dimension of var and accum. 

2454 use_locking: An optional `bool`. Defaults to `False`. 

2455 If True, updating of the var and accum tensors will be protected by 

2456 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

2457 name: A name for the operation (optional). 

2458 

2459 Returns: 

2460 The created Operation. 

2461 """ 

2462 _ctx = _context._context or _context.context() 

2463 tld = _ctx._thread_local_data 

2464 if tld.is_eager: 

2465 try: 

2466 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2467 _ctx, "ResourceSparseApplyAdadelta", name, var, accum, accum_update, 

2468 lr, rho, epsilon, grad, indices, "use_locking", use_locking) 

2469 return _result 

2470 except _core._NotOkStatusException as e: 

2471 _ops.raise_from_not_ok_status(e, name) 

2472 except _core._FallbackException: 

2473 pass 

2474 try: 

2475 return resource_sparse_apply_adadelta_eager_fallback( 

2476 var, accum, accum_update, lr, rho, epsilon, grad, indices, 

2477 use_locking=use_locking, name=name, ctx=_ctx) 

2478 except _core._SymbolicException: 

2479 pass # Add nodes to the TensorFlow graph. 

2480 # Add nodes to the TensorFlow graph. 

2481 if use_locking is None: 

2482 use_locking = False 

2483 use_locking = _execute.make_bool(use_locking, "use_locking") 

2484 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2485 "ResourceSparseApplyAdadelta", var=var, accum=accum, 

2486 accum_update=accum_update, lr=lr, 

2487 rho=rho, epsilon=epsilon, grad=grad, 

2488 indices=indices, 

2489 use_locking=use_locking, name=name) 

2490 return _op 

2491ResourceSparseApplyAdadelta = tf_export("raw_ops.ResourceSparseApplyAdadelta")(_ops.to_raw_op(resource_sparse_apply_adadelta)) 
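# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# The docstring above is terse, so this is a row-wise Python sketch of the
# standard Adadelta update applied to the rows named in `indices`. It is
# recalled from the Adadelta algorithm, not from the docstring, so treat it as
# an assumption; lists of floats stand in for the resource tensors.
import math

def _sparse_adadelta_step(var, accum, accum_update, grad, indices, lr, rho, epsilon):
    for g, row in zip(grad, indices):
        accum[row] = rho * accum[row] + (1.0 - rho) * g * g
        update = math.sqrt(accum_update[row] + epsilon) / math.sqrt(accum[row] + epsilon) * g
        accum_update[row] = rho * accum_update[row] + (1.0 - rho) * update * update
        var[row] -= lr * update
    return var, accum, accum_update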

2492 

2493 

2494def resource_sparse_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking, name, ctx): 

2495 if use_locking is None: 

2496 use_locking = False 

2497 use_locking = _execute.make_bool(use_locking, "use_locking") 

2498 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2499 (lr, rho, epsilon, grad) = _inputs_T 

2500 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2501 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2502 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2503 accum_update = _ops.convert_to_tensor(accum_update, _dtypes.resource) 

2504 _inputs_flat = [var, accum, accum_update, lr, rho, epsilon, grad, indices] 

2505 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2506 use_locking) 

2507 _result = _execute.execute(b"ResourceSparseApplyAdadelta", 0, 

2508 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2509 name=name) 

2510 _result = None 

2511 return _result 

2512 

2513 

2514def resource_sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None): 

2515 r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. 

2516 

2517 That is, for rows for which we have grad, we update var and accum as follows: 

2518 accum += grad * grad 

2519 var -= lr * grad * (1 / sqrt(accum)) 

2520 

2521 Args: 

2522 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2523 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2524 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2525 Learning rate. Must be a scalar. 

2526 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2527 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2528 A vector of indices into the first dimension of var and accum. 

2529 use_locking: An optional `bool`. Defaults to `False`. 

2530 If `True`, updating of the var and accum tensors will be protected 

2531 by a lock; otherwise the behavior is undefined, but may exhibit less 

2532 contention. 

2533 update_slots: An optional `bool`. Defaults to `True`. 

2534 name: A name for the operation (optional). 

2535 

2536 Returns: 

2537 The created Operation. 

2538 """ 

2539 _ctx = _context._context or _context.context() 

2540 tld = _ctx._thread_local_data 

2541 if tld.is_eager: 

2542 try: 

2543 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2544 _ctx, "ResourceSparseApplyAdagrad", name, var, accum, lr, grad, 

2545 indices, "use_locking", use_locking, "update_slots", update_slots) 

2546 return _result 

2547 except _core._NotOkStatusException as e: 

2548 _ops.raise_from_not_ok_status(e, name) 

2549 except _core._FallbackException: 

2550 pass 

2551 try: 

2552 return resource_sparse_apply_adagrad_eager_fallback( 

2553 var, accum, lr, grad, indices, use_locking=use_locking, 

2554 update_slots=update_slots, name=name, ctx=_ctx) 

2555 except _core._SymbolicException: 

2556 pass # Add nodes to the TensorFlow graph. 

2557 # Add nodes to the TensorFlow graph. 

2558 if use_locking is None: 

2559 use_locking = False 

2560 use_locking = _execute.make_bool(use_locking, "use_locking") 

2561 if update_slots is None: 

2562 update_slots = True 

2563 update_slots = _execute.make_bool(update_slots, "update_slots") 

2564 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2565 "ResourceSparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, 

2566 indices=indices, 

2567 use_locking=use_locking, 

2568 update_slots=update_slots, name=name) 

2569 return _op 

2570ResourceSparseApplyAdagrad = tf_export("raw_ops.ResourceSparseApplyAdagrad")(_ops.to_raw_op(resource_sparse_apply_adagrad)) 
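# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Row-wise Python rendering of the sparse Adagrad update described above for
# ResourceSparseApplyAdagrad: only the rows named in `indices` are touched.
# Lists of floats stand in for the resource tensors; names are illustrative.
import math

def _sparse_adagrad_step(var, accum, grad, indices, lr):
    for g, row in zip(grad, indices):
        accum[row] += g * g                                  # accum += grad * grad
        var[row] -= lr * g / math.sqrt(accum[row])           # var -= lr * grad * (1/sqrt(accum))
    return var, accum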

2571 

2572 

2573def resource_sparse_apply_adagrad_eager_fallback(var, accum, lr, grad, indices, use_locking, update_slots, name, ctx): 

2574 if use_locking is None: 

2575 use_locking = False 

2576 use_locking = _execute.make_bool(use_locking, "use_locking") 

2577 if update_slots is None: 

2578 update_slots = True 

2579 update_slots = _execute.make_bool(update_slots, "update_slots") 

2580 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2581 (lr, grad) = _inputs_T 

2582 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2583 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2584 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2585 _inputs_flat = [var, accum, lr, grad, indices] 

2586 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2587 use_locking, "update_slots", update_slots) 

2588 _result = _execute.execute(b"ResourceSparseApplyAdagrad", 0, 

2589 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2590 name=name) 

2591 _result = None 

2592 return _result 

2593 

2594 

2595def resource_sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None): 

2596 r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme. 

2597 

2598 Args: 

2599 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2600 gradient_accumulator: A `Tensor` of type `resource`. 

2601 Should be from a Variable(). 

2602 gradient_squared_accumulator: A `Tensor` of type `resource`. 

2603 Should be from a Variable(). 

2604 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2605 The gradient. 

2606 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2607 A vector of indices into the first dimension of var and accum. 

2608 lr: A `Tensor`. Must have the same type as `grad`. 

2609 Learning rate. Must be a scalar. 

2610 l1: A `Tensor`. Must have the same type as `grad`. 

2611 L1 regularization. Must be a scalar. 

2612 l2: A `Tensor`. Must have the same type as `grad`. 

2613 L2 regularization. Must be a scalar. 

2614 global_step: A `Tensor` of type `int64`. 

2615 Training step number. Must be a scalar. 

2616 use_locking: An optional `bool`. Defaults to `False`. 

2617 If True, updating of the var and accum tensors will be protected by 

2618 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

2619 name: A name for the operation (optional). 

2620 

2621 Returns: 

2622 The created Operation. 

2623 """ 

2624 _ctx = _context._context or _context.context() 

2625 tld = _ctx._thread_local_data 

2626 if tld.is_eager: 

2627 try: 

2628 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2629 _ctx, "ResourceSparseApplyAdagradDA", name, var, gradient_accumulator, 

2630 gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, 

2631 "use_locking", use_locking) 

2632 return _result 

2633 except _core._NotOkStatusException as e: 

2634 _ops.raise_from_not_ok_status(e, name) 

2635 except _core._FallbackException: 

2636 pass 

2637 try: 

2638 return resource_sparse_apply_adagrad_da_eager_fallback( 

2639 var, gradient_accumulator, gradient_squared_accumulator, grad, 

2640 indices, lr, l1, l2, global_step, use_locking=use_locking, 

2641 name=name, ctx=_ctx) 

2642 except _core._SymbolicException: 

2643 pass # Add nodes to the TensorFlow graph. 

2644 # Add nodes to the TensorFlow graph. 

2645 if use_locking is None: 

2646 use_locking = False 

2647 use_locking = _execute.make_bool(use_locking, "use_locking") 

2648 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2649 "ResourceSparseApplyAdagradDA", var=var, 

2650 gradient_accumulator=gradient_accumulator, 

2651 gradient_squared_accumulator=gradient_squared_accumulator, 

2652 grad=grad, indices=indices, lr=lr, 

2653 l1=l1, l2=l2, global_step=global_step, 

2654 use_locking=use_locking, name=name) 

2655 return _op 

2656ResourceSparseApplyAdagradDA = tf_export("raw_ops.ResourceSparseApplyAdagradDA")(_ops.to_raw_op(resource_sparse_apply_adagrad_da)) 

2657 

2658 

2659def resource_sparse_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking, name, ctx): 

2660 if use_locking is None: 

2661 use_locking = False 

2662 use_locking = _execute.make_bool(use_locking, "use_locking") 

2663 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2664 (grad, lr, l1, l2) = _inputs_T 

2665 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2666 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2667 gradient_accumulator = _ops.convert_to_tensor(gradient_accumulator, _dtypes.resource) 

2668 gradient_squared_accumulator = _ops.convert_to_tensor(gradient_squared_accumulator, _dtypes.resource) 

2669 global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) 

2670 _inputs_flat = [var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step] 

2671 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2672 use_locking) 

2673 _result = _execute.execute(b"ResourceSparseApplyAdagradDA", 0, 

2674 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2675 name=name) 

2676 _result = None 

2677 return _result 

2678 

2679 

2680def resource_sparse_apply_adagrad_v2(var, accum, lr, epsilon, grad, indices, use_locking=False, update_slots=True, name=None): 

2681 r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. 

2682 

2683 That is, for rows for which we have grad, we update var and accum as follows: 

2684 accum += grad * grad 

2685 var -= lr * grad * (1 / (sqrt(accum) + epsilon)) 

2686 

2687 Args: 

2688 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2689 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2690 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2691 Learning rate. Must be a scalar. 

2692 epsilon: A `Tensor`. Must have the same type as `lr`. 

2693 Constant factor. Must be a scalar. 

2694 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2695 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2696 A vector of indices into the first dimension of var and accum. 

2697 use_locking: An optional `bool`. Defaults to `False`. 

2698 If `True`, updating of the var and accum tensors will be protected 

2699 by a lock; otherwise the behavior is undefined, but may exhibit less 

2700 contention. 

2701 update_slots: An optional `bool`. Defaults to `True`. 

2702 name: A name for the operation (optional). 

2703 

2704 Returns: 

2705 The created Operation. 

2706 """ 

2707 _ctx = _context._context or _context.context() 

2708 tld = _ctx._thread_local_data 

2709 if tld.is_eager: 

2710 try: 

2711 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2712 _ctx, "ResourceSparseApplyAdagradV2", name, var, accum, lr, epsilon, 

2713 grad, indices, "use_locking", use_locking, "update_slots", 

2714 update_slots) 

2715 return _result 

2716 except _core._NotOkStatusException as e: 

2717 _ops.raise_from_not_ok_status(e, name) 

2718 except _core._FallbackException: 

2719 pass 

2720 try: 

2721 return resource_sparse_apply_adagrad_v2_eager_fallback( 

2722 var, accum, lr, epsilon, grad, indices, use_locking=use_locking, 

2723 update_slots=update_slots, name=name, ctx=_ctx) 

2724 except _core._SymbolicException: 

2725 pass # Add nodes to the TensorFlow graph. 

2726 # Add nodes to the TensorFlow graph. 

2727 if use_locking is None: 

2728 use_locking = False 

2729 use_locking = _execute.make_bool(use_locking, "use_locking") 

2730 if update_slots is None: 

2731 update_slots = True 

2732 update_slots = _execute.make_bool(update_slots, "update_slots") 

2733 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2734 "ResourceSparseApplyAdagradV2", var=var, accum=accum, lr=lr, 

2735 epsilon=epsilon, grad=grad, 

2736 indices=indices, 

2737 use_locking=use_locking, 

2738 update_slots=update_slots, name=name) 

2739 return _op 

2740ResourceSparseApplyAdagradV2 = tf_export("raw_ops.ResourceSparseApplyAdagradV2")(_ops.to_raw_op(resource_sparse_apply_adagrad_v2)) 
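# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# The V2 op adds an `epsilon` input to the sparse Adagrad update. Row-wise
# Python sketch with epsilon added to the denominator, per the formulas above;
# names and list-of-float types are illustrative assumptions.
import math

def _sparse_adagrad_v2_step(var, accum, grad, indices, lr, epsilon):
    for g, row in zip(grad, indices):
        accum[row] += g * g                                  # accum += grad * grad
        var[row] -= lr * g / (math.sqrt(accum[row]) + epsilon)
    return var, accum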

2741 

2742 

2743def resource_sparse_apply_adagrad_v2_eager_fallback(var, accum, lr, epsilon, grad, indices, use_locking, update_slots, name, ctx): 

2744 if use_locking is None: 

2745 use_locking = False 

2746 use_locking = _execute.make_bool(use_locking, "use_locking") 

2747 if update_slots is None: 

2748 update_slots = True 

2749 update_slots = _execute.make_bool(update_slots, "update_slots") 

2750 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2751 (lr, epsilon, grad) = _inputs_T 

2752 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2753 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2754 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2755 _inputs_flat = [var, accum, lr, epsilon, grad, indices] 

2756 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2757 use_locking, "update_slots", update_slots) 

2758 _result = _execute.execute(b"ResourceSparseApplyAdagradV2", 0, 

2759 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2760 name=name) 

2761 _result = None 

2762 return _result 

2763 

2764 

2765def resource_sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None): 

2766 r"""Update '*var' according to the centered RMSProp algorithm. 

2767 

2768 The centered RMSProp algorithm uses an estimate of the centered second moment 

2769 (i.e., the variance) for normalization, as opposed to regular RMSProp, which 

2770 uses the (uncentered) second moment. This often helps with training, but is 

2771 slightly more expensive in terms of computation and memory. 

2772 

2773 Note that in the dense implementation of this algorithm, mg, ms, and mom will 

2774 update even if the grad is zero, but in this sparse implementation, mg, ms, 

2775 and mom will not update in iterations during which the grad is zero. 

2776 

2777 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

2778 mean_grad = decay * mean_grad + (1-decay) * gradient 

2779 Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) 

2780 

2781 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

2782 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) 

2783 var <- var - mom 

2784 

2785 Args: 

2786 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2787 mg: A `Tensor` of type `resource`. Should be from a Variable(). 

2788 ms: A `Tensor` of type `resource`. Should be from a Variable(). 

2789 mom: A `Tensor` of type `resource`. Should be from a Variable(). 

2790 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2791 Scaling factor. Must be a scalar. 

2792 rho: A `Tensor`. Must have the same type as `lr`. 

2793 Decay rate. Must be a scalar. 

2794 momentum: A `Tensor`. Must have the same type as `lr`. 

2795 epsilon: A `Tensor`. Must have the same type as `lr`. 

2796 Ridge term. Must be a scalar. 

2797 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

2798 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2799 A vector of indices into the first dimension of var, ms and mom. 

2800 use_locking: An optional `bool`. Defaults to `False`. 

2801 If `True`, updating of the var, mg, ms, and mom tensors is 

2802 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

2803 contention. 

2804 name: A name for the operation (optional). 

2805 

2806 Returns: 

2807 The created Operation. 

2808 """ 

2809 _ctx = _context._context or _context.context() 

2810 tld = _ctx._thread_local_data 

2811 if tld.is_eager: 

2812 try: 

2813 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2814 _ctx, "ResourceSparseApplyCenteredRMSProp", name, var, mg, ms, mom, 

2815 lr, rho, momentum, epsilon, grad, indices, "use_locking", use_locking) 

2816 return _result 

2817 except _core._NotOkStatusException as e: 

2818 _ops.raise_from_not_ok_status(e, name) 

2819 except _core._FallbackException: 

2820 pass 

2821 try: 

2822 return resource_sparse_apply_centered_rms_prop_eager_fallback( 

2823 var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, 

2824 use_locking=use_locking, name=name, ctx=_ctx) 

2825 except _core._SymbolicException: 

2826 pass # Add nodes to the TensorFlow graph. 

2827 # Add nodes to the TensorFlow graph. 

2828 if use_locking is None: 

2829 use_locking = False 

2830 use_locking = _execute.make_bool(use_locking, "use_locking") 

2831 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2832 "ResourceSparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, 

2833 lr=lr, rho=rho, 

2834 momentum=momentum, 

2835 epsilon=epsilon, grad=grad, 

2836 indices=indices, 

2837 use_locking=use_locking, 

2838 name=name) 

2839 return _op 

2840ResourceSparseApplyCenteredRMSProp = tf_export("raw_ops.ResourceSparseApplyCenteredRMSProp")(_ops.to_raw_op(resource_sparse_apply_centered_rms_prop)) 
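# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Row-wise Python rendering of the centered RMSProp update described above for
# ResourceSparseApplyCenteredRMSProp; the mg (mean-gradient) slot is what
# distinguishes it from plain RMSProp. Names and list-of-float types are
# illustrative assumptions.
import math

def _sparse_centered_rms_prop_step(var, mg, ms, mom, grad, indices, lr, rho, momentum, epsilon):
    for g, row in zip(grad, indices):
        ms[row] = rho * ms[row] + (1.0 - rho) * g * g        # mean square
        mg[row] = rho * mg[row] + (1.0 - rho) * g            # mean gradient
        denom = math.sqrt(ms[row] - mg[row] * mg[row] + epsilon)
        mom[row] = momentum * mom[row] + lr * g / denom
        var[row] -= mom[row]
    return var, mg, ms, mom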

2841 

2842 

2843def resource_sparse_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking, name, ctx): 

2844 if use_locking is None: 

2845 use_locking = False 

2846 use_locking = _execute.make_bool(use_locking, "use_locking") 

2847 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2848 (lr, rho, momentum, epsilon, grad) = _inputs_T 

2849 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2850 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2851 mg = _ops.convert_to_tensor(mg, _dtypes.resource) 

2852 ms = _ops.convert_to_tensor(ms, _dtypes.resource) 

2853 mom = _ops.convert_to_tensor(mom, _dtypes.resource) 

2854 _inputs_flat = [var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices] 

2855 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2856 use_locking) 

2857 _result = _execute.execute(b"ResourceSparseApplyCenteredRMSProp", 0, 

2858 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2859 name=name) 

2860 _result = None 

2861 return _result 

2862 

2863 

2864def resource_sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

2865 r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. 

2866 

2867 That is, for rows for which we have grad, we update var, accum and linear as follows: 

2868 accum_new = accum + grad * grad 

2869 linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

2870 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

2871 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

2872 accum = accum_new 

2873 

2874 Args: 

2875 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2876 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2877 linear: A `Tensor` of type `resource`. Should be from a Variable(). 

2878 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2879 The gradient. 

2880 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2881 A vector of indices into the first dimension of var and accum. 

2882 lr: A `Tensor`. Must have the same type as `grad`. 

2883 Scaling factor. Must be a scalar. 

2884 l1: A `Tensor`. Must have the same type as `grad`. 

2885 L1 regularization. Must be a scalar. 

2886 l2: A `Tensor`. Must have the same type as `grad`. 

2887 L2 regularization. Must be a scalar. 

2888 lr_power: A `Tensor`. Must have the same type as `grad`. 

2889 Scaling factor. Must be a scalar. 

2890 use_locking: An optional `bool`. Defaults to `False`. 

2891 If `True`, updating of the var and accum tensors will be protected 

2892 by a lock; otherwise the behavior is undefined, but may exhibit less 

2893 contention. 

2894 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

2895 name: A name for the operation (optional). 

2896 

2897 Returns: 

2898 The created Operation. 

2899 """ 

2900 _ctx = _context._context or _context.context() 

2901 tld = _ctx._thread_local_data 

2902 if tld.is_eager: 

2903 try: 

2904 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2905 _ctx, "ResourceSparseApplyFtrl", name, var, accum, linear, grad, 

2906 indices, lr, l1, l2, lr_power, "use_locking", use_locking, 

2907 "multiply_linear_by_lr", multiply_linear_by_lr) 

2908 return _result 

2909 except _core._NotOkStatusException as e: 

2910 _ops.raise_from_not_ok_status(e, name) 

2911 except _core._FallbackException: 

2912 pass 

2913 try: 

2914 return resource_sparse_apply_ftrl_eager_fallback( 

2915 var, accum, linear, grad, indices, lr, l1, l2, lr_power, 

2916 use_locking=use_locking, 

2917 multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) 

2918 except _core._SymbolicException: 

2919 pass # Add nodes to the TensorFlow graph. 

2920 # Add nodes to the TensorFlow graph. 

2921 if use_locking is None: 

2922 use_locking = False 

2923 use_locking = _execute.make_bool(use_locking, "use_locking") 

2924 if multiply_linear_by_lr is None: 

2925 multiply_linear_by_lr = False 

2926 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

2927 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2928 "ResourceSparseApplyFtrl", var=var, accum=accum, linear=linear, 

2929 grad=grad, indices=indices, lr=lr, l1=l1, 

2930 l2=l2, lr_power=lr_power, 

2931 use_locking=use_locking, 

2932 multiply_linear_by_lr=multiply_linear_by_lr, 

2933 name=name) 

2934 return _op 

2935ResourceSparseApplyFtrl = tf_export("raw_ops.ResourceSparseApplyFtrl")(_ops.to_raw_op(resource_sparse_apply_ftrl)) 
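# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Row-wise Python rendering of the Ftrl-proximal update described above for
# ResourceSparseApplyFtrl (the multiply_linear_by_lr variant is not modelled).
# Names and list-of-float types are illustrative assumptions.
def _sparse_ftrl_step(var, accum, linear, grad, indices, lr, l1, l2, lr_power):
    for g, row in zip(grad, indices):
        accum_new = accum[row] + g * g
        # sigma = (accum_new^(-lr_power) - accum^(-lr_power)) / lr
        sigma = (accum_new ** -lr_power - accum[row] ** -lr_power) / lr
        linear[row] += g - sigma * var[row]
        quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
        sign = (linear[row] > 0) - (linear[row] < 0)
        var[row] = (sign * l1 - linear[row]) / quadratic if abs(linear[row]) > l1 else 0.0
        accum[row] = accum_new
    return var, accum, linear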

2936 

2937 

2938def resource_sparse_apply_ftrl_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

2939 if use_locking is None: 

2940 use_locking = False 

2941 use_locking = _execute.make_bool(use_locking, "use_locking") 

2942 if multiply_linear_by_lr is None: 

2943 multiply_linear_by_lr = False 

2944 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

2945 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

2946 (grad, lr, l1, l2, lr_power) = _inputs_T 

2947 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

2948 var = _ops.convert_to_tensor(var, _dtypes.resource) 

2949 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

2950 linear = _ops.convert_to_tensor(linear, _dtypes.resource) 

2951 _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, lr_power] 

2952 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

2953 use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) 

2954 _result = _execute.execute(b"ResourceSparseApplyFtrl", 0, 

2955 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2956 name=name) 

2957 _result = None 

2958 return _result 

2959 

2960 

2961def resource_sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

2962 r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. 

2963 

2964 That is, for rows for which we have grad, we update var, accum and linear as follows: 

2965 grad_with_shrinkage = grad + 2 * l2_shrinkage * var 

2966 accum_new = accum + grad_with_shrinkage * grad_with_shrinkage 

2967 linear += grad_with_shrinkage - 

2968 (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

2969 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

2970 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

2971 accum = accum_new 

2972 

2973 Args: 

2974 var: A `Tensor` of type `resource`. Should be from a Variable(). 

2975 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

2976 linear: A `Tensor` of type `resource`. Should be from a Variable(). 

2977 grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

2978 The gradient. 

2979 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2980 A vector of indices into the first dimension of var and accum. 

2981 lr: A `Tensor`. Must have the same type as `grad`. 

2982 Scaling factor. Must be a scalar. 

2983 l1: A `Tensor`. Must have the same type as `grad`. 

2984 L1 regularization. Must be a scalar. 

2985 l2: A `Tensor`. Must have the same type as `grad`. 

2986 L2 regularization. Must be a scalar. 

2987 l2_shrinkage: A `Tensor`. Must have the same type as `grad`. L2 shrinkage regularization. Must be a scalar. 

2988 lr_power: A `Tensor`. Must have the same type as `grad`. 

2989 Scaling factor. Must be a scalar. 

2990 use_locking: An optional `bool`. Defaults to `False`. 

2991 If `True`, updating of the var and accum tensors will be protected 

2992 by a lock; otherwise the behavior is undefined, but may exhibit less 

2993 contention. 

2994 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

2995 name: A name for the operation (optional). 

2996 

2997 Returns: 

2998 The created Operation. 

2999 """ 

3000 _ctx = _context._context or _context.context() 

3001 tld = _ctx._thread_local_data 

3002 if tld.is_eager: 

3003 try: 

3004 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3005 _ctx, "ResourceSparseApplyFtrlV2", name, var, accum, linear, grad, 

3006 indices, lr, l1, l2, l2_shrinkage, lr_power, "use_locking", 

3007 use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) 

3008 return _result 

3009 except _core._NotOkStatusException as e: 

3010 _ops.raise_from_not_ok_status(e, name) 

3011 except _core._FallbackException: 

3012 pass 

3013 try: 

3014 return resource_sparse_apply_ftrl_v2_eager_fallback( 

3015 var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, 

3016 lr_power, use_locking=use_locking, 

3017 multiply_linear_by_lr=multiply_linear_by_lr, name=name, ctx=_ctx) 

3018 except _core._SymbolicException: 

3019 pass # Add nodes to the TensorFlow graph. 

3020 # Add nodes to the TensorFlow graph. 

3021 if use_locking is None: 

3022 use_locking = False 

3023 use_locking = _execute.make_bool(use_locking, "use_locking") 

3024 if multiply_linear_by_lr is None: 

3025 multiply_linear_by_lr = False 

3026 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

3027 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3028 "ResourceSparseApplyFtrlV2", var=var, accum=accum, linear=linear, 

3029 grad=grad, indices=indices, lr=lr, l1=l1, 

3030 l2=l2, l2_shrinkage=l2_shrinkage, 

3031 lr_power=lr_power, 

3032 use_locking=use_locking, 

3033 multiply_linear_by_lr=multiply_linear_by_lr, 

3034 name=name) 

3035 return _op 

3036ResourceSparseApplyFtrlV2 = tf_export("raw_ops.ResourceSparseApplyFtrlV2")(_ops.to_raw_op(resource_sparse_apply_ftrl_v2)) 

3037 

3038 

3039def resource_sparse_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

3040 if use_locking is None: 

3041 use_locking = False 

3042 use_locking = _execute.make_bool(use_locking, "use_locking") 

3043 if multiply_linear_by_lr is None: 

3044 multiply_linear_by_lr = False 

3045 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

3046 _attr_T, _inputs_T = _execute.args_to_matching_eager([grad, lr, l1, l2, l2_shrinkage, lr_power], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3047 (grad, lr, l1, l2, l2_shrinkage, lr_power) = _inputs_T 

3048 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3049 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3050 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

3051 linear = _ops.convert_to_tensor(linear, _dtypes.resource) 

3052 _inputs_flat = [var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power] 

3053 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3054 use_locking, "multiply_linear_by_lr", multiply_linear_by_lr) 

3055 _result = _execute.execute(b"ResourceSparseApplyFtrlV2", 0, 

3056 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3057 name=name) 

3058 _result = None 

3059 return _result 

3060 

3061 

3062def resource_sparse_apply_keras_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None): 

3063 r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme. 

3064 

3065 Set use_nesterov = True if you want to use Nesterov momentum. 

3066 

3067 That is, for rows for which we have grad, we update var and accum as follows: 

3068 

3069 accum = accum * momentum - lr * grad 

3070 var += accum 

3071 

3072 Args: 

3073 var: A `Tensor` of type `resource`. Should be from a Variable(). 

3074 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

3075 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3076 Learning rate. Must be a scalar. 

3077 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

3078 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3079 A vector of indices into the first dimension of var and accum. 

3080 momentum: A `Tensor`. Must have the same type as `lr`. 

3081 Momentum. Must be a scalar. 

3082 use_locking: An optional `bool`. Defaults to `False`. 

3083 If `True`, updating of the var and accum tensors will be protected 

3084 by a lock; otherwise the behavior is undefined, but may exhibit less 

3085 contention. 

3086 use_nesterov: An optional `bool`. Defaults to `False`. 

3087 If `True`, the tensor passed to compute grad will be 

3088 var + momentum * accum (the Nesterov look-ahead point), so the var you read 

3089 back afterwards is effectively var + momentum * accum. 

3090 name: A name for the operation (optional). 

3091 

3092 Returns: 

3093 The created Operation. 

3094 """ 

3095 _ctx = _context._context or _context.context() 

3096 tld = _ctx._thread_local_data 

3097 if tld.is_eager: 

3098 try: 

3099 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3100 _ctx, "ResourceSparseApplyKerasMomentum", name, var, accum, lr, grad, 

3101 indices, momentum, "use_locking", use_locking, "use_nesterov", 

3102 use_nesterov) 

3103 return _result 

3104 except _core._NotOkStatusException as e: 

3105 _ops.raise_from_not_ok_status(e, name) 

3106 except _core._FallbackException: 

3107 pass 

3108 try: 

3109 return resource_sparse_apply_keras_momentum_eager_fallback( 

3110 var, accum, lr, grad, indices, momentum, use_locking=use_locking, 

3111 use_nesterov=use_nesterov, name=name, ctx=_ctx) 

3112 except _core._SymbolicException: 

3113 pass # Add nodes to the TensorFlow graph. 

3114 # Add nodes to the TensorFlow graph. 

3115 if use_locking is None: 

3116 use_locking = False 

3117 use_locking = _execute.make_bool(use_locking, "use_locking") 

3118 if use_nesterov is None: 

3119 use_nesterov = False 

3120 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

3121 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3122 "ResourceSparseApplyKerasMomentum", var=var, accum=accum, lr=lr, 

3123 grad=grad, indices=indices, 

3124 momentum=momentum, 

3125 use_locking=use_locking, 

3126 use_nesterov=use_nesterov, 

3127 name=name) 

3128 return _op 

3129ResourceSparseApplyKerasMomentum = tf_export("raw_ops.ResourceSparseApplyKerasMomentum")(_ops.to_raw_op(resource_sparse_apply_keras_momentum)) 
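# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Row-wise Python rendering of the Keras-style momentum update described above
# for ResourceSparseApplyKerasMomentum: the learning rate is folded into accum,
# and var is then moved by accum directly. Names are illustrative assumptions.
def _sparse_keras_momentum_step(var, accum, grad, indices, lr, momentum):
    for g, row in zip(grad, indices):
        accum[row] = accum[row] * momentum - lr * g          # accum = accum * momentum - lr * grad
        var[row] += accum[row]                               # var += accum
    return var, accum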

3130 

3131 

3132def resource_sparse_apply_keras_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking, use_nesterov, name, ctx): 

3133 if use_locking is None: 

3134 use_locking = False 

3135 use_locking = _execute.make_bool(use_locking, "use_locking") 

3136 if use_nesterov is None: 

3137 use_nesterov = False 

3138 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

3139 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3140 (lr, grad, momentum) = _inputs_T 

3141 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3142 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3143 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

3144 _inputs_flat = [var, accum, lr, grad, indices, momentum] 

3145 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3146 use_locking, "use_nesterov", use_nesterov) 

3147 _result = _execute.execute(b"ResourceSparseApplyKerasMomentum", 0, 

3148 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3149 name=name) 

3150 _result = None 

3151 return _result 

3152 

3153 

3154def resource_sparse_apply_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None): 

3155 r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme. 

3156 

3157 Set use_nesterov = True if you want to use Nesterov momentum. 

3158 

3159 That is, for rows for which we have grad, we update var and accum as follows: 

3160 

3161 accum = accum * momentum + grad 

3162 var -= lr * accum 

3163 

3164 Args: 

3165 var: A `Tensor` of type `resource`. Should be from a Variable(). 

3166 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

3167 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3168 Learning rate. Must be a scalar. 

3169 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

3170 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3171 A vector of indices into the first dimension of var and accum. 

3172 momentum: A `Tensor`. Must have the same type as `lr`. 

3173 Momentum. Must be a scalar. 

3174 use_locking: An optional `bool`. Defaults to `False`. 

3175 If `True`, updating of the var and accum tensors will be protected 

3176 by a lock; otherwise the behavior is undefined, but may exhibit less 

3177 contention. 

3178 use_nesterov: An optional `bool`. Defaults to `False`. 

3179 If `True`, the tensor passed to compute grad will be 

3180 var - lr * momentum * accum (the Nesterov look-ahead point), so the var you 

3181 read back afterwards is effectively var - lr * momentum * accum. 

3182 name: A name for the operation (optional). 

3183 

3184 Returns: 

3185 The created Operation. 

3186 """ 

3187 _ctx = _context._context or _context.context() 

3188 tld = _ctx._thread_local_data 

3189 if tld.is_eager: 

3190 try: 

3191 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3192 _ctx, "ResourceSparseApplyMomentum", name, var, accum, lr, grad, 

3193 indices, momentum, "use_locking", use_locking, "use_nesterov", 

3194 use_nesterov) 

3195 return _result 

3196 except _core._NotOkStatusException as e: 

3197 _ops.raise_from_not_ok_status(e, name) 

3198 except _core._FallbackException: 

3199 pass 

3200 try: 

3201 return resource_sparse_apply_momentum_eager_fallback( 

3202 var, accum, lr, grad, indices, momentum, use_locking=use_locking, 

3203 use_nesterov=use_nesterov, name=name, ctx=_ctx) 

3204 except _core._SymbolicException: 

3205 pass # Add nodes to the TensorFlow graph. 

3206 # Add nodes to the TensorFlow graph. 

3207 if use_locking is None: 

3208 use_locking = False 

3209 use_locking = _execute.make_bool(use_locking, "use_locking") 

3210 if use_nesterov is None: 

3211 use_nesterov = False 

3212 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

3213 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3214 "ResourceSparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, 

3215 indices=indices, momentum=momentum, 

3216 use_locking=use_locking, 

3217 use_nesterov=use_nesterov, name=name) 

3218 return _op 

3219ResourceSparseApplyMomentum = tf_export("raw_ops.ResourceSparseApplyMomentum")(_ops.to_raw_op(resource_sparse_apply_momentum)) 
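# --- Editor's illustrative sketch (not part of the generated wrapper) ---
# Row-wise Python rendering of the classic momentum update described above for
# ResourceSparseApplyMomentum. Unlike the Keras variant, the learning rate is
# applied when var is updated rather than when accum is accumulated.
# Names are illustrative assumptions.
def _sparse_momentum_step(var, accum, grad, indices, lr, momentum):
    for g, row in zip(grad, indices):
        accum[row] = accum[row] * momentum + g               # accum = accum * momentum + grad
        var[row] -= lr * accum[row]                          # var -= lr * accum
    return var, accum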

3220 

3221 

3222def resource_sparse_apply_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking, use_nesterov, name, ctx): 

3223 if use_locking is None: 

3224 use_locking = False 

3225 use_locking = _execute.make_bool(use_locking, "use_locking") 

3226 if use_nesterov is None: 

3227 use_nesterov = False 

3228 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

3229 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, grad, momentum], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3230 (lr, grad, momentum) = _inputs_T 

3231 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3232 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3233 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

3234 _inputs_flat = [var, accum, lr, grad, indices, momentum] 

3235 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3236 use_locking, "use_nesterov", use_nesterov) 

3237 _result = _execute.execute(b"ResourceSparseApplyMomentum", 0, 

3238 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3239 name=name) 

3240 _result = None 

3241 return _result 
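
# A minimal eager-mode usage sketch for the raw op exported above; this helper
# is illustrative and not part of the generated API. It assumes TensorFlow 2.x,
# where `tf.raw_ops.ResourceSparseApplyMomentum` maps onto this wrapper, and
# all tensor values below are made up for demonstration.
def _example_resource_sparse_apply_momentum():
  import tensorflow as tf

  var = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
  accum = tf.Variable(tf.zeros_like(var))
  grad = tf.constant([[0.1, 0.1]])   # one gradient row per entry of `indices`
  indices = tf.constant([0])         # update only row 0 of var/accum

  tf.raw_ops.ResourceSparseApplyMomentum(
      var=var.handle, accum=accum.handle, lr=tf.constant(0.01),
      grad=grad, indices=indices, momentum=tf.constant(0.9),
      use_locking=False, use_nesterov=False)

  # accum[0] is now 0.9 * 0 + grad            = [0.1, 0.1]
  # var[0]   is now var[0] - 0.01 * accum[0]  = [0.999, 1.999]
  return var, accum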

3242 

3243 

3244def resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None): 

3245 r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. 

3246 

3247 That is, for rows for which we have grad, we update var and accum as follows:

3248 accum += grad * grad 

3249 prox_v = var 

3250 prox_v -= lr * grad * (1 / sqrt(accum)) 

3251 var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} 

3252 

3253 Args: 

3254 var: A `Tensor` of type `resource`. Should be from a Variable(). 

3255 accum: A `Tensor` of type `resource`. Should be from a Variable(). 

3256 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3257 Learning rate. Must be a scalar. 

3258 l1: A `Tensor`. Must have the same type as `lr`. 

3259 L1 regularization. Must be a scalar. 

3260 l2: A `Tensor`. Must have the same type as `lr`. 

3261 L2 regularization. Must be a scalar. 

3262 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

3263 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3264 A vector of indices into the first dimension of var and accum. 

3265 use_locking: An optional `bool`. Defaults to `False`. 

3266 If True, updating of the var and accum tensors will be protected by 

3267 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

3268 name: A name for the operation (optional). 

3269 

3270 Returns: 

3271 The created Operation. 

3272 """ 

3273 _ctx = _context._context or _context.context() 

3274 tld = _ctx._thread_local_data 

3275 if tld.is_eager: 

3276 try: 

3277 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3278 _ctx, "ResourceSparseApplyProximalAdagrad", name, var, accum, lr, l1, 

3279 l2, grad, indices, "use_locking", use_locking) 

3280 return _result 

3281 except _core._NotOkStatusException as e: 

3282 _ops.raise_from_not_ok_status(e, name) 

3283 except _core._FallbackException: 

3284 pass 

3285 try: 

3286 return resource_sparse_apply_proximal_adagrad_eager_fallback( 

3287 var, accum, lr, l1, l2, grad, indices, use_locking=use_locking, 

3288 name=name, ctx=_ctx) 

3289 except _core._SymbolicException: 

3290 pass # Add nodes to the TensorFlow graph. 

3291 # Add nodes to the TensorFlow graph. 

3292 if use_locking is None: 

3293 use_locking = False 

3294 use_locking = _execute.make_bool(use_locking, "use_locking") 

3295 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3296 "ResourceSparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, 

3297 l1=l1, l2=l2, grad=grad, 

3298 indices=indices, 

3299 use_locking=use_locking, 

3300 name=name) 

3301 return _op 

3302ResourceSparseApplyProximalAdagrad = tf_export("raw_ops.ResourceSparseApplyProximalAdagrad")(_ops.to_raw_op(resource_sparse_apply_proximal_adagrad)) 

3303 

3304 

3305def resource_sparse_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, indices, use_locking, name, ctx): 

3306 if use_locking is None: 

3307 use_locking = False 

3308 use_locking = _execute.make_bool(use_locking, "use_locking") 

3309 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3310 (lr, l1, l2, grad) = _inputs_T 

3311 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3312 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3313 accum = _ops.convert_to_tensor(accum, _dtypes.resource) 

3314 _inputs_flat = [var, accum, lr, l1, l2, grad, indices] 

3315 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3316 use_locking) 

3317 _result = _execute.execute(b"ResourceSparseApplyProximalAdagrad", 0, 

3318 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3319 name=name) 

3320 _result = None 

3321 return _result 
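
# A rough NumPy sketch of the sparse proximal-Adagrad (FOBOS) row update
# documented above, written against plain arrays rather than resource handles.
# The helper name is illustrative and not part of this module's API.
def _sparse_proximal_adagrad_reference(var, accum, lr, l1, l2, grad, indices):
  import numpy as np
  for g, i in zip(grad, indices):
    accum[i] += g * g
    prox_v = var[i] - lr * g / np.sqrt(accum[i])
    # Soft-threshold prox_v by lr*l1, then shrink by 1 / (1 + lr*l2).
    var[i] = (np.sign(prox_v) / (1.0 + lr * l2)
              * np.maximum(np.abs(prox_v) - lr * l1, 0.0))
  return var, accum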

3322 

3323 

3324def resource_sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices, use_locking=False, name=None): 

3325 r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate. 

3326 

3327 That is, for rows for which we have grad, we update var as follows:

3328 prox_v = var - alpha * grad 

3329 var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} 

3330 

3331 Args: 

3332 var: A `Tensor` of type `resource`. Should be from a Variable(). 

3333 alpha: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3334 Scaling factor. Must be a scalar. 

3335 l1: A `Tensor`. Must have the same type as `alpha`. 

3336 L1 regularization. Must be a scalar. 

3337 l2: A `Tensor`. Must have the same type as `alpha`. 

3338 L2 regularization. Must be a scalar. 

3339 grad: A `Tensor`. Must have the same type as `alpha`. The gradient. 

3340 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3341 A vector of indices into the first dimension of var and accum. 

3342 use_locking: An optional `bool`. Defaults to `False`. 

3343 If True, the subtraction will be protected by a lock; 

3344 otherwise the behavior is undefined, but may exhibit less contention. 

3345 name: A name for the operation (optional). 

3346 

3347 Returns: 

3348 The created Operation. 

3349 """ 

3350 _ctx = _context._context or _context.context() 

3351 tld = _ctx._thread_local_data 

3352 if tld.is_eager: 

3353 try: 

3354 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3355 _ctx, "ResourceSparseApplyProximalGradientDescent", name, var, alpha, 

3356 l1, l2, grad, indices, "use_locking", use_locking) 

3357 return _result 

3358 except _core._NotOkStatusException as e: 

3359 _ops.raise_from_not_ok_status(e, name) 

3360 except _core._FallbackException: 

3361 pass 

3362 try: 

3363 return resource_sparse_apply_proximal_gradient_descent_eager_fallback( 

3364 var, alpha, l1, l2, grad, indices, use_locking=use_locking, 

3365 name=name, ctx=_ctx) 

3366 except _core._SymbolicException: 

3367 pass # Add nodes to the TensorFlow graph. 

3368 # Add nodes to the TensorFlow graph. 

3369 if use_locking is None: 

3370 use_locking = False 

3371 use_locking = _execute.make_bool(use_locking, "use_locking") 

3372 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3373 "ResourceSparseApplyProximalGradientDescent", var=var, alpha=alpha, 

3374 l1=l1, l2=l2, grad=grad, 

3375 indices=indices, 

3376 use_locking=use_locking, 

3377 name=name) 

3378 return _op 

3379ResourceSparseApplyProximalGradientDescent = tf_export("raw_ops.ResourceSparseApplyProximalGradientDescent")(_ops.to_raw_op(resource_sparse_apply_proximal_gradient_descent)) 

3380 

3381 

3382def resource_sparse_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, grad, indices, use_locking, name, ctx): 

3383 if use_locking is None: 

3384 use_locking = False 

3385 use_locking = _execute.make_bool(use_locking, "use_locking") 

3386 _attr_T, _inputs_T = _execute.args_to_matching_eager([alpha, l1, l2, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3387 (alpha, l1, l2, grad) = _inputs_T 

3388 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3389 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3390 _inputs_flat = [var, alpha, l1, l2, grad, indices] 

3391 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3392 use_locking) 

3393 _result = _execute.execute(b"ResourceSparseApplyProximalGradientDescent", 0, 

3394 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3395 name=name) 

3396 _result = None 

3397 return _result 
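
# A rough NumPy sketch of the fixed-learning-rate FOBOS row update documented
# above; it differs from the proximal-Adagrad sketch earlier only in stepping
# with `alpha` directly instead of an Adagrad-scaled rate. Illustrative only.
def _sparse_proximal_gd_reference(var, alpha, l1, l2, grad, indices):
  import numpy as np
  for g, i in zip(grad, indices):
    prox_v = var[i] - alpha * g
    var[i] = (np.sign(prox_v) / (1.0 + alpha * l2)
              * np.maximum(np.abs(prox_v) - alpha * l1, 0.0))
  return var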

3398 

3399 

3400def resource_sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None): 

3401 r"""Update '*var' according to the RMSProp algorithm. 

3402 

3403 Note that in the dense implementation of this algorithm, ms and mom will

3404 update even if the grad is zero, but in this sparse implementation, ms 

3405 and mom will not update in iterations during which the grad is zero. 

3406 

3407 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

3408 Delta = learning_rate * gradient / sqrt(mean_square + epsilon) 

3409 

3410 ms <- rho * ms_{t-1} + (1-rho) * grad * grad 

3411 mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) 

3412 var <- var - mom 

3413 

3414 Args: 

3415 var: A `Tensor` of type `resource`. Should be from a Variable(). 

3416 ms: A `Tensor` of type `resource`. Should be from a Variable(). 

3417 mom: A `Tensor` of type `resource`. Should be from a Variable(). 

3418 lr: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3419 Scaling factor. Must be a scalar. 

3420 rho: A `Tensor`. Must have the same type as `lr`. 

3421 Decay rate. Must be a scalar. 

3422 momentum: A `Tensor`. Must have the same type as `lr`. 

3423 epsilon: A `Tensor`. Must have the same type as `lr`. 

3424 Ridge term. Must be a scalar. 

3425 grad: A `Tensor`. Must have the same type as `lr`. The gradient. 

3426 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3427 A vector of indices into the first dimension of var, ms and mom. 

3428 use_locking: An optional `bool`. Defaults to `False`. 

3429 If `True`, updating of the var, ms, and mom tensors is protected 

3430 by a lock; otherwise the behavior is undefined, but may exhibit less 

3431 contention. 

3432 name: A name for the operation (optional). 

3433 

3434 Returns: 

3435 The created Operation. 

3436 """ 

3437 _ctx = _context._context or _context.context() 

3438 tld = _ctx._thread_local_data 

3439 if tld.is_eager: 

3440 try: 

3441 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3442 _ctx, "ResourceSparseApplyRMSProp", name, var, ms, mom, lr, rho, 

3443 momentum, epsilon, grad, indices, "use_locking", use_locking) 

3444 return _result 

3445 except _core._NotOkStatusException as e: 

3446 _ops.raise_from_not_ok_status(e, name) 

3447 except _core._FallbackException: 

3448 pass 

3449 try: 

3450 return resource_sparse_apply_rms_prop_eager_fallback( 

3451 var, ms, mom, lr, rho, momentum, epsilon, grad, indices, 

3452 use_locking=use_locking, name=name, ctx=_ctx) 

3453 except _core._SymbolicException: 

3454 pass # Add nodes to the TensorFlow graph. 

3455 # Add nodes to the TensorFlow graph. 

3456 if use_locking is None: 

3457 use_locking = False 

3458 use_locking = _execute.make_bool(use_locking, "use_locking") 

3459 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3460 "ResourceSparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, 

3461 momentum=momentum, epsilon=epsilon, 

3462 grad=grad, indices=indices, 

3463 use_locking=use_locking, name=name) 

3464 return _op 

3465ResourceSparseApplyRMSProp = tf_export("raw_ops.ResourceSparseApplyRMSProp")(_ops.to_raw_op(resource_sparse_apply_rms_prop)) 

3466 

3467 

3468def resource_sparse_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking, name, ctx): 

3469 if use_locking is None: 

3470 use_locking = False 

3471 use_locking = _execute.make_bool(use_locking, "use_locking") 

3472 _attr_T, _inputs_T = _execute.args_to_matching_eager([lr, rho, momentum, epsilon, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3473 (lr, rho, momentum, epsilon, grad) = _inputs_T 

3474 _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ]) 

3475 var = _ops.convert_to_tensor(var, _dtypes.resource) 

3476 ms = _ops.convert_to_tensor(ms, _dtypes.resource) 

3477 mom = _ops.convert_to_tensor(mom, _dtypes.resource) 

3478 _inputs_flat = [var, ms, mom, lr, rho, momentum, epsilon, grad, indices] 

3479 _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking", 

3480 use_locking) 

3481 _result = _execute.execute(b"ResourceSparseApplyRMSProp", 0, 

3482 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3483 name=name) 

3484 _result = None 

3485 return _result 
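
# A rough NumPy sketch of the sparse RMSProp row update documented above. Only
# rows named in `indices` touch ms/mom, matching the note that the sparse
# kernel skips rows whose gradient is absent. Names are illustrative only.
def _sparse_rms_prop_reference(var, ms, mom, lr, rho, momentum, epsilon,
                               grad, indices):
  import numpy as np
  for g, i in zip(grad, indices):
    ms[i] = rho * ms[i] + (1.0 - rho) * g * g
    mom[i] = momentum * mom[i] + lr * g / np.sqrt(ms[i] + epsilon)
    var[i] -= mom[i]
  return var, ms, mom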

3486 

3487 

3488def sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking=False, name=None): 

3489 r"""var: Should be from a Variable(). 

3490 

3491 Args: 

3492 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
Should be from a Variable().

3493 accum: A mutable `Tensor`. Must have the same type as `var`. 

3494 Should be from a Variable(). 

3495 accum_update: A mutable `Tensor`. Must have the same type as `var`. 

3496 Should be from a Variable().

3497 lr: A `Tensor`. Must have the same type as `var`. 

3498 Learning rate. Must be a scalar. 

3499 rho: A `Tensor`. Must have the same type as `var`. 

3500 Decay factor. Must be a scalar. 

3501 epsilon: A `Tensor`. Must have the same type as `var`. 

3502 Constant factor. Must be a scalar. 

3503 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3504 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3505 A vector of indices into the first dimension of var and accum. 

3506 use_locking: An optional `bool`. Defaults to `False`. 

3507 If True, updating of the var and accum tensors will be protected by 

3508 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

3509 name: A name for the operation (optional). 

3510 

3511 Returns: 

3512 A mutable `Tensor`. Has the same type as `var`. 

3513 """ 

3514 _ctx = _context._context or _context.context() 

3515 tld = _ctx._thread_local_data 

3516 if tld.is_eager: 

3517 raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.") 

3518 # Add nodes to the TensorFlow graph. 

3519 if use_locking is None: 

3520 use_locking = False 

3521 use_locking = _execute.make_bool(use_locking, "use_locking") 

3522 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3523 "SparseApplyAdadelta", var=var, accum=accum, 

3524 accum_update=accum_update, lr=lr, rho=rho, 

3525 epsilon=epsilon, grad=grad, indices=indices, 

3526 use_locking=use_locking, name=name) 

3527 _result = _outputs[:] 

3528 if _execute.must_record_gradient(): 

3529 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3530 _op._get_attr_type("Tindices"), "use_locking", 

3531 _op._get_attr_bool("use_locking")) 

3532 _inputs_flat = _op.inputs 

3533 _execute.record_gradient( 

3534 "SparseApplyAdadelta", _inputs_flat, _attrs, _result) 

3535 _result, = _result 

3536 return _result 

3537 

3538SparseApplyAdadelta = tf_export("raw_ops.SparseApplyAdadelta")(_ops.to_raw_op(sparse_apply_adadelta)) 

3539 

3540 

3541def sparse_apply_adadelta_eager_fallback(var, accum, accum_update, lr, rho, epsilon, grad, indices, use_locking, name, ctx): 

3542 raise RuntimeError("sparse_apply_adadelta op does not support eager execution. Arg 'out' is a ref.") 
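
# The docstring above lists the Adadelta slots but not the update rule itself,
# so the sketch below follows the standard Adadelta recurrence (squared
# gradients accumulated in `accum`, squared updates in `accum_update`); treat
# it as an assumption about the kernel, and the helper as illustrative only.
def _sparse_adadelta_reference(var, accum, accum_update, lr, rho, epsilon,
                               grad, indices):
  import numpy as np
  for g, i in zip(grad, indices):
    accum[i] = rho * accum[i] + (1.0 - rho) * g * g
    update = np.sqrt(accum_update[i] + epsilon) / np.sqrt(accum[i] + epsilon) * g
    accum_update[i] = rho * accum_update[i] + (1.0 - rho) * update * update
    var[i] -= lr * update
  return var, accum, accum_update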

3543 

3544def sparse_apply_adagrad(var, accum, lr, grad, indices, use_locking=False, update_slots=True, name=None): 

3545 r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. 

3546 

3547 That is, for rows for which we have grad, we update var and accum as follows:

3548 $$accum += grad * grad$$ 

3549 $$var -= lr * grad * (1 / sqrt(accum))$$ 

3550 

3551 Args: 

3552 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3553 Should be from a Variable(). 

3554 accum: A mutable `Tensor`. Must have the same type as `var`. 

3555 Should be from a Variable(). 

3556 lr: A `Tensor`. Must have the same type as `var`. 

3557 Learning rate. Must be a scalar. 

3558 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3559 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3560 A vector of indices into the first dimension of var and accum. 

3561 use_locking: An optional `bool`. Defaults to `False`. 

3562 If `True`, updating of the var and accum tensors will be protected 

3563 by a lock; otherwise the behavior is undefined, but may exhibit less 

3564 contention. 

3565 update_slots: An optional `bool`. Defaults to `True`. 

3566 name: A name for the operation (optional). 

3567 

3568 Returns: 

3569 A mutable `Tensor`. Has the same type as `var`. 

3570 """ 

3571 _ctx = _context._context or _context.context() 

3572 tld = _ctx._thread_local_data 

3573 if tld.is_eager: 

3574 raise RuntimeError("sparse_apply_adagrad op does not support eager execution. Arg 'out' is a ref.") 

3575 # Add nodes to the TensorFlow graph. 

3576 if use_locking is None: 

3577 use_locking = False 

3578 use_locking = _execute.make_bool(use_locking, "use_locking") 

3579 if update_slots is None: 

3580 update_slots = True 

3581 update_slots = _execute.make_bool(update_slots, "update_slots") 

3582 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3583 "SparseApplyAdagrad", var=var, accum=accum, lr=lr, grad=grad, 

3584 indices=indices, use_locking=use_locking, 

3585 update_slots=update_slots, name=name) 

3586 _result = _outputs[:] 

3587 if _execute.must_record_gradient(): 

3588 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3589 _op._get_attr_type("Tindices"), "use_locking", 

3590 _op._get_attr_bool("use_locking"), "update_slots", 

3591 _op._get_attr_bool("update_slots")) 

3592 _inputs_flat = _op.inputs 

3593 _execute.record_gradient( 

3594 "SparseApplyAdagrad", _inputs_flat, _attrs, _result) 

3595 _result, = _result 

3596 return _result 

3597 

3598SparseApplyAdagrad = tf_export("raw_ops.SparseApplyAdagrad")(_ops.to_raw_op(sparse_apply_adagrad)) 

3599 

3600 

3601def sparse_apply_adagrad_eager_fallback(var, accum, lr, grad, indices, use_locking, update_slots, name, ctx): 

3602 raise RuntimeError("sparse_apply_adagrad op does not support eager execution. Arg 'out' is a ref.") 
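
# A rough NumPy sketch of the sparse Adagrad row update documented above. The
# `update_slots` branch assumes the attr simply gates the accumulator update,
# which the docstring does not spell out; the helper is illustrative only.
def _sparse_adagrad_reference(var, accum, lr, grad, indices, update_slots=True):
  import numpy as np
  for g, i in zip(grad, indices):
    if update_slots:
      accum[i] += g * g                   # $$accum += grad * grad$$
    var[i] -= lr * g / np.sqrt(accum[i])  # $$var -= lr * grad / sqrt(accum)$$
  return var, accum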

3603 

3604def sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking=False, name=None): 

3605 r"""Update entries in '*var' and '*accum' according to the proximal adagrad scheme. 

3606 

3607 Args: 

3608 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3609 Should be from a Variable(). 

3610 gradient_accumulator: A mutable `Tensor`. Must have the same type as `var`. 

3611 Should be from a Variable(). 

3612 gradient_squared_accumulator: A mutable `Tensor`. Must have the same type as `var`. 

3613 Should be from a Variable(). 

3614 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3615 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3616 A vector of indices into the first dimension of var and accum. 

3617 lr: A `Tensor`. Must have the same type as `var`. 

3618 Learning rate. Must be a scalar. 

3619 l1: A `Tensor`. Must have the same type as `var`. 

3620 L1 regularization. Must be a scalar. 

3621 l2: A `Tensor`. Must have the same type as `var`. 

3622 L2 regularization. Must be a scalar. 

3623 global_step: A `Tensor` of type `int64`. 

3624 Training step number. Must be a scalar. 

3625 use_locking: An optional `bool`. Defaults to `False`. 

3626 If True, updating of the var and accum tensors will be protected by 

3627 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

3628 name: A name for the operation (optional). 

3629 

3630 Returns: 

3631 A mutable `Tensor`. Has the same type as `var`. 

3632 """ 

3633 _ctx = _context._context or _context.context() 

3634 tld = _ctx._thread_local_data 

3635 if tld.is_eager: 

3636 raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") 

3637 # Add nodes to the TensorFlow graph. 

3638 if use_locking is None: 

3639 use_locking = False 

3640 use_locking = _execute.make_bool(use_locking, "use_locking") 

3641 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3642 "SparseApplyAdagradDA", var=var, 

3643 gradient_accumulator=gradient_accumulator, 

3644 gradient_squared_accumulator=gradient_squared_accumulator, 

3645 grad=grad, indices=indices, lr=lr, l1=l1, 

3646 l2=l2, global_step=global_step, 

3647 use_locking=use_locking, name=name) 

3648 _result = _outputs[:] 

3649 if _execute.must_record_gradient(): 

3650 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3651 _op._get_attr_type("Tindices"), "use_locking", 

3652 _op._get_attr_bool("use_locking")) 

3653 _inputs_flat = _op.inputs 

3654 _execute.record_gradient( 

3655 "SparseApplyAdagradDA", _inputs_flat, _attrs, _result) 

3656 _result, = _result 

3657 return _result 

3658 

3659SparseApplyAdagradDA = tf_export("raw_ops.SparseApplyAdagradDA")(_ops.to_raw_op(sparse_apply_adagrad_da)) 

3660 

3661 

3662def sparse_apply_adagrad_da_eager_fallback(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step, use_locking, name, ctx): 

3663 raise RuntimeError("sparse_apply_adagrad_da op does not support eager execution. Arg 'out' is a ref.") 

3664 

3665def sparse_apply_adagrad_v2(var, accum, lr, epsilon, grad, indices, use_locking=False, update_slots=True, name=None): 

3666 r"""Update relevant entries in '*var' and '*accum' according to the adagrad scheme. 

3667 

3668 That is, for rows for which we have grad, we update var and accum as follows:

3669 $$accum += grad * grad$$ 

3670 $$var -= lr * grad * (1 / sqrt(accum))$$ 

3671 

3672 Args: 

3673 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3674 Should be from a Variable(). 

3675 accum: A mutable `Tensor`. Must have the same type as `var`. 

3676 Should be from a Variable(). 

3677 lr: A `Tensor`. Must have the same type as `var`. 

3678 Learning rate. Must be a scalar. 

3679 epsilon: A `Tensor`. Must have the same type as `var`. 

3680 Constant factor. Must be a scalar. 

3681 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3682 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3683 A vector of indices into the first dimension of var and accum. 

3684 use_locking: An optional `bool`. Defaults to `False`. 

3685 If `True`, updating of the var and accum tensors will be protected 

3686 by a lock; otherwise the behavior is undefined, but may exhibit less 

3687 contention. 

3688 update_slots: An optional `bool`. Defaults to `True`. 

3689 name: A name for the operation (optional). 

3690 

3691 Returns: 

3692 A mutable `Tensor`. Has the same type as `var`. 

3693 """ 

3694 _ctx = _context._context or _context.context() 

3695 tld = _ctx._thread_local_data 

3696 if tld.is_eager: 

3697 raise RuntimeError("sparse_apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") 

3698 # Add nodes to the TensorFlow graph. 

3699 if use_locking is None: 

3700 use_locking = False 

3701 use_locking = _execute.make_bool(use_locking, "use_locking") 

3702 if update_slots is None: 

3703 update_slots = True 

3704 update_slots = _execute.make_bool(update_slots, "update_slots") 

3705 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3706 "SparseApplyAdagradV2", var=var, accum=accum, lr=lr, epsilon=epsilon, 

3707 grad=grad, indices=indices, 

3708 use_locking=use_locking, 

3709 update_slots=update_slots, name=name) 

3710 _result = _outputs[:] 

3711 if _execute.must_record_gradient(): 

3712 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3713 _op._get_attr_type("Tindices"), "use_locking", 

3714 _op._get_attr_bool("use_locking"), "update_slots", 

3715 _op._get_attr_bool("update_slots")) 

3716 _inputs_flat = _op.inputs 

3717 _execute.record_gradient( 

3718 "SparseApplyAdagradV2", _inputs_flat, _attrs, _result) 

3719 _result, = _result 

3720 return _result 

3721 

3722SparseApplyAdagradV2 = tf_export("raw_ops.SparseApplyAdagradV2")(_ops.to_raw_op(sparse_apply_adagrad_v2)) 

3723 

3724 

3725def sparse_apply_adagrad_v2_eager_fallback(var, accum, lr, epsilon, grad, indices, use_locking, update_slots, name, ctx): 

3726 raise RuntimeError("sparse_apply_adagrad_v2 op does not support eager execution. Arg 'out' is a ref.") 

3727 

3728def sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None): 

3729 r"""Update '*var' according to the centered RMSProp algorithm. 

3730 

3731 The centered RMSProp algorithm uses an estimate of the centered second moment 

3732 (i.e., the variance) for normalization, as opposed to regular RMSProp, which 

3733 uses the (uncentered) second moment. This often helps with training, but is 

3734 slightly more expensive in terms of computation and memory. 

3735 

3736 Note that in the dense implementation of this algorithm, mg, ms, and mom will

3737 update even if the grad is zero, but in this sparse implementation, mg, ms, 

3738 and mom will not update in iterations during which the grad is zero. 

3739 

3740 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

3741 mean_grad = decay * mean_grad + (1-decay) * gradient 

3742 Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) 

3743 

3744 $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ 

3745 $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ 

3746 $$var <- var - mom$$ 

3747 

3748 Args: 

3749 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3750 Should be from a Variable(). 

3751 mg: A mutable `Tensor`. Must have the same type as `var`. 

3752 Should be from a Variable(). 

3753 ms: A mutable `Tensor`. Must have the same type as `var`. 

3754 Should be from a Variable(). 

3755 mom: A mutable `Tensor`. Must have the same type as `var`. 

3756 Should be from a Variable(). 

3757 lr: A `Tensor`. Must have the same type as `var`. 

3758 Scaling factor. Must be a scalar. 

3759 rho: A `Tensor`. Must have the same type as `var`. 

3760 Decay rate. Must be a scalar. 

3761 momentum: A `Tensor`. Must have the same type as `var`. 

3762 epsilon: A `Tensor`. Must have the same type as `var`. 

3763 Ridge term. Must be a scalar. 

3764 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3765 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3766 A vector of indices into the first dimension of var, ms and mom. 

3767 use_locking: An optional `bool`. Defaults to `False`. 

3768 If `True`, updating of the var, mg, ms, and mom tensors is 

3769 protected by a lock; otherwise the behavior is undefined, but may exhibit less 

3770 contention. 

3771 name: A name for the operation (optional). 

3772 

3773 Returns: 

3774 A mutable `Tensor`. Has the same type as `var`. 

3775 """ 

3776 _ctx = _context._context or _context.context() 

3777 tld = _ctx._thread_local_data 

3778 if tld.is_eager: 

3779 raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.") 

3780 # Add nodes to the TensorFlow graph. 

3781 if use_locking is None: 

3782 use_locking = False 

3783 use_locking = _execute.make_bool(use_locking, "use_locking") 

3784 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3785 "SparseApplyCenteredRMSProp", var=var, mg=mg, ms=ms, mom=mom, lr=lr, 

3786 rho=rho, momentum=momentum, 

3787 epsilon=epsilon, grad=grad, 

3788 indices=indices, 

3789 use_locking=use_locking, name=name) 

3790 _result = _outputs[:] 

3791 if _execute.must_record_gradient(): 

3792 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3793 _op._get_attr_type("Tindices"), "use_locking", 

3794 _op._get_attr_bool("use_locking")) 

3795 _inputs_flat = _op.inputs 

3796 _execute.record_gradient( 

3797 "SparseApplyCenteredRMSProp", _inputs_flat, _attrs, _result) 

3798 _result, = _result 

3799 return _result 

3800 

3801SparseApplyCenteredRMSProp = tf_export("raw_ops.SparseApplyCenteredRMSProp")(_ops.to_raw_op(sparse_apply_centered_rms_prop)) 

3802 

3803 

3804def sparse_apply_centered_rms_prop_eager_fallback(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking, name, ctx): 

3805 raise RuntimeError("sparse_apply_centered_rms_prop op does not support eager execution. Arg 'out' is a ref.") 
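
# A rough NumPy sketch of the centered variant, following the prose above: the
# denominator subtracts the squared running mean of the gradient (mg) from the
# running mean square (ms). Names and structure are illustrative only.
def _sparse_centered_rms_prop_reference(var, mg, ms, mom, lr, rho, momentum,
                                        epsilon, grad, indices):
  import numpy as np
  for g, i in zip(grad, indices):
    ms[i] = rho * ms[i] + (1.0 - rho) * g * g
    mg[i] = rho * mg[i] + (1.0 - rho) * g
    mom[i] = (momentum * mom[i]
              + lr * g / np.sqrt(ms[i] - mg[i] * mg[i] + epsilon))
    var[i] -= mom[i]
  return var, mg, ms, mom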

3806 

3807def sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

3808 r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. 

3809 

3810 That is, for rows for which we have grad, we update var, accum and linear as follows:

3811 $$accum_new = accum + grad * grad$$ 

3812 $$linear += grad - (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$

3813 $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ 

3814 $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ 

3815 $$accum = accum_{new}$$ 

3816 

3817 Args: 

3818 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3819 Should be from a Variable(). 

3820 accum: A mutable `Tensor`. Must have the same type as `var`. 

3821 Should be from a Variable(). 

3822 linear: A mutable `Tensor`. Must have the same type as `var`. 

3823 Should be from a Variable(). 

3824 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3825 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3826 A vector of indices into the first dimension of var and accum. 

3827 lr: A `Tensor`. Must have the same type as `var`. 

3828 Scaling factor. Must be a scalar. 

3829 l1: A `Tensor`. Must have the same type as `var`. 

3830 L1 regularization. Must be a scalar. 

3831 l2: A `Tensor`. Must have the same type as `var`. 

3832 L2 regularization. Must be a scalar. 

3833 lr_power: A `Tensor`. Must have the same type as `var`. 

3834 Scaling factor. Must be a scalar. 

3835 use_locking: An optional `bool`. Defaults to `False`. 

3836 If `True`, updating of the var and accum tensors will be protected 

3837 by a lock; otherwise the behavior is undefined, but may exhibit less 

3838 contention. 

3839 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

3840 name: A name for the operation (optional). 

3841 

3842 Returns: 

3843 A mutable `Tensor`. Has the same type as `var`. 

3844 """ 

3845 _ctx = _context._context or _context.context() 

3846 tld = _ctx._thread_local_data 

3847 if tld.is_eager: 

3848 raise RuntimeError("sparse_apply_ftrl op does not support eager execution. Arg 'out' is a ref.") 

3849 # Add nodes to the TensorFlow graph. 

3850 if use_locking is None: 

3851 use_locking = False 

3852 use_locking = _execute.make_bool(use_locking, "use_locking") 

3853 if multiply_linear_by_lr is None: 

3854 multiply_linear_by_lr = False 

3855 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

3856 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3857 "SparseApplyFtrl", var=var, accum=accum, linear=linear, grad=grad, 

3858 indices=indices, lr=lr, l1=l1, l2=l2, 

3859 lr_power=lr_power, use_locking=use_locking, 

3860 multiply_linear_by_lr=multiply_linear_by_lr, 

3861 name=name) 

3862 _result = _outputs[:] 

3863 if _execute.must_record_gradient(): 

3864 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3865 _op._get_attr_type("Tindices"), "use_locking", 

3866 _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", 

3867 _op._get_attr_bool("multiply_linear_by_lr")) 

3868 _inputs_flat = _op.inputs 

3869 _execute.record_gradient( 

3870 "SparseApplyFtrl", _inputs_flat, _attrs, _result) 

3871 _result, = _result 

3872 return _result 

3873 

3874SparseApplyFtrl = tf_export("raw_ops.SparseApplyFtrl")(_ops.to_raw_op(sparse_apply_ftrl)) 

3875 

3876 

3877def sparse_apply_ftrl_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

3878 raise RuntimeError("sparse_apply_ftrl op does not support eager execution. Arg 'out' is a ref.") 
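
# A rough NumPy sketch of the FTRL-proximal row update written out above, with
# the usual lr_power = -0.5 in mind (so accum ** -lr_power is sqrt(accum)).
# The `multiply_linear_by_lr` variant is not modelled; names are illustrative.
def _sparse_ftrl_reference(var, accum, linear, grad, indices, lr, l1, l2,
                           lr_power):
  import numpy as np
  for g, i in zip(grad, indices):
    accum_new = accum[i] + g * g
    linear[i] += g - (accum_new ** -lr_power
                      - accum[i] ** -lr_power) / lr * var[i]
    quadratic = 1.0 / (accum_new ** lr_power * lr) + 2.0 * l2
    var[i] = np.where(np.abs(linear[i]) > l1,
                      (np.sign(linear[i]) * l1 - linear[i]) / quadratic,
                      0.0)
    accum[i] = accum_new
  return var, accum, linear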

3879 

3880def sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False, multiply_linear_by_lr=False, name=None): 

3881 r"""Update relevant entries in '*var' according to the Ftrl-proximal scheme. 

3882 

3883 That is, for rows for which we have grad, we update var, accum and linear as follows:

3884 grad_with_shrinkage = grad + 2 * l2_shrinkage * var 

3885 accum_new = accum + grad * grad 

3886 linear += grad_with_shrinkage - 

3887 (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var 

3888 quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 

3889 var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 

3890 accum = accum_new 

3891 

3892 Args: 

3893 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3894 Should be from a Variable(). 

3895 accum: A mutable `Tensor`. Must have the same type as `var`. 

3896 Should be from a Variable(). 

3897 linear: A mutable `Tensor`. Must have the same type as `var`. 

3898 Should be from a Variable(). 

3899 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3900 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3901 A vector of indices into the first dimension of var and accum. 

3902 lr: A `Tensor`. Must have the same type as `var`. 

3903 Scaling factor. Must be a scalar. 

3904 l1: A `Tensor`. Must have the same type as `var`. 

3905 L1 regularization. Must be a scalar. 

3906 l2: A `Tensor`. Must have the same type as `var`. 

3907 L2 regularization. Must be a scalar.

3908 l2_shrinkage: A `Tensor`. Must have the same type as `var`. L2 shrinkage regularization. Must be a scalar.

3909 lr_power: A `Tensor`. Must have the same type as `var`. 

3910 Scaling factor. Must be a scalar. 

3911 use_locking: An optional `bool`. Defaults to `False`. 

3912 If `True`, updating of the var and accum tensors will be protected 

3913 by a lock; otherwise the behavior is undefined, but may exhibit less 

3914 contention. 

3915 multiply_linear_by_lr: An optional `bool`. Defaults to `False`. 

3916 name: A name for the operation (optional). 

3917 

3918 Returns: 

3919 A mutable `Tensor`. Has the same type as `var`. 

3920 """ 

3921 _ctx = _context._context or _context.context() 

3922 tld = _ctx._thread_local_data 

3923 if tld.is_eager: 

3924 raise RuntimeError("sparse_apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") 

3925 # Add nodes to the TensorFlow graph. 

3926 if use_locking is None: 

3927 use_locking = False 

3928 use_locking = _execute.make_bool(use_locking, "use_locking") 

3929 if multiply_linear_by_lr is None: 

3930 multiply_linear_by_lr = False 

3931 multiply_linear_by_lr = _execute.make_bool(multiply_linear_by_lr, "multiply_linear_by_lr") 

3932 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3933 "SparseApplyFtrlV2", var=var, accum=accum, linear=linear, grad=grad, 

3934 indices=indices, lr=lr, l1=l1, l2=l2, 

3935 l2_shrinkage=l2_shrinkage, lr_power=lr_power, 

3936 use_locking=use_locking, 

3937 multiply_linear_by_lr=multiply_linear_by_lr, 

3938 name=name) 

3939 _result = _outputs[:] 

3940 if _execute.must_record_gradient(): 

3941 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

3942 _op._get_attr_type("Tindices"), "use_locking", 

3943 _op._get_attr_bool("use_locking"), "multiply_linear_by_lr", 

3944 _op._get_attr_bool("multiply_linear_by_lr")) 

3945 _inputs_flat = _op.inputs 

3946 _execute.record_gradient( 

3947 "SparseApplyFtrlV2", _inputs_flat, _attrs, _result) 

3948 _result, = _result 

3949 return _result 

3950 

3951SparseApplyFtrlV2 = tf_export("raw_ops.SparseApplyFtrlV2")(_ops.to_raw_op(sparse_apply_ftrl_v2)) 

3952 

3953 

3954def sparse_apply_ftrl_v2_eager_fallback(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power, use_locking, multiply_linear_by_lr, name, ctx): 

3955 raise RuntimeError("sparse_apply_ftrl_v2 op does not support eager execution. Arg 'out' is a ref.") 
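
# A rough sketch of the only place where the V2 scheme above departs from plain
# FTRL: the linear slot is advanced with an L2-shrinkage-augmented gradient
# while the accumulator is still built from the raw gradient. Works on scalars
# or NumPy rows; the helper name is illustrative only.
def _ftrl_v2_linear_step_reference(var_row, accum_row, linear_row, g, lr,
                                   l2_shrinkage, lr_power):
  grad_with_shrinkage = g + 2.0 * l2_shrinkage * var_row
  accum_new = accum_row + g * g
  linear_row = linear_row + grad_with_shrinkage - (
      accum_new ** -lr_power - accum_row ** -lr_power) / lr * var_row
  return linear_row, accum_new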

3956 

3957def sparse_apply_momentum(var, accum, lr, grad, indices, momentum, use_locking=False, use_nesterov=False, name=None): 

3958 r"""Update relevant entries in '*var' and '*accum' according to the momentum scheme. 

3959 

3960 Set use_nesterov = True if you want to use Nesterov momentum. 

3961 

3962 That is, for rows for which we have grad, we update var and accum as follows:

3963 

3964 $$accum = accum * momentum + grad$$ 

3965 $$var -= lr * accum$$ 

3966 

3967 Args: 

3968 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

3969 Should be from a Variable(). 

3970 accum: A mutable `Tensor`. Must have the same type as `var`. 

3971 Should be from a Variable(). 

3972 lr: A `Tensor`. Must have the same type as `var`. 

3973 Learning rate. Must be a scalar. 

3974 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

3975 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

3976 A vector of indices into the first dimension of var and accum. 

3977 momentum: A `Tensor`. Must have the same type as `var`. 

3978 Momentum. Must be a scalar. 

3979 use_locking: An optional `bool`. Defaults to `False`. 

3980 If `True`, updating of the var and accum tensors will be protected 

3981 by a lock; otherwise the behavior is undefined, but may exhibit less 

3982 contention. 

3983 use_nesterov: An optional `bool`. Defaults to `False`. 

3984 If `True`, the tensor passed to compute the gradient will be

3985 var - lr * momentum * accum, so the value of var you end up with is

3986 effectively that same look-ahead value, var - lr * momentum * accum.

3987 name: A name for the operation (optional). 

3988 

3989 Returns: 

3990 A mutable `Tensor`. Has the same type as `var`. 

3991 """ 

3992 _ctx = _context._context or _context.context() 

3993 tld = _ctx._thread_local_data 

3994 if tld.is_eager: 

3995 raise RuntimeError("sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref.") 

3996 # Add nodes to the TensorFlow graph. 

3997 if use_locking is None: 

3998 use_locking = False 

3999 use_locking = _execute.make_bool(use_locking, "use_locking") 

4000 if use_nesterov is None: 

4001 use_nesterov = False 

4002 use_nesterov = _execute.make_bool(use_nesterov, "use_nesterov") 

4003 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4004 "SparseApplyMomentum", var=var, accum=accum, lr=lr, grad=grad, 

4005 indices=indices, momentum=momentum, 

4006 use_locking=use_locking, 

4007 use_nesterov=use_nesterov, name=name) 

4008 _result = _outputs[:] 

4009 if _execute.must_record_gradient(): 

4010 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

4011 _op._get_attr_type("Tindices"), "use_locking", 

4012 _op._get_attr_bool("use_locking"), "use_nesterov", 

4013 _op._get_attr_bool("use_nesterov")) 

4014 _inputs_flat = _op.inputs 

4015 _execute.record_gradient( 

4016 "SparseApplyMomentum", _inputs_flat, _attrs, _result) 

4017 _result, = _result 

4018 return _result 

4019 

4020SparseApplyMomentum = tf_export("raw_ops.SparseApplyMomentum")(_ops.to_raw_op(sparse_apply_momentum)) 

4021 

4022 

4023def sparse_apply_momentum_eager_fallback(var, accum, lr, grad, indices, momentum, use_locking, use_nesterov, name, ctx): 

4024 raise RuntimeError("sparse_apply_momentum op does not support eager execution. Arg 'out' is a ref.") 
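
# A rough NumPy-style sketch of the momentum row update documented above. The
# Nesterov branch mirrors the description that the gradient is taken at the
# look-ahead point; treat its exact form as an assumption about the kernel.
def _sparse_momentum_reference(var, accum, lr, grad, indices, momentum,
                               use_nesterov=False):
  for g, i in zip(grad, indices):
    accum[i] = accum[i] * momentum + g      # $$accum = accum * momentum + grad$$
    if use_nesterov:
      var[i] -= lr * g + lr * momentum * accum[i]
    else:
      var[i] -= lr * accum[i]               # $$var -= lr * accum$$
  return var, accum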

4025 

4026def sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices, use_locking=False, name=None): 

4027 r"""Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. 

4028 

4029 That is, for rows for which we have grad, we update var and accum as follows:

4030 $$accum += grad * grad$$ 

4031 $$prox_v = var$$ 

4032 $$prox_v -= lr * grad * (1 / sqrt(accum))$$ 

4033 $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ 

4034 

4035 Args: 

4036 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

4037 Should be from a Variable(). 

4038 accum: A mutable `Tensor`. Must have the same type as `var`. 

4039 Should be from a Variable(). 

4040 lr: A `Tensor`. Must have the same type as `var`. 

4041 Learning rate. Must be a scalar. 

4042 l1: A `Tensor`. Must have the same type as `var`. 

4043 L1 regularization. Must be a scalar. 

4044 l2: A `Tensor`. Must have the same type as `var`. 

4045 L2 regularization. Must be a scalar. 

4046 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

4047 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

4048 A vector of indices into the first dimension of var and accum. 

4049 use_locking: An optional `bool`. Defaults to `False`. 

4050 If True, updating of the var and accum tensors will be protected by 

4051 a lock; otherwise the behavior is undefined, but may exhibit less contention. 

4052 name: A name for the operation (optional). 

4053 

4054 Returns: 

4055 A mutable `Tensor`. Has the same type as `var`. 

4056 """ 

4057 _ctx = _context._context or _context.context() 

4058 tld = _ctx._thread_local_data 

4059 if tld.is_eager: 

4060 raise RuntimeError("sparse_apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") 

4061 # Add nodes to the TensorFlow graph. 

4062 if use_locking is None: 

4063 use_locking = False 

4064 use_locking = _execute.make_bool(use_locking, "use_locking") 

4065 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4066 "SparseApplyProximalAdagrad", var=var, accum=accum, lr=lr, l1=l1, 

4067 l2=l2, grad=grad, indices=indices, 

4068 use_locking=use_locking, name=name) 

4069 _result = _outputs[:] 

4070 if _execute.must_record_gradient(): 

4071 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

4072 _op._get_attr_type("Tindices"), "use_locking", 

4073 _op._get_attr_bool("use_locking")) 

4074 _inputs_flat = _op.inputs 

4075 _execute.record_gradient( 

4076 "SparseApplyProximalAdagrad", _inputs_flat, _attrs, _result) 

4077 _result, = _result 

4078 return _result 

4079 

4080SparseApplyProximalAdagrad = tf_export("raw_ops.SparseApplyProximalAdagrad")(_ops.to_raw_op(sparse_apply_proximal_adagrad)) 

4081 

4082 

4083def sparse_apply_proximal_adagrad_eager_fallback(var, accum, lr, l1, l2, grad, indices, use_locking, name, ctx): 

4084 raise RuntimeError("sparse_apply_proximal_adagrad op does not support eager execution. Arg 'out' is a ref.") 

4085 

4086def sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices, use_locking=False, name=None): 

4087 r"""Sparse update '*var' as FOBOS algorithm with fixed learning rate. 

4088 

4089 That is, for rows for which we have grad, we update var as follows:

4090 $$prox_v = var - alpha * grad$$ 

4091 $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ 

4092 

4093 Args: 

4094 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

4095 Should be from a Variable(). 

4096 alpha: A `Tensor`. Must have the same type as `var`. 

4097 Scaling factor. Must be a scalar. 

4098 l1: A `Tensor`. Must have the same type as `var`. 

4099 L1 regularization. Must be a scalar. 

4100 l2: A `Tensor`. Must have the same type as `var`. 

4101 L2 regularization. Must be a scalar. 

4102 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

4103 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

4104 A vector of indices into the first dimension of var and accum. 

4105 use_locking: An optional `bool`. Defaults to `False`. 

4106 If True, the subtraction will be protected by a lock; 

4107 otherwise the behavior is undefined, but may exhibit less contention. 

4108 name: A name for the operation (optional). 

4109 

4110 Returns: 

4111 A mutable `Tensor`. Has the same type as `var`. 

4112 """ 

4113 _ctx = _context._context or _context.context() 

4114 tld = _ctx._thread_local_data 

4115 if tld.is_eager: 

4116 raise RuntimeError("sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 

4117 # Add nodes to the TensorFlow graph. 

4118 if use_locking is None: 

4119 use_locking = False 

4120 use_locking = _execute.make_bool(use_locking, "use_locking") 

4121 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4122 "SparseApplyProximalGradientDescent", var=var, alpha=alpha, l1=l1, 

4123 l2=l2, grad=grad, 

4124 indices=indices, 

4125 use_locking=use_locking, 

4126 name=name) 

4127 _result = _outputs[:] 

4128 if _execute.must_record_gradient(): 

4129 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

4130 _op._get_attr_type("Tindices"), "use_locking", 

4131 _op._get_attr_bool("use_locking")) 

4132 _inputs_flat = _op.inputs 

4133 _execute.record_gradient( 

4134 "SparseApplyProximalGradientDescent", _inputs_flat, _attrs, _result) 

4135 _result, = _result 

4136 return _result 

4137 

4138SparseApplyProximalGradientDescent = tf_export("raw_ops.SparseApplyProximalGradientDescent")(_ops.to_raw_op(sparse_apply_proximal_gradient_descent)) 

4139 

4140 

4141def sparse_apply_proximal_gradient_descent_eager_fallback(var, alpha, l1, l2, grad, indices, use_locking, name, ctx): 

4142 raise RuntimeError("sparse_apply_proximal_gradient_descent op does not support eager execution. Arg 'out' is a ref.") 

4143 

4144def sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking=False, name=None): 

4145 r"""Update '*var' according to the RMSProp algorithm. 

4146 

4147 Note that in the dense implementation of this algorithm, ms and mom will

4148 update even if the grad is zero, but in this sparse implementation, ms 

4149 and mom will not update in iterations during which the grad is zero. 

4150 

4151 mean_square = decay * mean_square + (1-decay) * gradient ** 2 

4152 Delta = learning_rate * gradient / sqrt(mean_square + epsilon) 

4153 

4154 $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ 

4155 $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ 

4156 $$var <- var - mom$$ 

4157 

4158 Args: 

4159 var: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

4160 Should be from a Variable(). 

4161 ms: A mutable `Tensor`. Must have the same type as `var`. 

4162 Should be from a Variable(). 

4163 mom: A mutable `Tensor`. Must have the same type as `var`. 

4164 Should be from a Variable(). 

4165 lr: A `Tensor`. Must have the same type as `var`. 

4166 Scaling factor. Must be a scalar. 

4167 rho: A `Tensor`. Must have the same type as `var`. 

4168 Decay rate. Must be a scalar. 

4169 momentum: A `Tensor`. Must have the same type as `var`. 

4170 epsilon: A `Tensor`. Must have the same type as `var`. 

4171 Ridge term. Must be a scalar. 

4172 grad: A `Tensor`. Must have the same type as `var`. The gradient. 

4173 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

4174 A vector of indices into the first dimension of var, ms and mom. 

4175 use_locking: An optional `bool`. Defaults to `False`. 

4176 If `True`, updating of the var, ms, and mom tensors is protected 

4177 by a lock; otherwise the behavior is undefined, but may exhibit less 

4178 contention. 

4179 name: A name for the operation (optional). 

4180 

4181 Returns: 

4182 A mutable `Tensor`. Has the same type as `var`. 

4183 """ 

4184 _ctx = _context._context or _context.context() 

4185 tld = _ctx._thread_local_data 

4186 if tld.is_eager: 

4187 raise RuntimeError("sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref.") 

4188 # Add nodes to the TensorFlow graph. 

4189 if use_locking is None: 

4190 use_locking = False 

4191 use_locking = _execute.make_bool(use_locking, "use_locking") 

4192 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4193 "SparseApplyRMSProp", var=var, ms=ms, mom=mom, lr=lr, rho=rho, 

4194 momentum=momentum, epsilon=epsilon, grad=grad, 

4195 indices=indices, use_locking=use_locking, 

4196 name=name) 

4197 _result = _outputs[:] 

4198 if _execute.must_record_gradient(): 

4199 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

4200 _op._get_attr_type("Tindices"), "use_locking", 

4201 _op._get_attr_bool("use_locking")) 

4202 _inputs_flat = _op.inputs 

4203 _execute.record_gradient( 

4204 "SparseApplyRMSProp", _inputs_flat, _attrs, _result) 

4205 _result, = _result 

4206 return _result 

4207 

4208SparseApplyRMSProp = tf_export("raw_ops.SparseApplyRMSProp")(_ops.to_raw_op(sparse_apply_rms_prop)) 

4209 

4210 

4211def sparse_apply_rms_prop_eager_fallback(var, ms, mom, lr, rho, momentum, epsilon, grad, indices, use_locking, name, ctx): 

4212 raise RuntimeError("sparse_apply_rms_prop op does not support eager execution. Arg 'out' is a ref.")