Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_state_ops.py: 15%

623 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

def assign(ref, value, validate_shape=True, use_locking=True, name=None):
  r"""Update 'ref' by assigning 'value' to it.

  This operation outputs "ref" after the assignment is done.
  This makes it easier to chain operations that need to use the reset value.

  Args:
    ref: A mutable `Tensor`.
      Should be from a `Variable` node. May be uninitialized.
    value: A `Tensor`. Must have the same type as `ref`.
      The value to be assigned to the variable.
    validate_shape: An optional `bool`. Defaults to `True`.
      If true, the operation will validate that the shape
      of 'value' matches the shape of the Tensor being assigned to. If false,
      'ref' will take on the shape of 'value'.
    use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op's output is ref-typed, which cannot be represented in eager
    # mode, so this wrapper is graph-only.
    raise RuntimeError("assign op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if validate_shape is None:
    # Treat an explicit None as the op's declared default attr value.
    validate_shape = True
  validate_shape = _execute.make_bool(validate_shape, "validate_shape")
  if use_locking is None:
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Assign", ref=ref, value=value, validate_shape=validate_shape,
                  use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("T", _op._get_attr_type("T"), "validate_shape",
              _op._get_attr_bool("validate_shape"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Assign", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.Assign.
Assign = tf_export("raw_ops.Assign")(_ops.to_raw_op(assign))

73 

74 

def assign_eager_fallback(ref, value, validate_shape, use_locking, name, ctx):
  """Eager-mode stub for `assign`: always raises, because the op's
  ref-typed output cannot exist under eager execution."""
  del ref, value, validate_shape, use_locking, name, ctx  # Graph-only op.
  raise RuntimeError("assign op does not support eager execution."
                     " Arg 'output_ref' is a ref.")

77 

def assign_add(ref, value, use_locking=False, name=None):
  r"""Update 'ref' by adding 'value' to it.

  This operation outputs "ref" after the update is done.
  This makes it easier to chain operations that need to use the reset value.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    value: A `Tensor`. Must have the same type as `ref`.
      The value to be added to the variable.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the addition will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op's output is ref-typed, which eager execution cannot represent.
    raise RuntimeError("assign_add op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssignAdd", ref=ref, value=value, use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("T", _op._get_attr_type("T"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AssignAdd", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.AssignAdd.
AssignAdd = tf_export("raw_ops.AssignAdd")(_ops.to_raw_op(assign_add))

118 

119 

def assign_add_eager_fallback(ref, value, use_locking, name, ctx):
  """Eager-mode stub for `assign_add`: always raises, because the op's
  ref-typed output cannot exist under eager execution."""
  del ref, value, use_locking, name, ctx  # Graph-only op.
  raise RuntimeError("assign_add op does not support eager execution."
                     " Arg 'output_ref' is a ref.")

122 

def assign_sub(ref, value, use_locking=False, name=None):
  r"""Update 'ref' by subtracting 'value' from it.

  This operation outputs "ref" after the update is done.
  This makes it easier to chain operations that need to use the reset value.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    value: A `Tensor`. Must have the same type as `ref`.
      The value to be subtracted from the variable.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op's output is ref-typed, which eager execution cannot represent.
    raise RuntimeError("assign_sub op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssignSub", ref=ref, value=value, use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("T", _op._get_attr_type("T"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AssignSub", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.AssignSub.
AssignSub = tf_export("raw_ops.AssignSub")(_ops.to_raw_op(assign_sub))

163 

164 

def assign_sub_eager_fallback(ref, value, use_locking, name, ctx):
  """Eager-mode stub for `assign_sub`: always raises, because the op's
  ref-typed output cannot exist under eager execution."""
  del ref, value, use_locking, name, ctx  # Graph-only op.
  raise RuntimeError("assign_sub op does not support eager execution."
                     " Arg 'output_ref' is a ref.")

167 

def count_up_to(ref, limit, name=None):
  r"""Increments 'ref' until it reaches 'limit'.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should be from a scalar `Variable` node.
    limit: An `int`.
      If incrementing ref would bring it above limit, instead generates an
      'OutOfRange' error.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # 'ref' is a ref-typed input, which eager execution cannot represent.
    raise RuntimeError("count_up_to op does not support eager execution. Arg 'ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  limit = _execute.make_int(limit, "limit")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CountUpTo", ref=ref, limit=limit, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("limit", _op._get_attr_int("limit"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CountUpTo", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.CountUpTo.
CountUpTo = tf_export("raw_ops.CountUpTo")(_ops.to_raw_op(count_up_to))

201 

202 

def count_up_to_eager_fallback(ref, limit, name, ctx):
  """Eager-mode stub for `count_up_to`: always raises, because the op's
  ref-typed argument cannot exist under eager execution."""
  del ref, limit, name, ctx  # Graph-only op.
  raise RuntimeError("count_up_to op does not support eager execution."
                     " Arg 'ref' is a ref.")

205 

def destroy_temporary_variable(ref, var_name, name=None):
  r"""Destroys the temporary variable and returns its final value.

  Sets output to the value of the Tensor pointed to by 'ref', then destroys
  the temporary variable called 'var_name'.
  All other uses of 'ref' *must* have executed before this op.
  This is typically achieved by chaining the ref through each assign op, or by
  using control dependencies.

  Outputs the final value of the tensor pointed to by 'ref'.

  Args:
    ref: A mutable `Tensor`. A reference to the temporary variable tensor.
    var_name: A `string`.
      Name of the temporary variable, usually the name of the matching
      'TemporaryVariable' op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # 'ref' is a ref-typed input, which eager execution cannot represent.
    raise RuntimeError("destroy_temporary_variable op does not support eager execution. Arg 'ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  var_name = _execute.make_str(var_name, "var_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DestroyTemporaryVariable", ref=ref, var_name=var_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("T", _op._get_attr_type("T"), "var_name",
              _op.get_attr("var_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DestroyTemporaryVariable", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.DestroyTemporaryVariable.
DestroyTemporaryVariable = tf_export("raw_ops.DestroyTemporaryVariable")(_ops.to_raw_op(destroy_temporary_variable))

246 

247 

def destroy_temporary_variable_eager_fallback(ref, var_name, name, ctx):
  """Eager-mode stub for `destroy_temporary_variable`: always raises,
  because the op's ref-typed argument cannot exist under eager execution."""
  del ref, var_name, name, ctx  # Graph-only op.
  raise RuntimeError("destroy_temporary_variable op does not support eager"
                     " execution. Arg 'ref' is a ref.")

250 

def is_variable_initialized(ref, name=None):
  r"""Checks whether a tensor has been initialized.

  Outputs boolean scalar indicating whether the tensor has been initialized.

  Args:
    ref: A mutable `Tensor`.
      Should be from a `Variable` node. May be uninitialized.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.

  Raises:
    RuntimeError: If called while eager execution is enabled.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # 'ref' is a ref-typed input, which eager execution cannot represent.
    raise RuntimeError("is_variable_initialized op does not support eager execution. Arg 'ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "IsVariableInitialized", ref=ref, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attr back from the created op so the recorded gradient
    # reflects the value actually attached to the graph node.
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "IsVariableInitialized", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the graph builder under tf.raw_ops.IsVariableInitialized.
IsVariableInitialized = tf_export("raw_ops.IsVariableInitialized")(_ops.to_raw_op(is_variable_initialized))

281 

282 

def is_variable_initialized_eager_fallback(ref, name, ctx):
  """Eager-mode stub for `is_variable_initialized`: always raises, because
  the op's ref-typed argument cannot exist under eager execution."""
  del ref, name, ctx  # Graph-only op.
  raise RuntimeError("is_variable_initialized op does not support eager"
                     " execution. Arg 'ref' is a ref.")

285 

def resource_count_up_to(resource, limit, T, name=None):
  r"""Increments variable pointed to by 'resource' until it reaches 'limit'.

  Args:
    resource: A `Tensor` of type `resource`.
      Should be from a scalar `Variable` node.
    limit: An `int`.
      If incrementing ref would bring it above limit, instead generates an
      'OutOfRange' error.
    T: A `tf.DType` from: `tf.int32, tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `T`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the eager C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceCountUpTo", name, resource, "limit", limit, "T", T)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_count_up_to_eager_fallback(
          resource, limit=limit, T=T, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  limit = _execute.make_int(limit, "limit")
  T = _execute.make_type(T, "T")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceCountUpTo", resource=resource, limit=limit, T=T, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the attrs back from the created op so the recorded gradient
    # reflects the values actually attached to the graph node.
    _attrs = ("limit", _op._get_attr_int("limit"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceCountUpTo", _inputs_flat, _attrs, _result)
  # The op has exactly one output; unpack it.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.ResourceCountUpTo.
ResourceCountUpTo = tf_export("raw_ops.ResourceCountUpTo")(_ops.to_raw_op(resource_count_up_to))

333 

334 

def resource_count_up_to_eager_fallback(resource, limit, T, name, ctx):
  """Slow-path eager implementation of `resource_count_up_to`.

  Normalizes the attrs, converts the resource handle, executes the
  ResourceCountUpTo kernel, and records a gradient when taping is active.
  """
  limit = _execute.make_int(limit, "limit")
  T = _execute.make_type(T, "T")
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource]
  op_attrs = ("limit", limit, "T", T)
  outputs = _execute.execute(b"ResourceCountUpTo", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceCountUpTo", flat_inputs, op_attrs, outputs)
  # Exactly one output tensor.
  result, = outputs
  return result

348 

349 

def resource_scatter_nd_add(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse addition to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor to
  8 elements. In Python, that addition would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  add = tf.scatter_nd_add(ref, indices, updates)
  with tf.Session() as sess:
    print sess.run(add)
  ```

  The resulting update to ref would look like this:

  [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A `Tensor` of type `resource`.
      A resource handle. Must be from a VarHandleOp.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: int32, int64.
      A tensor of indices into ref.
    updates: A `Tensor`. A Tensor. Must have the same type as ref. A tensor of
      values to add to ref.
    use_locking: An optional `bool`. Defaults to `True`.
      An optional bool. Defaults to True. If True, the assignment will
      be protected by a lock; otherwise the behavior is undefined,
      but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the eager C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterNdAdd", name, ref, indices, updates,
        "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_scatter_nd_add_eager_fallback(
          ref, indices, updates, use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterNdAdd", ref=ref, indices=indices, updates=updates,
                                use_locking=use_locking, name=name)
  # The op has no outputs; return the created Operation itself.
  return _op
# Expose the op under tf.raw_ops.ResourceScatterNdAdd.
ResourceScatterNdAdd = tf_export("raw_ops.ResourceScatterNdAdd")(_ops.to_raw_op(resource_scatter_nd_add))

430 

431 

def resource_scatter_nd_add_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Slow-path eager implementation of `resource_scatter_nd_add`.

  Resolves dtypes for the inputs, converts the resource handle, and runs
  the ResourceScatterNdAdd kernel. The op has no outputs, so None is
  returned.
  """
  use_locking = _execute.make_bool(
      True if use_locking is None else use_locking, "use_locking")
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], ctx, [])
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, indices, updates]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
              use_locking)
  _execute.execute(b"ResourceScatterNdAdd", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

446 

447 

def resource_scatter_nd_max(ref, indices, updates, use_locking=True, name=None):
  r"""Takes the element-wise maximum of sparse `updates` and values in a Variable.

  Args:
    ref: A `Tensor` of type `resource`.
      A resource handle. Must be from a VarHandleOp.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: int32, int64.
      A tensor of indices into ref.
    updates: A `Tensor`. A Tensor. Must have the same type as ref. A tensor of
      values whose element wise max is taken with ref
    use_locking: An optional `bool`. Defaults to `True`.
      An optional bool. Defaults to True. If True, the assignment will
      be protected by a lock; otherwise the behavior is undefined,
      but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the eager C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterNdMax", name, ref, indices, updates,
        "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_scatter_nd_max_eager_fallback(
          ref, indices, updates, use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterNdMax", ref=ref, indices=indices, updates=updates,
                                use_locking=use_locking, name=name)
  # The op has no outputs; return the created Operation itself.
  return _op
# Expose the op under tf.raw_ops.ResourceScatterNdMax.
ResourceScatterNdMax = tf_export("raw_ops.ResourceScatterNdMax")(_ops.to_raw_op(resource_scatter_nd_max))

494 

495 

def resource_scatter_nd_max_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Slow-path eager implementation of `resource_scatter_nd_max`.

  Resolves dtypes for the inputs, converts the resource handle, and runs
  the ResourceScatterNdMax kernel. The op has no outputs, so None is
  returned.
  """
  use_locking = _execute.make_bool(
      True if use_locking is None else use_locking, "use_locking")
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], ctx, [])
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, indices, updates]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
              use_locking)
  _execute.execute(b"ResourceScatterNdMax", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

510 

511 

def resource_scatter_nd_min(ref, indices, updates, use_locking=True, name=None):
  r"""Takes the element-wise minimum of sparse `updates` and values in a Variable.

  Args:
    ref: A `Tensor` of type `resource`.
      A resource handle. Must be from a VarHandleOp.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: int32, int64.
      A tensor of indices into ref.
    updates: A `Tensor`. A Tensor. Must have the same type as ref. A tensor of
      values whose element wise min is taken with ref.
    use_locking: An optional `bool`. Defaults to `True`.
      An optional bool. Defaults to True. If True, the assignment will
      be protected by a lock; otherwise the behavior is undefined,
      but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the eager C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterNdMin", name, ref, indices, updates,
        "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_scatter_nd_min_eager_fallback(
          ref, indices, updates, use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterNdMin", ref=ref, indices=indices, updates=updates,
                                use_locking=use_locking, name=name)
  # The op has no outputs; return the created Operation itself.
  return _op
# Expose the op under tf.raw_ops.ResourceScatterNdMin.
ResourceScatterNdMin = tf_export("raw_ops.ResourceScatterNdMin")(_ops.to_raw_op(resource_scatter_nd_min))

558 

559 

def resource_scatter_nd_min_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Slow-path eager implementation of `resource_scatter_nd_min`.

  Resolves dtypes for the inputs, converts the resource handle, and runs
  the ResourceScatterNdMin kernel. The op has no outputs, so None is
  returned.
  """
  use_locking = _execute.make_bool(
      True if use_locking is None else use_locking, "use_locking")
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], ctx, [])
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, indices, updates]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
              use_locking)
  _execute.execute(b"ResourceScatterNdMin", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

574 

575 

def resource_scatter_nd_sub(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1 tensor
  with 8 elements. In Python, that subtraction would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  sub = tf.scatter_nd_sub(ref, indices, updates)
  with tf.Session() as sess:
    print sess.run(sub)
  ```

  The resulting update to ref would look like this:

  [1, -9, 3, -6, -4, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A `Tensor` of type `resource`.
      A resource handle. Must be from a VarHandleOp.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A Tensor. Must be one of the following types: int32, int64.
      A tensor of indices into ref.
    updates: A `Tensor`. A Tensor. Must have the same type as ref. A tensor of
      values to subtract from ref.
    use_locking: An optional `bool`. Defaults to `True`.
      An optional bool. Defaults to True. If True, the assignment will
      be protected by a lock; otherwise the behavior is undefined,
      but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the eager C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterNdSub", name, ref, indices, updates,
        "use_locking", use_locking)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_scatter_nd_sub_eager_fallback(
          ref, indices, updates, use_locking=use_locking, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    # Treat an explicit None as the op's declared default attr value.
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterNdSub", ref=ref, indices=indices, updates=updates,
                                use_locking=use_locking, name=name)
  # The op has no outputs; return the created Operation itself.
  return _op
# Expose the op under tf.raw_ops.ResourceScatterNdSub.
ResourceScatterNdSub = tf_export("raw_ops.ResourceScatterNdSub")(_ops.to_raw_op(resource_scatter_nd_sub))

656 

657 

def resource_scatter_nd_sub_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Slow-path eager implementation of `resource_scatter_nd_sub`.

  Resolves dtypes for the inputs, converts the resource handle, and runs
  the ResourceScatterNdSub kernel. The op has no outputs, so None is
  returned.
  """
  use_locking = _execute.make_bool(
      True if use_locking is None else use_locking, "use_locking")
  _attr_T, (updates,) = _execute.args_to_matching_eager([updates], ctx, [])
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, indices, updates]
  op_attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "use_locking",
              use_locking)
  _execute.execute(b"ResourceScatterNdSub", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

672 

673 

674def resource_scatter_nd_update(ref, indices, updates, use_locking=True, name=None): 

675 r"""Applies sparse `updates` to individual values or slices within a given 

676 

677 variable according to `indices`. 

678 

679 `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. 

680 

681 `indices` must be integer tensor, containing indices into `ref`. 

682 It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 

683 

684 The innermost dimension of `indices` (with length `K`) corresponds to 

685 indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th 

686 dimension of `ref`. 

687 

688 `updates` is `Tensor` of rank `Q-1+P-K` with shape: 

689 

690 ``` 

691 [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. 

692 ``` 

693 

694 For example, say we want to update 4 scattered elements to a rank-1 tensor to 

695 8 elements. In Python, that update would look like this: 

696 

697 ```python 

698 ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) 

699 indices = tf.constant([[4], [3], [1] ,[7]]) 

700 updates = tf.constant([9, 10, 11, 12]) 

701 update = tf.scatter_nd_update(ref, indices, updates) 

702 with tf.Session() as sess: 

703 print sess.run(update) 

704 ``` 

705 

706 The resulting update to ref would look like this: 

707 

708 [1, 11, 3, 10, 9, 6, 7, 12] 

709 

710 See `tf.scatter_nd` for more details about how to make updates to 

711 slices. 

712 

713 Args: 

714 ref: A `Tensor` of type `resource`. 

715 A resource handle. Must be from a VarHandleOp. 

716 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

717 A Tensor. Must be one of the following types: int32, int64. 

718 A tensor of indices into ref. 

719 updates: A `Tensor`. 

720 A Tensor. Must have the same type as ref. A tensor of updated 

721 values to add to ref. 

722 use_locking: An optional `bool`. Defaults to `True`. 

723 An optional bool. Defaults to True. If True, the assignment will 

724 be protected by a lock; otherwise the behavior is undefined, 

725 but may exhibit less contention. 

726 name: A name for the operation (optional). 

727 

728 Returns: 

729 The created Operation. 

730 """ 

731 _ctx = _context._context or _context.context() 

732 tld = _ctx._thread_local_data 

733 if tld.is_eager: 

734 try: 

735 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

736 _ctx, "ResourceScatterNdUpdate", name, ref, indices, updates, 

737 "use_locking", use_locking) 

738 return _result 

739 except _core._NotOkStatusException as e: 

740 _ops.raise_from_not_ok_status(e, name) 

741 except _core._FallbackException: 

742 pass 

743 try: 

744 return resource_scatter_nd_update_eager_fallback( 

745 ref, indices, updates, use_locking=use_locking, name=name, ctx=_ctx) 

746 except _core._SymbolicException: 

747 pass # Add nodes to the TensorFlow graph. 

748 # Add nodes to the TensorFlow graph. 

749 if use_locking is None: 

750 use_locking = True 

751 use_locking = _execute.make_bool(use_locking, "use_locking") 

752 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

753 "ResourceScatterNdUpdate", ref=ref, indices=indices, updates=updates, 

754 use_locking=use_locking, name=name) 

755 return _op 

# Raw-op form, registered under tf.raw_ops.ResourceScatterNdUpdate.
ResourceScatterNdUpdate = tf_export("raw_ops.ResourceScatterNdUpdate")(
    _ops.to_raw_op(resource_scatter_nd_update))

757 

758 

def resource_scatter_nd_update_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Eager-mode fallback that executes ResourceScatterNdUpdate directly.

  Normalizes the `use_locking` attr, resolves the dtypes of `updates` and
  `indices`, converts `ref` to a resource tensor, and runs the op via
  `_execute.execute`.  The op has no outputs, so this always returns None.
  """
  if use_locking is None:
    use_locking = True
  lock_attr = _execute.make_bool(use_locking, "use_locking")
  # Infer the T / Tindices attrs from the actual eager tensor arguments.
  updates_dtype, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, [])
  indices_dtype, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  ref = _ops.convert_to_tensor(ref, _dtypes.resource)
  flat_inputs = [ref, indices, updates]
  attrs = ("T", updates_dtype, "Tindices", indices_dtype, "use_locking",
           lock_attr)
  # num_outputs=0: ResourceScatterNdUpdate mutates the resource in place.
  _execute.execute(b"ResourceScatterNdUpdate", 0, inputs=flat_inputs,
                   attrs=attrs, ctx=ctx, name=name)
  return None

774 

775 

def scatter_add(ref, indices, updates, use_locking=False, name=None):
  r"""Adds sparse updates to a variable reference.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] += updates[...]

  # Vector indices (for each i)
  ref[indices[i], ...] += updates[i, ...]

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
  ```

  This operation outputs `ref` after the update is done.
  This makes it easier to chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
  </div>

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to add to `ref`.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the addition will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_add op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterAdd", ref=ref, indices=indices, updates=updates,
                      use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterAdd", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterAdd = tf_export("raw_ops.ScatterAdd")(_ops.to_raw_op(scatter_add))

840 

841 

def scatter_add_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterAdd produces a ref output, which eager tensors cannot represent,
  # so the eager fallback always fails.
  raise RuntimeError("scatter_add op does not support eager execution. Arg 'output_ref' is a ref.")

844 

def scatter_div(ref, indices, updates, use_locking=False, name=None):
  r"""Divides a variable reference by sparse updates.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] /= updates[...]

  # Vector indices (for each i)
  ref[indices[i], ...] /= updates[i, ...]

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
  ```

  The output is `ref` after the update is done, which makes it easier to
  chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions divide.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the first
      dimension of `ref`.
    updates: A `Tensor` with the same type as `ref`; the values that `ref`
      is divided by.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      operation will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  ctx = _context._context or _context.context()
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("scatter_div op does not support eager execution. Arg 'output_ref' is a ref.")
  # Graph mode: register a ScatterDiv node.
  lock = False if use_locking is None else use_locking
  lock = _execute.make_bool(lock, "use_locking")
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "ScatterDiv", ref=ref, indices=indices, updates=updates,
      use_locking=lock, name=name)
  results = outputs[:]
  if _execute.must_record_gradient():
    attrs = ("T", op._get_attr_type("T"),
             "Tindices", op._get_attr_type("Tindices"),
             "use_locking", op._get_attr_bool("use_locking"))
    _execute.record_gradient("ScatterDiv", op.inputs, attrs, results)
  result, = results
  return result


ScatterDiv = tf_export("raw_ops.ScatterDiv")(_ops.to_raw_op(scatter_div))

907 

908 

def scatter_div_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterDiv produces a ref output, which eager tensors cannot represent,
  # so the eager fallback always fails.
  raise RuntimeError("scatter_div op does not support eager execution. Arg 'output_ref' is a ref.")

911 

def scatter_max(ref, indices, updates, use_locking=False, name=None):
  r"""Reduces sparse updates into a variable reference using the `max` operation.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] = max(ref[indices, ...], updates[...])

  # Vector indices (for each i)
  ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
  ```

  This operation outputs `ref` after the update is done.
  This makes it easier to chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions combine.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
  </div>

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `half`,
      `bfloat16`, `float32`, `float64`, `int32`, `int64`.
      Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to reduce into `ref`.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the update will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_max op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterMax", ref=ref, indices=indices, updates=updates,
                      use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterMax", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterMax = tf_export("raw_ops.ScatterMax")(_ops.to_raw_op(scatter_max))

976 

977 

def scatter_max_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterMax produces a ref output, which eager tensors cannot represent,
  # so the eager fallback always fails.
  raise RuntimeError("scatter_max op does not support eager execution. Arg 'output_ref' is a ref.")

980 

def scatter_min(ref, indices, updates, use_locking=False, name=None):
  r"""Reduces sparse updates into a variable reference using the `min` operation.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] = min(ref[indices, ...], updates[...])

  # Vector indices (for each i)
  ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
  ```

  This operation outputs `ref` after the update is done.
  This makes it easier to chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions combine.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt>
  </div>

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `half`,
      `bfloat16`, `float32`, `float64`, `int32`, `int64`.
      Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to reduce into `ref`.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the update will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_min op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterMin", ref=ref, indices=indices, updates=updates,
                      use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterMin", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterMin = tf_export("raw_ops.ScatterMin")(_ops.to_raw_op(scatter_min))

1045 

1046 

def scatter_min_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterMin produces a ref output, which eager tensors cannot represent,
  # so the eager fallback always fails.
  raise RuntimeError("scatter_min op does not support eager execution. Arg 'output_ref' is a ref.")

1049 

def scatter_mul(ref, indices, updates, use_locking=False, name=None):
  r"""Multiplies sparse updates into a variable reference.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] *= updates[...]

  # Vector indices (for each i)
  ref[indices[i], ...] *= updates[i, ...]

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
  ```

  The output is `ref` after the update is done, which makes it easier to
  chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions multiply.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the first
      dimension of `ref`.
    updates: A `Tensor` with the same type as `ref`; the values to multiply
      into `ref`.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      operation will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  ctx = _context._context or _context.context()
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("scatter_mul op does not support eager execution. Arg 'output_ref' is a ref.")
  # Graph mode: register a ScatterMul node.
  lock = False if use_locking is None else use_locking
  lock = _execute.make_bool(lock, "use_locking")
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "ScatterMul", ref=ref, indices=indices, updates=updates,
      use_locking=lock, name=name)
  results = outputs[:]
  if _execute.must_record_gradient():
    attrs = ("T", op._get_attr_type("T"),
             "Tindices", op._get_attr_type("Tindices"),
             "use_locking", op._get_attr_bool("use_locking"))
    _execute.record_gradient("ScatterMul", op.inputs, attrs, results)
  result, = results
  return result


ScatterMul = tf_export("raw_ops.ScatterMul")(_ops.to_raw_op(scatter_mul))

1112 

1113 

def scatter_mul_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterMul produces a ref output, which eager tensors cannot represent,
  # so the eager fallback always fails.
  raise RuntimeError("scatter_mul op does not support eager execution. Arg 'output_ref' is a ref.")

1116 

def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse addition to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor to
  8 elements. In Python, that addition would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  add = tf.scatter_nd_add(ref, indices, updates)
  with tf.Session() as sess:
    print(sess.run(add))
  ```

  The resulting update to ref would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`. A tensor of
      updated values to add to ref.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      assignment will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_nd_add op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterNdAdd", ref=ref, indices=indices, updates=updates,
                        use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterNdAdd", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterNdAdd = tf_export("raw_ops.ScatterNdAdd")(_ops.to_raw_op(scatter_nd_add))

1195 

1196 

def scatter_nd_add_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterNdAdd produces a ref output, which eager tensors cannot
  # represent, so the eager fallback always fails.
  raise RuntimeError("scatter_nd_add op does not support eager execution. Arg 'output_ref' is a ref.")

1199 

def scatter_nd_max(ref, indices, updates, use_locking=False, name=None):
  r"""Computes element-wise maximum.

  Scatters `updates` into `ref` at `indices`, keeping at each targeted
  location the maximum of the existing value and the update.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`. A tensor of
      updated values to reduce into ref.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      assignment will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_nd_max op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterNdMax", ref=ref, indices=indices, updates=updates,
                        use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterNdMax", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterNdMax = tf_export("raw_ops.ScatterNdMax")(_ops.to_raw_op(scatter_nd_max))

1244 

1245 

def scatter_nd_max_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterNdMax produces a ref output, which eager tensors cannot
  # represent, so the eager fallback always fails.
  raise RuntimeError("scatter_nd_max op does not support eager execution. Arg 'output_ref' is a ref.")

1248 

def scatter_nd_min(ref, indices, updates, use_locking=False, name=None):
  r"""Computes element-wise minimum.

  Scatters `updates` into `ref` at `indices`, keeping at each targeted
  location the minimum of the existing value and the update.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`. A tensor of
      updated values to reduce into ref.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      assignment will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_nd_min op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterNdMin", ref=ref, indices=indices, updates=updates,
                        use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterNdMin", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterNdMin = tf_export("raw_ops.ScatterNdMin")(_ops.to_raw_op(scatter_nd_min))

1293 

1294 

def scatter_nd_min_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterNdMin produces a ref output, which eager tensors cannot
  # represent, so the eager fallback always fails.
  raise RuntimeError("scatter_nd_min op does not support eager execution. Arg 'output_ref' is a ref.")

1297 

def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse subtraction to individual values or slices within a given
  variable according to `indices`.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1 tensor
  with 8 elements. In Python, that subtraction would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  sub = tf.scatter_nd_sub(ref, indices, updates)
  with tf.Session() as sess:
    print(sess.run(sub))
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -4, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`. A tensor of
      updated values to subtract from ref.
    use_locking: An optional `bool`. Defaults to `False`. If True, the
      assignment will be protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If executed eagerly; the op's output is a ref tensor,
      which eager mode cannot represent.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("scatter_nd_sub op does not support eager execution. Arg 'output_ref' is a ref.")
  # Add nodes to the TensorFlow graph.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ScatterNdSub", ref=ref, indices=indices, updates=updates,
                        use_locking=use_locking, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "use_locking",
              _op._get_attr_bool("use_locking"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ScatterNdSub", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


ScatterNdSub = tf_export("raw_ops.ScatterNdSub")(_ops.to_raw_op(scatter_nd_sub))

1378 

1379 

def scatter_nd_sub_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  # ScatterNdSub produces a ref output, which eager tensors cannot
  # represent, so the eager fallback always fails.
  raise RuntimeError("scatter_nd_sub op does not support eager execution. Arg 'output_ref' is a ref.")

1382 

1383def scatter_nd_update(ref, indices, updates, use_locking=True, name=None): 

1384 r"""Applies sparse `updates` to individual values or slices within a given 

1385 

1386 variable according to `indices`. 

1387 

1388 `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. 

1389 

1390 `indices` must be integer tensor, containing indices into `ref`. 

1391 It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. 

1392 

1393 The innermost dimension of `indices` (with length `K`) corresponds to 

1394 indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th 

1395 dimension of `ref`. 

1396 

1397 `updates` is `Tensor` of rank `Q-1+P-K` with shape: 

1398 

1399 $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ 

1400 

1401 For example, say we want to update 4 scattered elements to a rank-1 tensor to 

1402 8 elements. In Python, that update would look like this: 

1403 

1404 ```python 

1405 ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) 

1406 indices = tf.constant([[4], [3], [1] ,[7]]) 

1407 updates = tf.constant([9, 10, 11, 12]) 

1408 update = tf.scatter_nd_update(ref, indices, updates) 

1409 with tf.Session() as sess: 

1410 print sess.run(update) 

1411 ``` 

1412 

1413 The resulting update to ref would look like this: 

1414 

1415 [1, 11, 3, 10, 9, 6, 7, 12] 

1416 

1417 See `tf.scatter_nd` for more details about how to make updates to 

1418 slices. 

1419 

1420 See also `tf.scatter_update` and `tf.batch_scatter_update`. 

1421 

1422 Args: 

1423 ref: A mutable `Tensor`. A mutable Tensor. Should be from a Variable node. 

1424 indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

1425 A Tensor. Must be one of the following types: int32, int64. 

1426 A tensor of indices into ref. 

1427 updates: A `Tensor`. Must have the same type as `ref`. 

1428 A Tensor. Must have the same type as ref. A tensor of updated 

1429 values to add to ref. 

1430 use_locking: An optional `bool`. Defaults to `True`. 

1431 An optional bool. Defaults to True. If True, the assignment will 

1432 be protected by a lock; otherwise the behavior is undefined, 

1433 but may exhibit less contention. 

1434 name: A name for the operation (optional). 

1435 

1436 Returns: 

1437 A mutable `Tensor`. Has the same type as `ref`. 

1438 """ 

1439 _ctx = _context._context or _context.context() 

1440 tld = _ctx._thread_local_data 

1441 if tld.is_eager: 

1442 raise RuntimeError("scatter_nd_update op does not support eager execution. Arg 'output_ref' is a ref.") 

1443 # Add nodes to the TensorFlow graph. 

1444 if use_locking is None: 

1445 use_locking = True 

1446 use_locking = _execute.make_bool(use_locking, "use_locking") 

1447 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1448 "ScatterNdUpdate", ref=ref, indices=indices, updates=updates, 

1449 use_locking=use_locking, name=name) 

1450 _result = _outputs[:] 

1451 if _execute.must_record_gradient(): 

1452 _attrs = ("T", _op._get_attr_type("T"), "Tindices", 

1453 _op._get_attr_type("Tindices"), "use_locking", 

1454 _op._get_attr_bool("use_locking")) 

1455 _inputs_flat = _op.inputs 

1456 _execute.record_gradient( 

1457 "ScatterNdUpdate", _inputs_flat, _attrs, _result) 

1458 _result, = _result 

1459 return _result 

1460 

# Expose the graph-mode wrapper as tf.raw_ops.ScatterNdUpdate.
ScatterNdUpdate = tf_export("raw_ops.ScatterNdUpdate")(_ops.to_raw_op(scatter_nd_update))

1462 

1463 

def scatter_nd_update_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Eager-mode fallback stub: always raises, since ScatterNdUpdate's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("scatter_nd_update op does not support eager execution. Arg 'output_ref' is a ref.")

1466 

def scatter_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Subtracts sparse updates to a variable reference.

  Depending on the rank of `indices`, this computes:

  ```python
  # Scalar indices
  ref[indices, ...] -= updates[...]

  # Vector indices (for each i)
  ref[indices[i], ...] -= updates[i, ...]

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
  ```

  The operation outputs `ref` after the update is done, which makes it easier
  to chain operations that need to use the reset value.

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their (negated) contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt>
  </div>

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`,
      `complex128`, `half`, `uint32`, `uint64`.
      Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to subtract from `ref`.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the subtraction will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If eager execution is enabled; ops with ref-typed outputs
      are graph-only.
  """
  # Ref-typed outputs exist only in graph mode; bail out under eager.
  active_ctx = _context._context or _context.context()
  if active_ctx._thread_local_data.is_eager:
    raise RuntimeError("scatter_sub op does not support eager execution. Arg 'output_ref' is a ref.")
  # Graph mode: canonicalize the attr and add a ScatterSub node.
  if use_locking is None:
    use_locking = False
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, graph_op, op_outputs = _op_def_library._apply_op_helper(
      "ScatterSub", ref=ref, indices=indices, updates=updates,
      use_locking=use_locking, name=name)
  result = op_outputs[:]
  if _execute.must_record_gradient():
    # Capture the attrs the gradient function needs to reconstruct the op.
    attr_tuple = ("T", graph_op._get_attr_type("T"), "Tindices",
                  graph_op._get_attr_type("Tindices"), "use_locking",
                  graph_op._get_attr_bool("use_locking"))
    _execute.record_gradient(
        "ScatterSub", graph_op.inputs, attr_tuple, result)
  result, = result
  return result

1529 

# Expose the graph-mode wrapper as tf.raw_ops.ScatterSub.
ScatterSub = tf_export("raw_ops.ScatterSub")(_ops.to_raw_op(scatter_sub))

1531 

1532 

def scatter_sub_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Eager-mode fallback stub: always raises, since ScatterSub's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("scatter_sub op does not support eager execution. Arg 'output_ref' is a ref.")

1535 

def scatter_update(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse updates to a variable reference.

  This operation computes

  ```python
  # Scalar indices
  ref[indices, ...] = updates[...]

  # Vector indices (for each i)
  ref[indices[i], ...] = updates[i, ...]

  # High rank indices (for each i, ..., j)
  ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
  ```

  It outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.

  If values in `ref` are to be updated more than once, because there are
  duplicate entries in `indices`, the order in which the updates happen for
  each value is undefined.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
  </div>

  See also `tf.batch_scatter_update` and `tf.scatter_nd_update`.

  Args:
    ref: A mutable `Tensor`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to store in `ref`.
    use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.

  Raises:
    RuntimeError: If eager execution is enabled; ops with ref-typed outputs
      are graph-only.
  """
  # Ref-typed outputs exist only in graph mode; bail out under eager.
  active_ctx = _context._context or _context.context()
  if active_ctx._thread_local_data.is_eager:
    raise RuntimeError("scatter_update op does not support eager execution. Arg 'output_ref' is a ref.")
  # Graph mode: canonicalize the attr and add a ScatterUpdate node.
  if use_locking is None:
    use_locking = True
  use_locking = _execute.make_bool(use_locking, "use_locking")
  _, _, graph_op, op_outputs = _op_def_library._apply_op_helper(
      "ScatterUpdate", ref=ref, indices=indices, updates=updates,
      use_locking=use_locking, name=name)
  result = op_outputs[:]
  if _execute.must_record_gradient():
    # Capture the attrs the gradient function needs to reconstruct the op.
    attr_tuple = ("T", graph_op._get_attr_type("T"), "Tindices",
                  graph_op._get_attr_type("Tindices"), "use_locking",
                  graph_op._get_attr_bool("use_locking"))
    _execute.record_gradient(
        "ScatterUpdate", graph_op.inputs, attr_tuple, result)
  result, = result
  return result

1602 

# Expose the graph-mode wrapper as tf.raw_ops.ScatterUpdate.
ScatterUpdate = tf_export("raw_ops.ScatterUpdate")(_ops.to_raw_op(scatter_update))

1604 

1605 

def scatter_update_eager_fallback(ref, indices, updates, use_locking, name, ctx):
  """Eager-mode fallback stub: always raises, since ScatterUpdate's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("scatter_update op does not support eager execution. Arg 'output_ref' is a ref.")

1608 

def temporary_variable(shape, dtype, var_name="", name=None):
  r"""Returns a tensor that may be mutated, but only persists within a single step.

  This is an experimental op for internal use only and it is possible to use
  this op in unsafe ways. DO NOT USE unless you fully understand the risks.

  It is the caller's responsibility to ensure that 'ref' is eventually passed
  to a matching 'DestroyTemporaryVariable' op after all other uses have
  completed.

  Outputs a ref to the tensor state so it may be read or modified.

  E.g.
      var = state_ops._temporary_variable([1, 2], types.float_)
      var_name = var.op.name
      var = state_ops.assign(var, [[4.0, 5.0]])
      var = state_ops.assign_add(var, [[6.0, 7.0]])
      final = state_ops._destroy_temporary_variable(var, var_name=var_name)

  Args:
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the variable tensor.
    dtype: A `tf.DType`. The type of elements in the variable tensor.
    var_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary variable resource. Default
      value is the name of the 'TemporaryVariable' op (which is guaranteed
      unique).
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If eager execution is enabled; ops with ref-typed outputs
      are graph-only.
  """
  # Ref-typed outputs exist only in graph mode; bail out under eager.
  active_ctx = _context._context or _context.context()
  if active_ctx._thread_local_data.is_eager:
    raise RuntimeError("temporary_variable op does not support eager execution. Arg 'ref' is a ref.")
  # Graph mode: canonicalize attrs and add a TemporaryVariable node.
  shape = _execute.make_shape(shape, "shape")
  dtype = _execute.make_type(dtype, "dtype")
  if var_name is None:
    var_name = ""
  var_name = _execute.make_str(var_name, "var_name")
  _, _, graph_op, op_outputs = _op_def_library._apply_op_helper(
      "TemporaryVariable", shape=shape, dtype=dtype, var_name=var_name,
      name=name)
  result = op_outputs[:]
  if _execute.must_record_gradient():
    # Capture the attrs the gradient function needs to reconstruct the op.
    attr_tuple = ("shape", graph_op.get_attr("shape"), "dtype",
                  graph_op._get_attr_type("dtype"), "var_name",
                  graph_op.get_attr("var_name"))
    _execute.record_gradient(
        "TemporaryVariable", graph_op.inputs, attr_tuple, result)
  result, = result
  return result

1662 

# Expose the graph-mode wrapper as tf.raw_ops.TemporaryVariable.
TemporaryVariable = tf_export("raw_ops.TemporaryVariable")(_ops.to_raw_op(temporary_variable))

1664 

1665 

def temporary_variable_eager_fallback(shape, dtype, var_name, name, ctx):
  """Eager-mode fallback stub: always raises, since TemporaryVariable's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("temporary_variable op does not support eager execution. Arg 'ref' is a ref.")

1668 

def variable(shape, dtype, container="", shared_name="", name=None):
  r"""Use VariableV2 instead.

  Deprecated graph-mode wrapper for the legacy `Variable` op.

  Args:
    shape: A `tf.TensorShape` or list of `ints`.
    dtype: A `tf.DType`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If eager execution is enabled; ops with ref-typed outputs
      are graph-only.
  """
  # Ref-typed outputs exist only in graph mode; bail out under eager.
  active_ctx = _context._context or _context.context()
  if active_ctx._thread_local_data.is_eager:
    raise RuntimeError("variable op does not support eager execution. Arg 'ref' is a ref.")
  # Graph mode: canonicalize attrs and add a Variable node.
  shape = _execute.make_shape(shape, "shape")
  dtype = _execute.make_type(dtype, "dtype")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, graph_op, op_outputs = _op_def_library._apply_op_helper(
      "Variable", shape=shape, dtype=dtype, container=container,
      shared_name=shared_name, name=name)
  result = op_outputs[:]
  if _execute.must_record_gradient():
    # Capture the attrs the gradient function needs to reconstruct the op.
    attr_tuple = ("shape", graph_op.get_attr("shape"), "dtype",
                  graph_op._get_attr_type("dtype"), "container",
                  graph_op.get_attr("container"), "shared_name",
                  graph_op.get_attr("shared_name"))
    _execute.record_gradient(
        "Variable", graph_op.inputs, attr_tuple, result)
  result, = result
  return result

1709 

# Expose the graph-mode wrapper as tf.raw_ops.Variable.
Variable = tf_export("raw_ops.Variable")(_ops.to_raw_op(variable))

1711 

1712 

def variable_eager_fallback(shape, dtype, container, shared_name, name, ctx):
  """Eager-mode fallback stub: always raises, since Variable's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("variable op does not support eager execution. Arg 'ref' is a ref.")

1715 

def variable_v2(shape, dtype, container="", shared_name="", name=None):
  r"""Holds state in the form of a tensor that persists across steps.

  Outputs a ref to the tensor state so it may be read or modified.
  TODO(zhifengc/mrry): Adds a pointer to a more detail document
  about sharing states in tensorflow.

  Args:
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the variable tensor.
    dtype: A `tf.DType`. The type of elements in the variable tensor.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this variable is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this variable is named in the given bucket with this
      shared_name. Otherwise, the node name is used instead.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If eager execution is enabled; ops with ref-typed outputs
      are graph-only.
  """
  # Ref-typed outputs exist only in graph mode; bail out under eager.
  active_ctx = _context._context or _context.context()
  if active_ctx._thread_local_data.is_eager:
    raise RuntimeError("variable_v2 op does not support eager execution. Arg 'ref' is a ref.")
  # Graph mode: canonicalize attrs and add a VariableV2 node.
  shape = _execute.make_shape(shape, "shape")
  dtype = _execute.make_type(dtype, "dtype")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, graph_op, op_outputs = _op_def_library._apply_op_helper(
      "VariableV2", shape=shape, dtype=dtype, container=container,
      shared_name=shared_name, name=name)
  result = op_outputs[:]
  if _execute.must_record_gradient():
    # Capture the attrs the gradient function needs to reconstruct the op.
    attr_tuple = ("shape", graph_op.get_attr("shape"), "dtype",
                  graph_op._get_attr_type("dtype"), "container",
                  graph_op.get_attr("container"), "shared_name",
                  graph_op.get_attr("shared_name"))
    _execute.record_gradient(
        "VariableV2", graph_op.inputs, attr_tuple, result)
  result, = result
  return result

1765 

# Expose the graph-mode wrapper as tf.raw_ops.VariableV2.
VariableV2 = tf_export("raw_ops.VariableV2")(_ops.to_raw_op(variable_v2))

1767 

1768 

def variable_v2_eager_fallback(shape, dtype, container, shared_name, name, ctx):
  """Eager-mode fallback stub: always raises, since VariableV2's ref-typed output cannot be produced under eager execution."""
  raise RuntimeError("variable_v2 op does not support eager execution. Arg 'ref' is a ref.")