Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py: 11%

694 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

def assign_add_variable_op(resource, value, name=None):
  r"""Adds a value to the current value of a variable.

  Any ReadVariableOp with a control dependency on this op is guaranteed to
  see the incremented value or a subsequent newer one.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    value: A `Tensor`. the value by which the variable will be incremented.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssignAddVariableOp", name, resource, value)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return assign_add_variable_op_eager_fallback(
          resource, value, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: record the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssignAddVariableOp", resource=resource, value=value, name=name)
  return _op
# Export as tf.raw_ops.AssignAddVariableOp.
AssignAddVariableOp = tf_export("raw_ops.AssignAddVariableOp")(_ops.to_raw_op(assign_add_variable_op))

60 

61 

def assign_add_variable_op_eager_fallback(resource, value, name, ctx):
  """Slow-path eager execution of AssignAddVariableOp.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Infer the "dtype" attr from the value tensor.
  _attr_dtype, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, value]
  _attrs = ("dtype", _attr_dtype)
  _result = _execute.execute(b"AssignAddVariableOp", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

71 

72 

def assign_sub_variable_op(resource, value, name=None):
  r"""Subtracts a value from the current value of a variable.

  Any ReadVariableOp with a control dependency on this op is guaranteed to
  see the decremented value or a subsequent newer one.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    value: A `Tensor`. the value by which the variable will be incremented.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssignSubVariableOp", name, resource, value)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return assign_sub_variable_op_eager_fallback(
          resource, value, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: record the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssignSubVariableOp", resource=resource, value=value, name=name)
  return _op
# Export as tf.raw_ops.AssignSubVariableOp.
AssignSubVariableOp = tf_export("raw_ops.AssignSubVariableOp")(_ops.to_raw_op(assign_sub_variable_op))

109 

110 

def assign_sub_variable_op_eager_fallback(resource, value, name, ctx):
  """Slow-path eager execution of AssignSubVariableOp.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Infer the "dtype" attr from the value tensor.
  _attr_dtype, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, value]
  _attrs = ("dtype", _attr_dtype)
  _result = _execute.execute(b"AssignSubVariableOp", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

120 

121 

def assign_variable_op(resource, value, validate_shape=False, name=None):
  r"""Assigns a new value to a variable.

  Any ReadVariableOp with a control dependency on this op is guaranteed to return
  this value or a subsequent newer value of the variable.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    value: A `Tensor`. the value to set the new tensor to use.
    validate_shape: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AssignVariableOp", name, resource, value, "validate_shape",
        validate_shape)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return assign_variable_op_eager_fallback(
          resource, value, validate_shape=validate_shape, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the attr, then add the op to the current graph.
  if validate_shape is None:
    validate_shape = False
  validate_shape = _execute.make_bool(validate_shape, "validate_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AssignVariableOp", resource=resource, value=value,
                            validate_shape=validate_shape, name=name)
  return _op
# Export as tf.raw_ops.AssignVariableOp.
AssignVariableOp = tf_export("raw_ops.AssignVariableOp")(_ops.to_raw_op(assign_variable_op))

164 

165 

def assign_variable_op_eager_fallback(resource, value, validate_shape, name, ctx):
  """Slow-path eager execution of AssignVariableOp.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the optional attr to a concrete bool.
  if validate_shape is None:
    validate_shape = False
  validate_shape = _execute.make_bool(validate_shape, "validate_shape")
  # Infer the "dtype" attr from the value tensor.
  _attr_dtype, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, value]
  _attrs = ("dtype", _attr_dtype, "validate_shape", validate_shape)
  _result = _execute.execute(b"AssignVariableOp", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

178 

179 

def consume_mutex_lock(mutex_lock, name=None):
  r"""This op consumes a lock created by `MutexLock`.

  This op exists to consume a tensor created by `MutexLock` (other than
  direct control dependencies). It should be the only that consumes the tensor,
  and will raise an error if it is not. Its only purpose is to keep the
  mutex lock tensor alive until it is consumed by this op.

  **NOTE**: This operation must run on the same device as its input. This may
  be enforced via the `colocate_with` mechanism.

  Args:
    mutex_lock: A `Tensor` of type `variant`.
      A tensor returned by `MutexLock`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ConsumeMutexLock", name, mutex_lock)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return consume_mutex_lock_eager_fallback(
          mutex_lock, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: record the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ConsumeMutexLock", mutex_lock=mutex_lock, name=name)
  return _op
# Export as tf.raw_ops.ConsumeMutexLock.
ConsumeMutexLock = tf_export("raw_ops.ConsumeMutexLock")(_ops.to_raw_op(consume_mutex_lock))

220 

221 

def consume_mutex_lock_eager_fallback(mutex_lock, name, ctx):
  """Slow-path eager execution of ConsumeMutexLock.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  mutex_lock = _ops.convert_to_tensor(mutex_lock, _dtypes.variant)
  _inputs_flat = [mutex_lock]
  # ConsumeMutexLock has no attrs.
  _attrs = None
  _result = _execute.execute(b"ConsumeMutexLock", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

230 

231 

def destroy_resource_op(resource, ignore_lookup_error=True, name=None):
  r"""Deletes the resource specified by the handle.

  All subsequent operations using the resource will result in a NotFound
  error status.

  Args:
    resource: A `Tensor` of type `resource`. handle to the resource to delete.
    ignore_lookup_error: An optional `bool`. Defaults to `True`.
      whether to ignore the error when the resource
      doesn't exist.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DestroyResourceOp", name, resource, "ignore_lookup_error",
        ignore_lookup_error)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return destroy_resource_op_eager_fallback(
          resource, ignore_lookup_error=ignore_lookup_error, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the attr, then add the op to the current graph.
  if ignore_lookup_error is None:
    ignore_lookup_error = True
  ignore_lookup_error = _execute.make_bool(ignore_lookup_error, "ignore_lookup_error")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DestroyResourceOp", resource=resource,
                             ignore_lookup_error=ignore_lookup_error,
                             name=name)
  return _op
# Export as tf.raw_ops.DestroyResourceOp.
DestroyResourceOp = tf_export("raw_ops.DestroyResourceOp")(_ops.to_raw_op(destroy_resource_op))

276 

277 

def destroy_resource_op_eager_fallback(resource, ignore_lookup_error, name, ctx):
  """Slow-path eager execution of DestroyResourceOp.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the optional attr to a concrete bool.
  if ignore_lookup_error is None:
    ignore_lookup_error = True
  ignore_lookup_error = _execute.make_bool(ignore_lookup_error, "ignore_lookup_error")
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource]
  _attrs = ("ignore_lookup_error", ignore_lookup_error)
  _result = _execute.execute(b"DestroyResourceOp", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

289 

290 

def disable_copy_on_read(resource, name=None):
  r"""Turns off the copy-on-read mode.

  Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect.

  Args:
    resource: A `Tensor` of type `resource`.
      The resource handle of the resource variable.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DisableCopyOnRead", name, resource)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return disable_copy_on_read_eager_fallback(
          resource, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: record the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DisableCopyOnRead", resource=resource, name=name)
  return _op
# Export as tf.raw_ops.DisableCopyOnRead.
DisableCopyOnRead = tf_export("raw_ops.DisableCopyOnRead")(_ops.to_raw_op(disable_copy_on_read))

325 

326 

def disable_copy_on_read_eager_fallback(resource, name, ctx):
  """Slow-path eager execution of DisableCopyOnRead.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource]
  # DisableCopyOnRead has no attrs.
  _attrs = None
  _result = _execute.execute(b"DisableCopyOnRead", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

335 

336 

def mutex_lock(mutex, name=None):
  r"""Locks a mutex resource. The output is the lock. So long as the lock tensor

  is alive, any other request to use `MutexLock` with this mutex will wait.

  This is particularly useful for creating a critical section when used in
  conjunction with `MutexLockIdentity`:

  ```python

  mutex = mutex_v2(
    shared_name=handle_name, container=container, name=name)

  def execute_in_critical_section(fn, *args, **kwargs):
    lock = gen_resource_variable_ops.mutex_lock(mutex)

    with ops.control_dependencies([lock]):
      r = fn(*args, **kwargs)

    with ops.control_dependencies(nest.flatten(r)):
      with ops.colocate_with(mutex):
        ensure_lock_exists = mutex_lock_identity(lock)

      # Make sure that if any element of r is accessed, all of
      # them are executed together.
      r = nest.map_structure(tf.identity, r)

    with ops.control_dependencies([ensure_lock_exists]):
      return nest.map_structure(tf.identity, r)
  ```

  While `fn` is running in the critical section, no other functions which wish to
  use this critical section may run.

  Often the use case is that two executions of the same graph, in parallel,
  wish to run `fn`; and we wish to ensure that only one of them executes
  at a time. This is especially important if `fn` modifies one or more
  variables at a time.

  It is also useful if two separate functions must share a resource, but we
  wish to ensure the usage is exclusive.

  Args:
    mutex: A `Tensor` of type `resource`. The mutex resource to lock.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `variant`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MutexLock", name, mutex)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return mutex_lock_eager_fallback(
          mutex, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: add the op to the current graph and unpack its single output.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MutexLock", mutex=mutex, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # MutexLock has no attrs; record the op for gradient bookkeeping.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MutexLock", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the lock tensor.
  _result, = _result
  return _result

# Export as tf.raw_ops.MutexLock.
MutexLock = tf_export("raw_ops.MutexLock")(_ops.to_raw_op(mutex_lock))

415 

416 

def mutex_lock_eager_fallback(mutex, name, ctx):
  """Slow-path eager execution of MutexLock.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  mutex = _ops.convert_to_tensor(mutex, _dtypes.resource)
  _inputs_flat = [mutex]
  # MutexLock has no attrs.
  _attrs = None
  _result = _execute.execute(b"MutexLock", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MutexLock", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the lock tensor.
  _result, = _result
  return _result

428 

429 

def mutex_v2(container="", shared_name="", name=None):
  r"""Creates a Mutex resource that can be locked by `MutexLock`.

  Args:
    container: An optional `string`. Defaults to `""`.
      If non-empty, this variable is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this variable is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MutexV2", name, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return mutex_v2_eager_fallback(
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize string attrs, then add the op to the current graph.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MutexV2", container=container, shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read the attrs from the created op so recorded values are canonical.
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MutexV2", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the resource handle.
  _result, = _result
  return _result

# Export as tf.raw_ops.MutexV2.
MutexV2 = tf_export("raw_ops.MutexV2")(_ops.to_raw_op(mutex_v2))

482 

483 

def mutex_v2_eager_fallback(container, shared_name, name, ctx):
  """Slow-path eager execution of MutexV2.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the optional string attrs.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # MutexV2 takes no tensor inputs.
  _inputs_flat = []
  _attrs = ("container", container, "shared_name", shared_name)
  _result = _execute.execute(b"MutexV2", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MutexV2", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the resource handle.
  _result, = _result
  return _result

500 

501 

def read_variable_op(resource, dtype, name=None):
  r"""Reads the value of a variable.

  The tensor returned by this operation is immutable.

  The value returned by this operation is guaranteed to be influenced by all the
  writes on which this operation depends directly or indirectly, and to not be
  influenced by any of the writes which depend directly or indirectly on this
  operation.

  Args:
    resource: A `Tensor` of type `resource`.
      handle to the resource in which to store the variable.
    dtype: A `tf.DType`. the dtype of the value.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ReadVariableOp", name, resource, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return read_variable_op_eager_fallback(
          resource, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the dtype attr, then add the op to the graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ReadVariableOp", resource=resource, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read the attr from the created op so the recorded value is canonical.
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ReadVariableOp", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the value tensor.
  _result, = _result
  return _result

# Export as tf.raw_ops.ReadVariableOp.
ReadVariableOp = tf_export("raw_ops.ReadVariableOp")(_ops.to_raw_op(read_variable_op))

551 

552 

def read_variable_op_eager_fallback(resource, dtype, name, ctx):
  """Slow-path eager execution of ReadVariableOp.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the dtype attr to a concrete DataType enum value.
  dtype = _execute.make_type(dtype, "dtype")
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource]
  _attrs = ("dtype", dtype)
  _result = _execute.execute(b"ReadVariableOp", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ReadVariableOp", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the value tensor.
  _result, = _result
  return _result

565 

566 

def resource_gather(resource, indices, dtype, batch_dims=0, validate_indices=True, name=None):
  r"""Gather slices from the variable pointed to by `resource` according to `indices`.

  `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
  Produces an output tensor with shape `indices.shape + params.shape[1:]` where:

  ```python
  # Scalar indices
  output[:, ..., :] = params[indices, :, ... :]

  # Vector indices
  output[i, :, ..., :] = params[indices[i], :, ... :]

  # Higher rank indices
  output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
  ```

  Args:
    resource: A `Tensor` of type `resource`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    dtype: A `tf.DType`.
    batch_dims: An optional `int`. Defaults to `0`.
    validate_indices: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceGather", name, resource, indices, "batch_dims",
        batch_dims, "validate_indices", validate_indices, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_gather_eager_fallback(
          resource, indices, batch_dims=batch_dims,
          validate_indices=validate_indices, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize attrs, then add the op to the current graph.
  dtype = _execute.make_type(dtype, "dtype")
  if batch_dims is None:
    batch_dims = 0
  batch_dims = _execute.make_int(batch_dims, "batch_dims")
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceGather", resource=resource, indices=indices, dtype=dtype,
                          batch_dims=batch_dims,
                          validate_indices=validate_indices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op (including the inferred Tindices).
    _attrs = ("batch_dims", _op._get_attr_int("batch_dims"),
              "validate_indices", _op._get_attr_bool("validate_indices"),
              "dtype", _op._get_attr_type("dtype"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceGather", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the gathered tensor.
  _result, = _result
  return _result

# Export as tf.raw_ops.ResourceGather.
ResourceGather = tf_export("raw_ops.ResourceGather")(_ops.to_raw_op(resource_gather))

638 

639 

def resource_gather_eager_fallback(resource, indices, dtype, batch_dims, validate_indices, name, ctx):
  """Slow-path eager execution of ResourceGather.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the explicit attrs.
  dtype = _execute.make_type(dtype, "dtype")
  if batch_dims is None:
    batch_dims = 0
  batch_dims = _execute.make_int(batch_dims, "batch_dims")
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  # Infer Tindices from the indices tensor (must be int32 or int64).
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, indices]
  _attrs = ("batch_dims", batch_dims, "validate_indices", validate_indices,
            "dtype", dtype, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ResourceGather", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceGather", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the gathered tensor.
  _result, = _result
  return _result

660 

661 

def resource_gather_nd(resource, indices, dtype, name=None):
  r"""TODO: add doc.

  Args:
    resource: A `Tensor` of type `resource`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    # Attrs are passed positionally as (name, value) pairs after the inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceGatherNd", name, resource, indices, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_gather_nd_eager_fallback(
          resource, indices, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize the dtype attr, then add the op to the graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceGatherNd", resource=resource, indices=indices, dtype=dtype,
                            name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op (including the inferred Tindices).
    _attrs = ("dtype", _op._get_attr_type("dtype"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceGatherNd", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the gathered tensor.
  _result, = _result
  return _result

# Export as tf.raw_ops.ResourceGatherNd.
ResourceGatherNd = tf_export("raw_ops.ResourceGatherNd")(_ops.to_raw_op(resource_gather_nd))

706 

707 

def resource_gather_nd_eager_fallback(resource, indices, dtype, name, ctx):
  """Slow-path eager execution of ResourceGatherNd.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Normalize the dtype attr to a concrete DataType enum value.
  dtype = _execute.make_type(dtype, "dtype")
  # Infer Tindices from the indices tensor (must be int32 or int64).
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, indices]
  _attrs = ("dtype", dtype, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ResourceGatherNd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceGatherNd", _inputs_flat, _attrs, _result)
  # Unwrap the single-element output list into the gathered tensor.
  _result, = _result
  return _result

721 

722 

def resource_scatter_add(resource, indices, updates, name=None):
  r"""Adds sparse updates to the variable referenced by `resource`.

  This operation computes

      # Scalar indices
      ref[indices, ...] += updates[...]

      # Vector indices (for each i)
      ref[indices[i], ...] += updates[i, ...]

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of updated values to add to `ref`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the fast path straight into the C++ runtime first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterAdd", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ status error into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower eager fallback.
      pass
    try:
      return resource_scatter_add_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: record the op in the current graph and return the Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterAdd", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Export as tf.raw_ops.ResourceScatterAdd.
ResourceScatterAdd = tf_export("raw_ops.ResourceScatterAdd")(_ops.to_raw_op(resource_scatter_add))

779 

780 

def resource_scatter_add_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager execution of ResourceScatterAdd.

  Used when the fast-path C execution raises `_FallbackException`.
  """
  # Infer "dtype" from updates, constrained to the op's allowed numeric types.
  _attr_dtype, (updates,) = _execute.args_to_matching_eager([updates], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  # Infer Tindices from the indices tensor (must be int32 or int64).
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, indices, updates]
  _attrs = ("dtype", _attr_dtype, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"ResourceScatterAdd", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # The op has zero outputs, so callers always receive None.
  _result = None
  return _result

791 

792 

def resource_scatter_div(resource, indices, updates, name=None):
  r"""Divides sparse updates into the variable referenced by `resource`.

  This operation computes

      # Scalar indices
      ref[indices, ...] /= updates[...]

      # Vector indices (for each i)
      ref[indices[i], ...] /= updates[i, ...]

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions multiply.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of values to divide `ref` by.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterDiv", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_div_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterDiv", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterDiv.
ResourceScatterDiv = tf_export("raw_ops.ResourceScatterDiv")(_ops.to_raw_op(resource_scatter_div))

849 

850 

def resource_scatter_div_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterDiv op.

  Infers the op's "dtype"/"Tindices" attrs, converts `resource` to a
  resource tensor, and runs the kernel. Returns None (no outputs).
  """
  # Accepted `updates` dtypes, in op-registration order.
  allowed_update_dtypes = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  dtype_attr, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, allowed_update_dtypes)
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterDiv", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

861 

862 

def resource_scatter_max(resource, indices, updates, name=None):
  r"""Reduces sparse updates into the variable referenced by `resource` using the `max` operation.

  This operation computes

      # Scalar indices
      ref[indices, ...] = max(ref[indices, ...], updates[...])

      # Vector indices (for each i)
      ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions are combined.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of values to combine into `ref` via element-wise maximum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterMax", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_max_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterMax", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterMax.
ResourceScatterMax = tf_export("raw_ops.ResourceScatterMax")(_ops.to_raw_op(resource_scatter_max))

919 

920 

def resource_scatter_max_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterMax op.

  Infers the op's "dtype"/"Tindices" attrs, converts `resource` to a
  resource tensor, and runs the kernel. Returns None (no outputs).
  """
  # Accepted `updates` dtypes, in op-registration order.
  allowed_update_dtypes = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  dtype_attr, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, allowed_update_dtypes)
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterMax", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

931 

932 

def resource_scatter_min(resource, indices, updates, name=None):
  r"""Reduces sparse updates into the variable referenced by `resource` using the `min` operation.

  This operation computes

      # Scalar indices
      ref[indices, ...] = min(ref[indices, ...], updates[...])

      # Vector indices (for each i)
      ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions are combined.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of values to combine into `ref` via element-wise minimum.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterMin", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_min_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterMin", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterMin.
ResourceScatterMin = tf_export("raw_ops.ResourceScatterMin")(_ops.to_raw_op(resource_scatter_min))

989 

990 

def resource_scatter_min_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterMin op.

  Infers the op's "dtype"/"Tindices" attrs, converts `resource` to a
  resource tensor, and runs the kernel. Returns None (no outputs).
  """
  # Accepted `updates` dtypes, in op-registration order.
  allowed_update_dtypes = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  dtype_attr, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, allowed_update_dtypes)
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterMin", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

1001 

1002 

def resource_scatter_mul(resource, indices, updates, name=None):
  r"""Multiplies sparse updates into the variable referenced by `resource`.

  This operation computes

      # Scalar indices
      ref[indices, ...] *= updates[...]

      # Vector indices (for each i)
      ref[indices[i], ...] *= updates[i, ...]

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions multiply.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of values to multiply `ref` by.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterMul", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_mul_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterMul", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterMul.
ResourceScatterMul = tf_export("raw_ops.ResourceScatterMul")(_ops.to_raw_op(resource_scatter_mul))

1059 

1060 

def resource_scatter_mul_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterMul op.

  Infers the op's "dtype"/"Tindices" attrs, converts `resource` to a
  resource tensor, and runs the kernel. Returns None (no outputs).
  """
  # Accepted `updates` dtypes, in op-registration order.
  allowed_update_dtypes = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  dtype_attr, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, allowed_update_dtypes)
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterMul", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

1071 

1072 

def resource_scatter_sub(resource, indices, updates, name=None):
  r"""Subtracts sparse updates from the variable referenced by `resource`.

  This operation computes

      # Scalar indices
      ref[indices, ...] -= updates[...]

      # Vector indices (for each i)
      ref[indices[i], ...] -= updates[i, ...]

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]

  Duplicate entries are handled correctly: if multiple `indices` reference
  the same location, their contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of values to subtract from `ref`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterSub", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_sub_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterSub", resource=resource, indices=indices,
                              updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterSub.
ResourceScatterSub = tf_export("raw_ops.ResourceScatterSub")(_ops.to_raw_op(resource_scatter_sub))

1129 

1130 

def resource_scatter_sub_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterSub op.

  Infers the op's "dtype"/"Tindices" attrs, converts `resource` to a
  resource tensor, and runs the kernel. Returns None (no outputs).
  """
  # Accepted `updates` dtypes, in op-registration order.
  allowed_update_dtypes = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  dtype_attr, (updates,) = _execute.args_to_matching_eager(
      [updates], ctx, allowed_update_dtypes)
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterSub", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

1141 

1142 

def resource_scatter_update(resource, indices, updates, name=None):
  r"""Assigns sparse updates to the variable referenced by `resource`.

  This operation computes

      # Scalar indices
      ref[indices, ...] = updates[...]

      # Vector indices (for each i)
      ref[indices[i], ...] = updates[i, ...]

      # High rank indices (for each i, ..., j)
      ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]

  Args:
    resource: A `Tensor` of type `resource`. Should be from a `Variable` node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into the first dimension of `ref`.
    updates: A `Tensor`. A tensor of values to store in `ref`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceScatterUpdate", name, resource, indices, updates)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return resource_scatter_update_eager_fallback(
          resource, indices, updates, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceScatterUpdate", resource=resource, indices=indices,
                                 updates=updates, name=name)
  return _op
# Raw-op export: exposes the generated wrapper as tf.raw_ops.ResourceScatterUpdate.
ResourceScatterUpdate = tf_export("raw_ops.ResourceScatterUpdate")(_ops.to_raw_op(resource_scatter_update))

1189 

1190 

def resource_scatter_update_eager_fallback(resource, indices, updates, name, ctx):
  """Slow-path eager executor for the ResourceScatterUpdate op.

  Unlike the arithmetic scatter ops, `updates` may be of any dtype (the
  allowed-types list is empty), so the attr is taken from the value as-is.
  Returns None (no outputs).
  """
  dtype_attr, (updates,) = _execute.args_to_matching_eager([updates], ctx, [])
  tindices_attr, (indices,) = _execute.args_to_matching_eager(
      [indices], ctx, [_dtypes.int32, _dtypes.int64])
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource, indices, updates]
  op_attrs = ("dtype", dtype_attr, "Tindices", tindices_attr)
  # Zero outputs: run purely for the side effect on the variable.
  _execute.execute(b"ResourceScatterUpdate", 0, inputs=flat_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

1201 

1202 

def var_handle_op(dtype, shape, container="", shared_name="", allowed_devices=[], name=None):
  r"""Creates a handle to a Variable resource.

  NOTE(review): the mutable default `allowed_devices=[]` is generated code
  and is never mutated here (the comprehension below rebinds the name), so
  it is harmless; changing it to None would alter what the eager fast path
  forwards to the runtime.

  Args:
    dtype: A `tf.DType`. the type of this variable. Must agree with the dtypes
      of all ops using this variable.
    shape: A `tf.TensorShape` or list of `ints`.
      The (possibly partially specified) shape of this variable.
    container: An optional `string`. Defaults to `""`.
      the container this variable is placed in.
    shared_name: An optional `string`. Defaults to `""`.
      the name by which this variable is referred to.
    allowed_devices: An optional list of `strings`. Defaults to `[]`.
      DEPRECATED. The allowed devices containing the resource variable. Set when the
      output ResourceHandle represents a per-replica/partitioned resource variable.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "VarHandleOp", name, "container", container, "shared_name",
        shared_name, "dtype", dtype, "shape", shape, "allowed_devices",
        allowed_devices)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return var_handle_op_eager_fallback(
          container=container, shared_name=shared_name, dtype=dtype,
          shape=shape, allowed_devices=allowed_devices, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  # Canonicalize attrs before building the graph node.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if allowed_devices is None:
    allowed_devices = []
  if not isinstance(allowed_devices, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_devices' argument to "
        "'var_handle_op' Op, not %r." % allowed_devices)
  allowed_devices = [_execute.make_str(_s, "allowed_devices") for _s in allowed_devices]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "VarHandleOp", dtype=dtype, shape=shape, container=container,
                       shared_name=shared_name,
                       allowed_devices=allowed_devices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read the attrs from the created op so the tape records the
    # canonicalized values.
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "dtype",
              _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"),
              "allowed_devices", _op.get_attr("allowed_devices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "VarHandleOp", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposes the generated wrapper as tf.raw_ops.VarHandleOp.
VarHandleOp = tf_export("raw_ops.VarHandleOp")(_ops.to_raw_op(var_handle_op))

1275 

1276 

def var_handle_op_eager_fallback(dtype, shape, container, shared_name, allowed_devices, name, ctx):
  """Slow-path eager executor for VarHandleOp.

  Canonicalizes all attrs, executes the op (one output), optionally records
  a gradient, and returns the resulting resource handle tensor.
  """
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  if allowed_devices is None:
    allowed_devices = []
  if not isinstance(allowed_devices, (list, tuple)):
    raise TypeError(
        "Expected list for 'allowed_devices' argument to "
        "'var_handle_op' Op, not %r." % allowed_devices)
  allowed_devices = [_execute.make_str(device, "allowed_devices")
                     for device in allowed_devices]
  flat_inputs = []  # VarHandleOp takes no tensor inputs; everything is attrs.
  op_attrs = ("container", container, "shared_name", shared_name, "dtype",
              dtype, "shape", shape, "allowed_devices", allowed_devices)
  results = _execute.execute(b"VarHandleOp", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("VarHandleOp", flat_inputs, op_attrs, results)
  handle, = results
  return handle

1303 

1304 

def var_is_initialized_op(resource, name=None):
  r"""Checks whether a resource handle-based variable has been initialized.

  Args:
    resource: A `Tensor` of type `resource`. the input resource handle.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "VarIsInitializedOp", name, resource)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return var_is_initialized_op_eager_fallback(
          resource, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "VarIsInitializedOp", resource=resource, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()  # Op has no attrs.
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "VarIsInitializedOp", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposes the generated wrapper as tf.raw_ops.VarIsInitializedOp.
VarIsInitializedOp = tf_export("raw_ops.VarIsInitializedOp")(_ops.to_raw_op(var_is_initialized_op))

1344 

1345 

def var_is_initialized_op_eager_fallback(resource, name, ctx):
  """Slow-path eager executor for VarIsInitializedOp.

  Executes the op directly (one bool output) and returns the result tensor.
  """
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  flat_inputs = [resource]
  op_attrs = None  # Op has no attrs.
  results = _execute.execute(b"VarIsInitializedOp", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("VarIsInitializedOp", flat_inputs, op_attrs,
                             results)
  is_initialized, = results
  return is_initialized

1357 

1358 

def variable_shape(input, out_type=_dtypes.int32, name=None):
  r"""Returns the shape of the variable pointed to by `resource`.

  This operation returns a 1-D integer tensor representing the shape of `input`.

  For example:

  ```
  # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
  shape(t) ==> [2, 2, 3]
  ```

  Args:
    input: A `Tensor` of type `resource`.
    out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "VariableShape", name, input, "out_type", out_type)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the Python eager fallback.
    try:
      return variable_shape_eager_fallback(
          input, out_type=out_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Inputs are symbolic; fall through to graph construction below.
  # Add nodes to the TensorFlow graph.
  if out_type is None:
    out_type = _dtypes.int32
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "VariableShape", input=input, out_type=out_type, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("out_type", _op._get_attr_type("out_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "VariableShape", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposes the generated wrapper as tf.raw_ops.VariableShape.
VariableShape = tf_export("raw_ops.VariableShape")(_ops.to_raw_op(variable_shape))

1411 

1412 

def variable_shape_eager_fallback(input, out_type, name, ctx):
  """Slow-path eager executor for VariableShape.

  Canonicalizes `out_type` (defaulting to int32), executes the op (one
  output), optionally records a gradient, and returns the shape tensor.
  """
  out_type = _execute.make_type(
      _dtypes.int32 if out_type is None else out_type, "out_type")
  input = _ops.convert_to_tensor(input, _dtypes.resource)
  flat_inputs = [input]
  op_attrs = ("out_type", out_type)
  results = _execute.execute(b"VariableShape", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("VariableShape", flat_inputs, op_attrs, results)
  shape_tensor, = results
  return shape_tensor

1427