Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_data_flow_ops.py: 9%

4076 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

def accumulator_apply_gradient(handle, local_step, gradient, name=None):
  r"""Applies a gradient to a given accumulator.

  The accumulator ignores the update when `local_step` is lower than its
  recorded global_step.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a accumulator.
    local_step: A `Tensor` of type `int64`. The local_step value at which the
      gradient was computed.
    gradient: A `Tensor` of any supported numeric type. The gradient to be
      accumulated.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: build the node; element [2] of the helper result is the Operation.
  op = _op_def_library._apply_op_helper(
      "AccumulatorApplyGradient", handle=handle, local_step=local_step,
      gradient=gradient, name=name)[2]
  return op
AccumulatorApplyGradient = tf_export("raw_ops.AccumulatorApplyGradient")(_ops.to_raw_op(accumulator_apply_gradient))

50 

51 

def accumulator_apply_gradient_eager_fallback(handle, local_step, gradient, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")

54 

def accumulator_num_accumulated(handle, name=None):
  r"""Returns the number of gradients aggregated in the given accumulators.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: create the op and collect its outputs.
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "AccumulatorNumAccumulated", handle=handle, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    # No attrs for this op.
    _execute.record_gradient(
        "AccumulatorNumAccumulated", op.inputs, (), results)
  count, = results
  return count

AccumulatorNumAccumulated = tf_export("raw_ops.AccumulatorNumAccumulated")(_ops.to_raw_op(accumulator_num_accumulated))

82 

83 

def accumulator_num_accumulated_eager_fallback(handle, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("accumulator_num_accumulated op does not support eager execution. Arg 'handle' is a ref.")

86 

def accumulator_set_global_step(handle, new_global_step, name=None):
  r"""Updates the accumulator with a new value for global_step.

  Logs warning if the accumulator's value is already higher than
  new_global_step.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
    new_global_step: A `Tensor` of type `int64`. The new global_step value.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: element [2] of the helper result is the Operation itself.
  op = _op_def_library._apply_op_helper(
      "AccumulatorSetGlobalStep", handle=handle,
      new_global_step=new_global_step, name=name)[2]
  return op
AccumulatorSetGlobalStep = tf_export("raw_ops.AccumulatorSetGlobalStep")(_ops.to_raw_op(accumulator_set_global_step))

113 

114 

def accumulator_set_global_step_eager_fallback(handle, new_global_step, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("accumulator_set_global_step op does not support eager execution. Arg 'handle' is a ref.")

117 

def accumulator_take_gradient(handle, num_required, dtype, name=None):
  r"""Extracts the average gradient in the given ConditionalAccumulator.

  Blocks until at least `num_required` gradients have been accumulated, then
  returns their average, increments the accumulator's global_step, and resets
  the aggregate to 0.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to an accumulator.
    num_required: A `Tensor` of type `int32`. Number of gradients required
      before an aggregate is returned.
    dtype: A `tf.DType` matching the accumulator's value type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: canonicalize the dtype attr, then create the op.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "AccumulatorTakeGradient", handle=handle, num_required=num_required,
      dtype=dtype, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AccumulatorTakeGradient", op.inputs,
        ("dtype", op._get_attr_type("dtype")), results)
  gradient, = results
  return gradient

AccumulatorTakeGradient = tf_export("raw_ops.AccumulatorTakeGradient")(_ops.to_raw_op(accumulator_take_gradient))

158 

159 

def accumulator_take_gradient_eager_fallback(handle, num_required, dtype, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")

162 

def barrier(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
  r"""Defines a barrier that persists across different graph executions.

  A barrier is a key/value map where each key is a string and each value is a
  tuple of tensors. At runtime, elements with all components defined are
  'complete' and may be taken with BarrierTakeMany; elements with undefined
  components are 'incomplete' and are filled in via BarrierInsertMany.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`. The type of
      each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of
      `ints`). Defaults to `[]`. Must have the same length as
      `component_types`; each shape's first dimension must be 1.
    capacity: An optional `int`. Defaults to `-1` (MAX_INT32, the largest
      capacity of the underlying queue).
    container: An optional `string`. Defaults to `""`. Container to place the
      barrier in, if non-empty.
    shared_name: An optional `string`. Defaults to `""`. Name under which the
      barrier is shared across sessions, if non-empty.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed outputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")
  # Validate and canonicalize every attr before creating the op.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'barrier' Op, not %r." % component_types)
  component_types = [_execute.make_type(t, "component_types")
                     for t in component_types]
  shapes = [] if shapes is None else shapes
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'barrier' Op, not %r." % shapes)
  shapes = [_execute.make_shape(s, "shapes") for s in shapes]
  capacity = _execute.make_int(-1 if capacity is None else capacity,
                               "capacity")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "Barrier", component_types=component_types, shapes=shapes,
      capacity=capacity, container=container, shared_name=shared_name,
      name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    attrs = ("component_types", op.get_attr("component_types"),
             "shapes", op.get_attr("shapes"),
             "capacity", op._get_attr_int("capacity"),
             "container", op.get_attr("container"),
             "shared_name", op.get_attr("shared_name"))
    _execute.record_gradient("Barrier", op.inputs, attrs, results)
  handle, = results
  return handle

Barrier = tf_export("raw_ops.Barrier")(_ops.to_raw_op(barrier))

240 

241 

def barrier_eager_fallback(component_types, shapes, capacity, container, shared_name, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier op does not support eager execution. Arg 'handle' is a ref.")

244 

def barrier_close(handle, cancel_pending_enqueues=False, name=None):
  r"""Closes the given barrier.

  Signals that no new elements will be inserted. Subsequent InsertMany calls
  introducing a new key fail; InsertMany calls that only add missing
  components to existing elements still succeed. TakeMany continues to
  succeed while enough completed elements remain, but calls that would block
  fail immediately.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a barrier.
    cancel_pending_enqueues: An optional `bool`. Defaults to `False`. If true,
      all pending enqueue requests blocked on the barrier's queue are
      canceled, and InsertMany fails even without a new key.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")
  cancel_pending_enqueues = _execute.make_bool(
      False if cancel_pending_enqueues is None else cancel_pending_enqueues,
      "cancel_pending_enqueues")
  # Graph mode: element [2] of the helper result is the Operation itself.
  op = _op_def_library._apply_op_helper(
      "BarrierClose", handle=handle,
      cancel_pending_enqueues=cancel_pending_enqueues, name=name)[2]
  return op
BarrierClose = tf_export("raw_ops.BarrierClose")(_ops.to_raw_op(barrier_close))

280 

281 

def barrier_close_eager_fallback(handle, cancel_pending_enqueues, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier_close op does not support eager execution. Arg 'handle' is a ref.")

284 

def barrier_incomplete_size(handle, name=None):
  r"""Computes the number of incomplete elements in the given barrier.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a barrier.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: create the op and collect its outputs.
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "BarrierIncompleteSize", handle=handle, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    # No attrs for this op.
    _execute.record_gradient(
        "BarrierIncompleteSize", op.inputs, (), results)
  size, = results
  return size

BarrierIncompleteSize = tf_export("raw_ops.BarrierIncompleteSize")(_ops.to_raw_op(barrier_incomplete_size))

312 

313 

def barrier_incomplete_size_eager_fallback(handle, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier_incomplete_size op does not support eager execution. Arg 'handle' is a ref.")

316 

def barrier_insert_many(handle, keys, values, component_index, name=None):
  r"""For each key, assigns the respective value to the specified component.

  Keys not present in the barrier create new incomplete elements. If a key is
  found and its element already has a value at component_index, the op fails
  with INVALID_ARGUMENT and leaves the barrier in an undefined state.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a barrier.
    keys: A `Tensor` of type `string`. One-dimensional, length n.
    values: A `Tensor` of any dimensionality; dimension 0 must have length n.
    component_index: An `int`. The component being assigned.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")
  component_index = _execute.make_int(component_index, "component_index")
  # Graph mode: element [2] of the helper result is the Operation itself.
  op = _op_def_library._apply_op_helper(
      "BarrierInsertMany", handle=handle, keys=keys, values=values,
      component_index=component_index, name=name)[2]
  return op
BarrierInsertMany = tf_export("raw_ops.BarrierInsertMany")(_ops.to_raw_op(barrier_insert_many))

350 

351 

def barrier_insert_many_eager_fallback(handle, keys, values, component_index, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier_insert_many op does not support eager execution. Arg 'handle' is a ref.")

354 

def barrier_ready_size(handle, name=None):
  r"""Computes the number of complete elements in the given barrier.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a barrier.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed inputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")
  # Graph mode: create the op and collect its outputs.
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "BarrierReadySize", handle=handle, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    # No attrs for this op.
    _execute.record_gradient(
        "BarrierReadySize", op.inputs, (), results)
  size, = results
  return size

BarrierReadySize = tf_export("raw_ops.BarrierReadySize")(_ops.to_raw_op(barrier_ready_size))

382 

383 

def barrier_ready_size_eager_fallback(handle, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier_ready_size op does not support eager execution. Arg 'handle' is a ref.")

386_BarrierTakeManyOutput = collections.namedtuple( 

387 "BarrierTakeMany", 

388 ["indices", "keys", "values"]) 

389 

390 

def barrier_take_many(handle, num_elements, component_types, allow_small_batch=False, wait_for_incomplete=False, timeout_ms=-1, name=None):
  r"""Takes the given number of completed elements from a barrier.

  This operation concatenates completed-element component tensors along
  the 0th dimension to make a single component tensor.

  Elements come out of the barrier when they are complete, and in the order
  in which they were placed into the barrier. The indices output provides
  information about the batch in which each element was originally inserted
  into the barrier.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a barrier.
    num_elements: A `Tensor` of type `int32`.
      A single-element tensor containing the number of elements to
      take.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    allow_small_batch: An optional `bool`. Defaults to `False`.
      Allow to return less than num_elements items if barrier is
      already closed.
    wait_for_incomplete: An optional `bool`. Defaults to `False`.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is empty, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, keys, values).

    indices: A `Tensor` of type `int64`.
    keys: A `Tensor` of type `string`.
    values: A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  # Ref-typed inputs cannot be represented in eager mode.
  if tld.is_eager:
    raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'barrier_take_many' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  # Fill in and canonicalize the optional attrs.
  if allow_small_batch is None:
    allow_small_batch = False
  allow_small_batch = _execute.make_bool(allow_small_batch, "allow_small_batch")
  if wait_for_incomplete is None:
    wait_for_incomplete = False
  wait_for_incomplete = _execute.make_bool(wait_for_incomplete, "wait_for_incomplete")
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BarrierTakeMany", handle=handle, num_elements=num_elements,
                           component_types=component_types,
                           allow_small_batch=allow_small_batch,
                           wait_for_incomplete=wait_for_incomplete,
                           timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"),
              "allow_small_batch", _op._get_attr_bool("allow_small_batch"),
              "wait_for_incomplete",
              _op._get_attr_bool("wait_for_incomplete"), "timeout_ms",
              _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BarrierTakeMany", _inputs_flat, _attrs, _result)
  # The first two outputs are indices and keys; the remaining outputs are the
  # per-component value tensors, regrouped into a single list.
  _result = _result[:2] + [_result[2:]]
  _result = _BarrierTakeManyOutput._make(_result)
  return _result

BarrierTakeMany = tf_export("raw_ops.BarrierTakeMany")(_ops.to_raw_op(barrier_take_many))

466 

467 

def barrier_take_many_eager_fallback(handle, num_elements, component_types, allow_small_batch, wait_for_incomplete, timeout_ms, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("barrier_take_many op does not support eager execution. Arg 'handle' is a ref.")

470 

def conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
  r"""A conditional accumulator for aggregating gradients.

  Accepts gradients whose local_step is greater than or equal to the most
  recent global_step known to the accumulator. Once sufficient gradients have
  accumulated, the average can be extracted, which resets the aggregate to 0
  and increments the recorded global_step.

  Args:
    dtype: A `tf.DType` from the numeric types. The type of the value being
      accumulated.
    shape: A `tf.TensorShape` or list of `ints`. The shape of the values; may
      be `[]` for unknown shape.
    container: An optional `string`. Defaults to `""`. Container to place the
      accumulator in, if non-empty.
    shared_name: An optional `string`. Defaults to `""`. Name under which the
      accumulator is shared across sessions, if non-empty.
    reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to
      `"MEAN"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed outputs cannot be represented in eager mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
  # Canonicalize every attr before creating the op.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  reduction_type = _execute.make_str(
      "MEAN" if reduction_type is None else reduction_type, "reduction_type")
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "ConditionalAccumulator", dtype=dtype, shape=shape,
      container=container, shared_name=shared_name,
      reduction_type=reduction_type, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    attrs = ("dtype", op._get_attr_type("dtype"),
             "shape", op.get_attr("shape"),
             "container", op.get_attr("container"),
             "shared_name", op.get_attr("shared_name"),
             "reduction_type", op.get_attr("reduction_type"))
    _execute.record_gradient("ConditionalAccumulator", op.inputs, attrs,
                             results)
  handle, = results
  return handle

ConditionalAccumulator = tf_export("raw_ops.ConditionalAccumulator")(_ops.to_raw_op(conditional_accumulator))

532 

533 

def conditional_accumulator_eager_fallback(dtype, shape, container, shared_name, reduction_type, name, ctx):
  """Eager fallback stub: always raises, since the op's ref-typed handle cannot exist eagerly."""
  raise RuntimeError("conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")

536 

def delete_session_tensor(handle, name=None):
  r"""Delete the tensor specified by its handle in the session.

  Args:
    handle: A `Tensor` of type `string`.
      The handle for a tensor stored in the session state.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager: attempt the C fast path first; fall back to the slow path if it
    # signals _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DeleteSessionTensor", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return delete_session_tensor_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DeleteSessionTensor", handle=handle, name=name)
  return _op
DeleteSessionTensor = tf_export("raw_ops.DeleteSessionTensor")(_ops.to_raw_op(delete_session_tensor))

569 

570 

def delete_session_tensor_eager_fallback(handle, name, ctx):
  """Slow-path eager execution of DeleteSessionTensor; returns None (no outputs)."""
  tensor = _ops.convert_to_tensor(handle, _dtypes.string)
  # Zero outputs requested; the op is executed purely for its side effect.
  _execute.execute(b"DeleteSessionTensor", 0, inputs=[tensor], attrs=None,
                   ctx=ctx, name=name)
  return None

579 

580 

@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('dynamic_partition')
def dynamic_partition(data, partitions, num_partitions, name=None):
  r"""Partitions `data` into `num_partitions` tensors using indices from `partitions`.

  For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
  becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
  are placed in `outputs[i]` in lexicographic order of `js`, and the first
  dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
  In detail,

  ```python
  outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]

  outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
  ```

  `data.shape` must start with `partitions.shape`.

  For example:

  ```python
  # Scalar partitions.
  partitions = 1
  num_partitions = 2
  data = [10, 20]
  outputs[0] = []  # Empty with shape [0, 2]
  outputs[1] = [[10, 20]]

  # Vector partitions.
  partitions = [0, 0, 1, 1, 0]
  num_partitions = 2
  data = [10, 20, 30, 40, 50]
  outputs[0] = [10, 20, 50]
  outputs[1] = [30, 40]
  ```

  See `dynamic_stitch` for an example on how to merge partitions back.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
  </div>


  Raises:
    * `InvalidArgumentError` in following cases:
      - If partitions is not in range `[0, num_partiions)`
      - If `partitions.shape` does not match prefix of `data.shape` argument.

  Args:
    data: A `Tensor`.
    partitions: A `Tensor` of type `int32`.
      Any shape. Indices in the range `[0, num_partitions)`.
    num_partitions: An `int` that is `>= 1`.
      The number of partitions to output.
    name: A name for the operation (optional).

  Returns:
    A list of `num_partitions` `Tensor` objects with the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager: attempt the C fast path first; on _FallbackException try the
    # type-based dispatcher, then the slow eager path.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DynamicPartition", name, data, partitions, "num_partitions",
        num_partitions)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # NOTE: _dispatcher_for_dynamic_partition is bound at module level
      # (after this definition); NotImplemented means no dispatcher claimed
      # the call.
      _result = _dispatcher_for_dynamic_partition(
          (data, partitions, num_partitions, name,), None)
      if _result is not NotImplemented:
        return _result
      return dynamic_partition_eager_fallback(
          data, partitions, num_partitions=num_partitions, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Last resort: the fallback dispatch list (e.g. for dispatchable
      # extension types); re-raise if nothing handles the call.
      _result = _dispatch.dispatch(
            dynamic_partition, (), dict(data=data, partitions=partitions,
                                        num_partitions=num_partitions,
                                        name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    # Graph mode still consults the type-based dispatcher first.
    _result = _dispatcher_for_dynamic_partition(
        (data, partitions, num_partitions, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  num_partitions = _execute.make_int(num_partitions, "num_partitions")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DynamicPartition", data=data, partitions=partitions,
                            num_partitions=num_partitions, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          dynamic_partition, (), dict(data=data, partitions=partitions,
                                      num_partitions=num_partitions,
                                      name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_partitions", _op._get_attr_int("num_partitions"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DynamicPartition", _inputs_flat, _attrs, _result)
  return _result

DynamicPartition = tf_export("raw_ops.DynamicPartition")(_ops.to_raw_op(dynamic_partition))
_dispatcher_for_dynamic_partition = dynamic_partition._tf_type_based_dispatcher.Dispatch

704 

705 

def dynamic_partition_eager_fallback(data, partitions, num_partitions, name, ctx):
  """Slow-path eager execution of DynamicPartition."""
  count = _execute.make_int(num_partitions, "num_partitions")
  # Infer the T attr from `data`; partitions is always int32.
  attr_t, (data,) = _execute.args_to_matching_eager([data], ctx, [])
  partitions = _ops.convert_to_tensor(partitions, _dtypes.int32)
  flat_inputs = [data, partitions]
  attrs = ("num_partitions", count, "T", attr_t)
  outputs = _execute.execute(b"DynamicPartition", count, inputs=flat_inputs,
                             attrs=attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("DynamicPartition", flat_inputs, attrs, outputs)
  return outputs

719 

720 

721@_dispatch.add_fallback_dispatch_list 

722@_dispatch.add_type_based_api_dispatcher 

723@tf_export('dynamic_stitch') 

724def dynamic_stitch(indices, data, name=None): 

725 r"""Interleave the values from the `data` tensors into a single tensor. 

726 

727 Builds a merged tensor such that 

728 

729 ```python 

730 merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] 

731 ``` 

732 

733 For example, if each `indices[m]` is scalar or vector, we have 

734 

735 ```python 

736 # Scalar indices: 

737 merged[indices[m], ...] = data[m][...] 

738 

739 # Vector indices: 

740 merged[indices[m][i], ...] = data[m][i, ...] 

741 ``` 

742 

743 Each `data[i].shape` must start with the corresponding `indices[i].shape`, 

744 and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we 

745 must have `data[i].shape = indices[i].shape + constant`. In terms of this 

746 `constant`, the output shape is 

747 

748 merged.shape = [max(indices)] + constant 

749 

750 Values are merged in order, so if an index appears in both `indices[m][i]` and 

751 `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the 

752 merged result. If you do not need this guarantee, ParallelDynamicStitch might 

753 perform better on some devices. 

754 

755 For example: 

756 

757 ```python 

758 indices[0] = 6 

759 indices[1] = [4, 1] 

760 indices[2] = [[5, 2], [0, 3]] 

761 data[0] = [61, 62] 

762 data[1] = [[41, 42], [11, 12]] 

763 data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] 

764 merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], 

765 [51, 52], [61, 62]] 

766 ``` 

767 

768 This method can be used to merge partitions created by `dynamic_partition` 

769 as illustrated on the following example: 

770 

771 ```python 

772 # Apply function (increments x_i) on elements for which a certain condition 

773 # apply (x_i != -1 in this example). 

774 x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) 

775 condition_mask=tf.not_equal(x,tf.constant(-1.)) 

776 partitioned_data = tf.dynamic_partition( 

777 x, tf.cast(condition_mask, tf.int32) , 2) 

778 partitioned_data[1] = partitioned_data[1] + 1.0 

779 condition_indices = tf.dynamic_partition( 

780 tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) 

781 x = tf.dynamic_stitch(condition_indices, partitioned_data) 

782 # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain 

783 # unchanged. 

784 ``` 

785 

786 <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> 

787 <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt> 

788 </div> 

789 

790 Args: 

791 indices: A list of at least 1 `Tensor` objects with type `int32`. 

792 data: A list with the same length as `indices` of `Tensor` objects with the same type. 

793 name: A name for the operation (optional). 

794 

795 Returns: 

796 A `Tensor`. Has the same type as `data`. 

797 """ 

798 _ctx = _context._context or _context.context() 

799 tld = _ctx._thread_local_data 

800 if tld.is_eager: 

801 try: 

802 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

803 _ctx, "DynamicStitch", name, indices, data) 

804 return _result 

805 except _core._NotOkStatusException as e: 

806 _ops.raise_from_not_ok_status(e, name) 

807 except _core._FallbackException: 

808 pass 

809 try: 

810 _result = _dispatcher_for_dynamic_stitch( 

811 (indices, data, name,), None) 

812 if _result is not NotImplemented: 

813 return _result 

814 return dynamic_stitch_eager_fallback( 

815 indices, data, name=name, ctx=_ctx) 

816 except _core._SymbolicException: 

817 pass # Add nodes to the TensorFlow graph. 

818 except (TypeError, ValueError): 

819 _result = _dispatch.dispatch( 

820 dynamic_stitch, (), dict(indices=indices, data=data, name=name) 

821 ) 

822 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

823 return _result 

824 raise 

825 else: 

826 _result = _dispatcher_for_dynamic_stitch( 

827 (indices, data, name,), None) 

828 if _result is not NotImplemented: 

829 return _result 

830 # Add nodes to the TensorFlow graph. 

831 if not isinstance(indices, (list, tuple)): 

832 raise TypeError( 

833 "Expected list for 'indices' argument to " 

834 "'dynamic_stitch' Op, not %r." % indices) 

835 _attr_N = len(indices) 

836 if not isinstance(data, (list, tuple)): 

837 raise TypeError( 

838 "Expected list for 'data' argument to " 

839 "'dynamic_stitch' Op, not %r." % data) 

840 if len(data) != _attr_N: 

841 raise ValueError( 

842 "List argument 'data' to 'dynamic_stitch' Op with length %d " 

843 "must match length %d of argument 'indices'." % 

844 (len(data), _attr_N)) 

845 try: 

846 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

847 "DynamicStitch", indices=indices, data=data, name=name) 

848 except (TypeError, ValueError): 

849 _result = _dispatch.dispatch( 

850 dynamic_stitch, (), dict(indices=indices, data=data, name=name) 

851 ) 

852 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

853 return _result 

854 raise 

855 _result = _outputs[:] 

856 if _execute.must_record_gradient(): 

857 _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T")) 

858 _inputs_flat = _op.inputs 

859 _execute.record_gradient( 

860 "DynamicStitch", _inputs_flat, _attrs, _result) 

861 _result, = _result 

862 return _result 

863 

864DynamicStitch = tf_export("raw_ops.DynamicStitch")(_ops.to_raw_op(dynamic_stitch)) 

865_dispatcher_for_dynamic_stitch = dynamic_stitch._tf_type_based_dispatcher.Dispatch 

866 

867 

868def dynamic_stitch_eager_fallback(indices, data, name, ctx): 

869 if not isinstance(indices, (list, tuple)): 

870 raise TypeError( 

871 "Expected list for 'indices' argument to " 

872 "'dynamic_stitch' Op, not %r." % indices) 

873 _attr_N = len(indices) 

874 if not isinstance(data, (list, tuple)): 

875 raise TypeError( 

876 "Expected list for 'data' argument to " 

877 "'dynamic_stitch' Op, not %r." % data) 

878 if len(data) != _attr_N: 

879 raise ValueError( 

880 "List argument 'data' to 'dynamic_stitch' Op with length %d " 

881 "must match length %d of argument 'indices'." % 

882 (len(data), _attr_N)) 

883 _attr_T, data = _execute.args_to_matching_eager(list(data), ctx, []) 

884 indices = _ops.convert_n_to_tensor(indices, _dtypes.int32) 

885 _inputs_flat = list(indices) + list(data) 

886 _attrs = ("N", _attr_N, "T", _attr_T) 

887 _result = _execute.execute(b"DynamicStitch", 1, inputs=_inputs_flat, 

888 attrs=_attrs, ctx=ctx, name=name) 

889 if _execute.must_record_gradient(): 

890 _execute.record_gradient( 

891 "DynamicStitch", _inputs_flat, _attrs, _result) 

892 _result, = _result 

893 return _result 

894 

895 

896def fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None): 

897 r"""A queue that produces elements in first-in first-out order. 

898 

899 Args: 

900 component_types: A list of `tf.DTypes` that has length `>= 1`. 

901 The type of each component in a value. 

902 shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. 

903 The shape of each component in a value. The length of this attr must 

904 be either 0 or the same as the length of component_types. If the length of 

905 this attr is 0, the shapes of queue elements are not constrained, and 

906 only one element may be dequeued at a time. 

907 capacity: An optional `int`. Defaults to `-1`. 

908 The upper bound on the number of elements in this queue. 

909 Negative numbers mean no limit. 

910 container: An optional `string`. Defaults to `""`. 

911 If non-empty, this queue is placed in the given container. 

912 Otherwise, a default container is used. 

913 shared_name: An optional `string`. Defaults to `""`. 

914 If non-empty, this queue will be shared under the given name 

915 across multiple sessions. 

916 name: A name for the operation (optional). 

917 

918 Returns: 

919 A `Tensor` of type mutable `string`. 

920 """ 

921 _ctx = _context._context or _context.context() 

922 tld = _ctx._thread_local_data 

923 if tld.is_eager: 

924 raise RuntimeError("fifo_queue op does not support eager execution. Arg 'handle' is a ref.") 

925 # Add nodes to the TensorFlow graph. 

926 if not isinstance(component_types, (list, tuple)): 

927 raise TypeError( 

928 "Expected list for 'component_types' argument to " 

929 "'fifo_queue' Op, not %r." % component_types) 

930 component_types = [_execute.make_type(_t, "component_types") for _t in component_types] 

931 if shapes is None: 

932 shapes = [] 

933 if not isinstance(shapes, (list, tuple)): 

934 raise TypeError( 

935 "Expected list for 'shapes' argument to " 

936 "'fifo_queue' Op, not %r." % shapes) 

937 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

938 if capacity is None: 

939 capacity = -1 

940 capacity = _execute.make_int(capacity, "capacity") 

941 if container is None: 

942 container = "" 

943 container = _execute.make_str(container, "container") 

944 if shared_name is None: 

945 shared_name = "" 

946 shared_name = _execute.make_str(shared_name, "shared_name") 

947 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

948 "FIFOQueue", component_types=component_types, shapes=shapes, 

949 capacity=capacity, container=container, 

950 shared_name=shared_name, name=name) 

951 _result = _outputs[:] 

952 if _execute.must_record_gradient(): 

953 _attrs = ("component_types", _op.get_attr("component_types"), "shapes", 

954 _op.get_attr("shapes"), "capacity", 

955 _op._get_attr_int("capacity"), "container", 

956 _op.get_attr("container"), "shared_name", 

957 _op.get_attr("shared_name")) 

958 _inputs_flat = _op.inputs 

959 _execute.record_gradient( 

960 "FIFOQueue", _inputs_flat, _attrs, _result) 

961 _result, = _result 

962 return _result 

963 

964FIFOQueue = tf_export("raw_ops.FIFOQueue")(_ops.to_raw_op(fifo_queue)) 

965 

966 

def fifo_queue_eager_fallback(component_types, shapes, capacity, container, shared_name, name, ctx):
  """Eager-mode stand-in for `fifo_queue`; unconditionally raises.

  The FIFOQueue op returns a mutable ref-typed handle, which cannot be
  represented under eager execution, so no eager fallback is possible.
  """
  message = "fifo_queue op does not support eager execution. Arg 'handle' is a ref."
  raise RuntimeError(message)

969 

970def fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None): 

971 r"""A queue that produces elements in first-in first-out order. 

972 

973 Args: 

974 component_types: A list of `tf.DTypes` that has length `>= 1`. 

975 The type of each component in a value. 

976 shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`. 

977 The shape of each component in a value. The length of this attr must 

978 be either 0 or the same as the length of component_types. If the length of 

979 this attr is 0, the shapes of queue elements are not constrained, and 

980 only one element may be dequeued at a time. 

981 capacity: An optional `int`. Defaults to `-1`. 

982 The upper bound on the number of elements in this queue. 

983 Negative numbers mean no limit. 

984 container: An optional `string`. Defaults to `""`. 

985 If non-empty, this queue is placed in the given container. 

986 Otherwise, a default container is used. 

987 shared_name: An optional `string`. Defaults to `""`. 

988 If non-empty, this queue will be shared under the given name 

989 across multiple sessions. 

990 name: A name for the operation (optional). 

991 

992 Returns: 

993 A `Tensor` of type `resource`. 

994 """ 

995 _ctx = _context._context or _context.context() 

996 tld = _ctx._thread_local_data 

997 if tld.is_eager: 

998 try: 

999 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1000 _ctx, "FIFOQueueV2", name, "component_types", component_types, 

1001 "shapes", shapes, "capacity", capacity, "container", container, 

1002 "shared_name", shared_name) 

1003 return _result 

1004 except _core._NotOkStatusException as e: 

1005 _ops.raise_from_not_ok_status(e, name) 

1006 except _core._FallbackException: 

1007 pass 

1008 try: 

1009 return fifo_queue_v2_eager_fallback( 

1010 component_types=component_types, shapes=shapes, capacity=capacity, 

1011 container=container, shared_name=shared_name, name=name, ctx=_ctx) 

1012 except _core._SymbolicException: 

1013 pass # Add nodes to the TensorFlow graph. 

1014 # Add nodes to the TensorFlow graph. 

1015 if not isinstance(component_types, (list, tuple)): 

1016 raise TypeError( 

1017 "Expected list for 'component_types' argument to " 

1018 "'fifo_queue_v2' Op, not %r." % component_types) 

1019 component_types = [_execute.make_type(_t, "component_types") for _t in component_types] 

1020 if shapes is None: 

1021 shapes = [] 

1022 if not isinstance(shapes, (list, tuple)): 

1023 raise TypeError( 

1024 "Expected list for 'shapes' argument to " 

1025 "'fifo_queue_v2' Op, not %r." % shapes) 

1026 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1027 if capacity is None: 

1028 capacity = -1 

1029 capacity = _execute.make_int(capacity, "capacity") 

1030 if container is None: 

1031 container = "" 

1032 container = _execute.make_str(container, "container") 

1033 if shared_name is None: 

1034 shared_name = "" 

1035 shared_name = _execute.make_str(shared_name, "shared_name") 

1036 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1037 "FIFOQueueV2", component_types=component_types, shapes=shapes, 

1038 capacity=capacity, container=container, 

1039 shared_name=shared_name, name=name) 

1040 _result = _outputs[:] 

1041 if _execute.must_record_gradient(): 

1042 _attrs = ("component_types", _op.get_attr("component_types"), "shapes", 

1043 _op.get_attr("shapes"), "capacity", 

1044 _op._get_attr_int("capacity"), "container", 

1045 _op.get_attr("container"), "shared_name", 

1046 _op.get_attr("shared_name")) 

1047 _inputs_flat = _op.inputs 

1048 _execute.record_gradient( 

1049 "FIFOQueueV2", _inputs_flat, _attrs, _result) 

1050 _result, = _result 

1051 return _result 

1052 

1053FIFOQueueV2 = tf_export("raw_ops.FIFOQueueV2")(_ops.to_raw_op(fifo_queue_v2)) 

1054 

1055 

1056def fifo_queue_v2_eager_fallback(component_types, shapes, capacity, container, shared_name, name, ctx): 

1057 if not isinstance(component_types, (list, tuple)): 

1058 raise TypeError( 

1059 "Expected list for 'component_types' argument to " 

1060 "'fifo_queue_v2' Op, not %r." % component_types) 

1061 component_types = [_execute.make_type(_t, "component_types") for _t in component_types] 

1062 if shapes is None: 

1063 shapes = [] 

1064 if not isinstance(shapes, (list, tuple)): 

1065 raise TypeError( 

1066 "Expected list for 'shapes' argument to " 

1067 "'fifo_queue_v2' Op, not %r." % shapes) 

1068 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1069 if capacity is None: 

1070 capacity = -1 

1071 capacity = _execute.make_int(capacity, "capacity") 

1072 if container is None: 

1073 container = "" 

1074 container = _execute.make_str(container, "container") 

1075 if shared_name is None: 

1076 shared_name = "" 

1077 shared_name = _execute.make_str(shared_name, "shared_name") 

1078 _inputs_flat = [] 

1079 _attrs = ("component_types", component_types, "shapes", shapes, "capacity", 

1080 capacity, "container", container, "shared_name", shared_name) 

1081 _result = _execute.execute(b"FIFOQueueV2", 1, inputs=_inputs_flat, 

1082 attrs=_attrs, ctx=ctx, name=name) 

1083 if _execute.must_record_gradient(): 

1084 _execute.record_gradient( 

1085 "FIFOQueueV2", _inputs_flat, _attrs, _result) 

1086 _result, = _result 

1087 return _result 

1088 

1089 

1090def fake_queue(resource, name=None): 

1091 r"""Deprecated. Do not use. 

1092 

1093 Args: 

1094 resource: A `Tensor` of type `resource`. 

1095 name: A name for the operation (optional). 

1096 

1097 Returns: 

1098 A `Tensor` of type mutable `string`. 

1099 """ 

1100 _ctx = _context._context or _context.context() 

1101 tld = _ctx._thread_local_data 

1102 if tld.is_eager: 

1103 raise RuntimeError("fake_queue op does not support eager execution. Arg 'handle' is a ref.") 

1104 # Add nodes to the TensorFlow graph. 

1105 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1106 "FakeQueue", resource=resource, name=name) 

1107 _result = _outputs[:] 

1108 if _execute.must_record_gradient(): 

1109 _attrs = () 

1110 _inputs_flat = _op.inputs 

1111 _execute.record_gradient( 

1112 "FakeQueue", _inputs_flat, _attrs, _result) 

1113 _result, = _result 

1114 return _result 

1115 

1116FakeQueue = tf_export("raw_ops.FakeQueue")(_ops.to_raw_op(fake_queue)) 

1117 

1118 

def fake_queue_eager_fallback(resource, name, ctx):
  """Eager-mode stand-in for the deprecated `fake_queue`; unconditionally raises.

  FakeQueue returns a mutable ref-typed handle, which cannot exist under
  eager execution, so no eager fallback is possible.
  """
  message = "fake_queue op does not support eager execution. Arg 'handle' is a ref."
  raise RuntimeError(message)

1121 

1122def get_session_handle(value, name=None): 

1123 r"""Store the input tensor in the state of the current session. 

1124 

1125 Args: 

1126 value: A `Tensor`. The tensor to be stored. 

1127 name: A name for the operation (optional). 

1128 

1129 Returns: 

1130 A `Tensor` of type `string`. 

1131 """ 

1132 _ctx = _context._context or _context.context() 

1133 tld = _ctx._thread_local_data 

1134 if tld.is_eager: 

1135 try: 

1136 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1137 _ctx, "GetSessionHandle", name, value) 

1138 return _result 

1139 except _core._NotOkStatusException as e: 

1140 _ops.raise_from_not_ok_status(e, name) 

1141 except _core._FallbackException: 

1142 pass 

1143 try: 

1144 return get_session_handle_eager_fallback( 

1145 value, name=name, ctx=_ctx) 

1146 except _core._SymbolicException: 

1147 pass # Add nodes to the TensorFlow graph. 

1148 # Add nodes to the TensorFlow graph. 

1149 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1150 "GetSessionHandle", value=value, name=name) 

1151 _result = _outputs[:] 

1152 if _execute.must_record_gradient(): 

1153 _attrs = ("T", _op._get_attr_type("T")) 

1154 _inputs_flat = _op.inputs 

1155 _execute.record_gradient( 

1156 "GetSessionHandle", _inputs_flat, _attrs, _result) 

1157 _result, = _result 

1158 return _result 

1159 

1160GetSessionHandle = tf_export("raw_ops.GetSessionHandle")(_ops.to_raw_op(get_session_handle)) 

1161 

1162 

1163def get_session_handle_eager_fallback(value, name, ctx): 

1164 _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, []) 

1165 _inputs_flat = [value] 

1166 _attrs = ("T", _attr_T) 

1167 _result = _execute.execute(b"GetSessionHandle", 1, inputs=_inputs_flat, 

1168 attrs=_attrs, ctx=ctx, name=name) 

1169 if _execute.must_record_gradient(): 

1170 _execute.record_gradient( 

1171 "GetSessionHandle", _inputs_flat, _attrs, _result) 

1172 _result, = _result 

1173 return _result 

1174 

1175 

1176def get_session_handle_v2(value, name=None): 

1177 r"""Store the input tensor in the state of the current session. 

1178 

1179 Args: 

1180 value: A `Tensor`. The tensor to be stored. 

1181 name: A name for the operation (optional). 

1182 

1183 Returns: 

1184 A `Tensor` of type `resource`. 

1185 """ 

1186 _ctx = _context._context or _context.context() 

1187 tld = _ctx._thread_local_data 

1188 if tld.is_eager: 

1189 try: 

1190 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1191 _ctx, "GetSessionHandleV2", name, value) 

1192 return _result 

1193 except _core._NotOkStatusException as e: 

1194 _ops.raise_from_not_ok_status(e, name) 

1195 except _core._FallbackException: 

1196 pass 

1197 try: 

1198 return get_session_handle_v2_eager_fallback( 

1199 value, name=name, ctx=_ctx) 

1200 except _core._SymbolicException: 

1201 pass # Add nodes to the TensorFlow graph. 

1202 # Add nodes to the TensorFlow graph. 

1203 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1204 "GetSessionHandleV2", value=value, name=name) 

1205 _result = _outputs[:] 

1206 if _execute.must_record_gradient(): 

1207 _attrs = ("T", _op._get_attr_type("T")) 

1208 _inputs_flat = _op.inputs 

1209 _execute.record_gradient( 

1210 "GetSessionHandleV2", _inputs_flat, _attrs, _result) 

1211 _result, = _result 

1212 return _result 

1213 

1214GetSessionHandleV2 = tf_export("raw_ops.GetSessionHandleV2")(_ops.to_raw_op(get_session_handle_v2)) 

1215 

1216 

1217def get_session_handle_v2_eager_fallback(value, name, ctx): 

1218 _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, []) 

1219 _inputs_flat = [value] 

1220 _attrs = ("T", _attr_T) 

1221 _result = _execute.execute(b"GetSessionHandleV2", 1, inputs=_inputs_flat, 

1222 attrs=_attrs, ctx=ctx, name=name) 

1223 if _execute.must_record_gradient(): 

1224 _execute.record_gradient( 

1225 "GetSessionHandleV2", _inputs_flat, _attrs, _result) 

1226 _result, = _result 

1227 return _result 

1228 

1229 

1230def get_session_tensor(handle, dtype, name=None): 

1231 r"""Get the value of the tensor specified by its handle. 

1232 

1233 Args: 

1234 handle: A `Tensor` of type `string`. 

1235 The handle for a tensor stored in the session state. 

1236 dtype: A `tf.DType`. The type of the output value. 

1237 name: A name for the operation (optional). 

1238 

1239 Returns: 

1240 A `Tensor` of type `dtype`. 

1241 """ 

1242 _ctx = _context._context or _context.context() 

1243 tld = _ctx._thread_local_data 

1244 if tld.is_eager: 

1245 try: 

1246 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1247 _ctx, "GetSessionTensor", name, handle, "dtype", dtype) 

1248 return _result 

1249 except _core._NotOkStatusException as e: 

1250 _ops.raise_from_not_ok_status(e, name) 

1251 except _core._FallbackException: 

1252 pass 

1253 try: 

1254 return get_session_tensor_eager_fallback( 

1255 handle, dtype=dtype, name=name, ctx=_ctx) 

1256 except _core._SymbolicException: 

1257 pass # Add nodes to the TensorFlow graph. 

1258 # Add nodes to the TensorFlow graph. 

1259 dtype = _execute.make_type(dtype, "dtype") 

1260 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1261 "GetSessionTensor", handle=handle, dtype=dtype, name=name) 

1262 _result = _outputs[:] 

1263 if _execute.must_record_gradient(): 

1264 _attrs = ("dtype", _op._get_attr_type("dtype")) 

1265 _inputs_flat = _op.inputs 

1266 _execute.record_gradient( 

1267 "GetSessionTensor", _inputs_flat, _attrs, _result) 

1268 _result, = _result 

1269 return _result 

1270 

1271GetSessionTensor = tf_export("raw_ops.GetSessionTensor")(_ops.to_raw_op(get_session_tensor)) 

1272 

1273 

1274def get_session_tensor_eager_fallback(handle, dtype, name, ctx): 

1275 dtype = _execute.make_type(dtype, "dtype") 

1276 handle = _ops.convert_to_tensor(handle, _dtypes.string) 

1277 _inputs_flat = [handle] 

1278 _attrs = ("dtype", dtype) 

1279 _result = _execute.execute(b"GetSessionTensor", 1, inputs=_inputs_flat, 

1280 attrs=_attrs, ctx=ctx, name=name) 

1281 if _execute.must_record_gradient(): 

1282 _execute.record_gradient( 

1283 "GetSessionTensor", _inputs_flat, _attrs, _result) 

1284 _result, = _result 

1285 return _result 

1286 

1287 

1288def map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None): 

1289 r"""Op removes all elements in the underlying container. 

1290 

1291 Args: 

1292 dtypes: A list of `tf.DTypes`. 

1293 capacity: An optional `int` that is `>= 0`. Defaults to `0`. 

1294 memory_limit: An optional `int` that is `>= 0`. Defaults to `0`. 

1295 container: An optional `string`. Defaults to `""`. 

1296 shared_name: An optional `string`. Defaults to `""`. 

1297 name: A name for the operation (optional). 

1298 

1299 Returns: 

1300 The created Operation. 

1301 """ 

1302 _ctx = _context._context or _context.context() 

1303 tld = _ctx._thread_local_data 

1304 if tld.is_eager: 

1305 try: 

1306 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1307 _ctx, "MapClear", name, "capacity", capacity, "memory_limit", 

1308 memory_limit, "dtypes", dtypes, "container", container, "shared_name", 

1309 shared_name) 

1310 return _result 

1311 except _core._NotOkStatusException as e: 

1312 _ops.raise_from_not_ok_status(e, name) 

1313 except _core._FallbackException: 

1314 pass 

1315 try: 

1316 return map_clear_eager_fallback( 

1317 capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, 

1318 container=container, shared_name=shared_name, name=name, ctx=_ctx) 

1319 except _core._SymbolicException: 

1320 pass # Add nodes to the TensorFlow graph. 

1321 # Add nodes to the TensorFlow graph. 

1322 if not isinstance(dtypes, (list, tuple)): 

1323 raise TypeError( 

1324 "Expected list for 'dtypes' argument to " 

1325 "'map_clear' Op, not %r." % dtypes) 

1326 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1327 if capacity is None: 

1328 capacity = 0 

1329 capacity = _execute.make_int(capacity, "capacity") 

1330 if memory_limit is None: 

1331 memory_limit = 0 

1332 memory_limit = _execute.make_int(memory_limit, "memory_limit") 

1333 if container is None: 

1334 container = "" 

1335 container = _execute.make_str(container, "container") 

1336 if shared_name is None: 

1337 shared_name = "" 

1338 shared_name = _execute.make_str(shared_name, "shared_name") 

1339 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1340 "MapClear", dtypes=dtypes, capacity=capacity, 

1341 memory_limit=memory_limit, container=container, 

1342 shared_name=shared_name, name=name) 

1343 return _op 

1344MapClear = tf_export("raw_ops.MapClear")(_ops.to_raw_op(map_clear)) 

1345 

1346 

1347def map_clear_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx): 

1348 if not isinstance(dtypes, (list, tuple)): 

1349 raise TypeError( 

1350 "Expected list for 'dtypes' argument to " 

1351 "'map_clear' Op, not %r." % dtypes) 

1352 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1353 if capacity is None: 

1354 capacity = 0 

1355 capacity = _execute.make_int(capacity, "capacity") 

1356 if memory_limit is None: 

1357 memory_limit = 0 

1358 memory_limit = _execute.make_int(memory_limit, "memory_limit") 

1359 if container is None: 

1360 container = "" 

1361 container = _execute.make_str(container, "container") 

1362 if shared_name is None: 

1363 shared_name = "" 

1364 shared_name = _execute.make_str(shared_name, "shared_name") 

1365 _inputs_flat = [] 

1366 _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes", 

1367 dtypes, "container", container, "shared_name", shared_name) 

1368 _result = _execute.execute(b"MapClear", 0, inputs=_inputs_flat, 

1369 attrs=_attrs, ctx=ctx, name=name) 

1370 _result = None 

1371 return _result 

1372 

1373 

1374def map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None): 

1375 r"""Op returns the number of incomplete elements in the underlying container. 

1376 

1377 Args: 

1378 dtypes: A list of `tf.DTypes`. 

1379 capacity: An optional `int` that is `>= 0`. Defaults to `0`. 

1380 memory_limit: An optional `int` that is `>= 0`. Defaults to `0`. 

1381 container: An optional `string`. Defaults to `""`. 

1382 shared_name: An optional `string`. Defaults to `""`. 

1383 name: A name for the operation (optional). 

1384 

1385 Returns: 

1386 A `Tensor` of type `int32`. 

1387 """ 

1388 _ctx = _context._context or _context.context() 

1389 tld = _ctx._thread_local_data 

1390 if tld.is_eager: 

1391 try: 

1392 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1393 _ctx, "MapIncompleteSize", name, "capacity", capacity, "memory_limit", 

1394 memory_limit, "dtypes", dtypes, "container", container, "shared_name", 

1395 shared_name) 

1396 return _result 

1397 except _core._NotOkStatusException as e: 

1398 _ops.raise_from_not_ok_status(e, name) 

1399 except _core._FallbackException: 

1400 pass 

1401 try: 

1402 return map_incomplete_size_eager_fallback( 

1403 capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, 

1404 container=container, shared_name=shared_name, name=name, ctx=_ctx) 

1405 except _core._SymbolicException: 

1406 pass # Add nodes to the TensorFlow graph. 

1407 # Add nodes to the TensorFlow graph. 

1408 if not isinstance(dtypes, (list, tuple)): 

1409 raise TypeError( 

1410 "Expected list for 'dtypes' argument to " 

1411 "'map_incomplete_size' Op, not %r." % dtypes) 

1412 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1413 if capacity is None: 

1414 capacity = 0 

1415 capacity = _execute.make_int(capacity, "capacity") 

1416 if memory_limit is None: 

1417 memory_limit = 0 

1418 memory_limit = _execute.make_int(memory_limit, "memory_limit") 

1419 if container is None: 

1420 container = "" 

1421 container = _execute.make_str(container, "container") 

1422 if shared_name is None: 

1423 shared_name = "" 

1424 shared_name = _execute.make_str(shared_name, "shared_name") 

1425 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1426 "MapIncompleteSize", dtypes=dtypes, capacity=capacity, 

1427 memory_limit=memory_limit, container=container, 

1428 shared_name=shared_name, name=name) 

1429 _result = _outputs[:] 

1430 if _execute.must_record_gradient(): 

1431 _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit", 

1432 _op._get_attr_int("memory_limit"), "dtypes", 

1433 _op.get_attr("dtypes"), "container", _op.get_attr("container"), 

1434 "shared_name", _op.get_attr("shared_name")) 

1435 _inputs_flat = _op.inputs 

1436 _execute.record_gradient( 

1437 "MapIncompleteSize", _inputs_flat, _attrs, _result) 

1438 _result, = _result 

1439 return _result 

1440 

1441MapIncompleteSize = tf_export("raw_ops.MapIncompleteSize")(_ops.to_raw_op(map_incomplete_size)) 

1442 

1443 

1444def map_incomplete_size_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx): 

1445 if not isinstance(dtypes, (list, tuple)): 

1446 raise TypeError( 

1447 "Expected list for 'dtypes' argument to " 

1448 "'map_incomplete_size' Op, not %r." % dtypes) 

1449 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1450 if capacity is None: 

1451 capacity = 0 

1452 capacity = _execute.make_int(capacity, "capacity") 

1453 if memory_limit is None: 

1454 memory_limit = 0 

1455 memory_limit = _execute.make_int(memory_limit, "memory_limit") 

1456 if container is None: 

1457 container = "" 

1458 container = _execute.make_str(container, "container") 

1459 if shared_name is None: 

1460 shared_name = "" 

1461 shared_name = _execute.make_str(shared_name, "shared_name") 

1462 _inputs_flat = [] 

1463 _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes", 

1464 dtypes, "container", container, "shared_name", shared_name) 

1465 _result = _execute.execute(b"MapIncompleteSize", 1, inputs=_inputs_flat, 

1466 attrs=_attrs, ctx=ctx, name=name) 

1467 if _execute.must_record_gradient(): 

1468 _execute.record_gradient( 

1469 "MapIncompleteSize", _inputs_flat, _attrs, _result) 

1470 _result, = _result 

1471 return _result 

1472 

1473 

def map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified key. If the

  underlying container does not contain this key
  this op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first. A _FallbackException means the
    # fast path could not handle these inputs, so retry via the Python
    # fallback below; a _NotOkStatusException is a real op failure.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "MapPeek", name, key, indices, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return map_peek_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MapPeek", key=key, indices=indices, dtypes=dtypes, capacity=capacity,
                   memory_limit=memory_limit, container=container,
                   shared_name=shared_name, name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    # Re-read the attrs off the created op so the recorded values are the
    # canonicalized ones.
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MapPeek", _inputs_flat, _attrs, _result)
  return _result

# Expose the wrapper as tf.raw_ops.MapPeek.
MapPeek = tf_export("raw_ops.MapPeek")(_ops.to_raw_op(map_peek))

1549 

1550 

def map_peek_eager_fallback(key, indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for MapPeek: normalize attrs and execute the op."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults for missing attrs, then coerce each to its
  # canonical attr type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flat_inputs = [key, indices]
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per requested dtype.
  outputs = _execute.execute(b"MapPeek", len(dtypes), inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MapPeek", flat_inputs, attr_tuple, outputs)
  return outputs

1580 

1581 

def map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "MapSize", name, "capacity", capacity, "memory_limit",
          memory_limit, "dtypes", dtypes, "container", container, "shared_name",
          shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return map_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MapSize", dtypes=dtypes, capacity=capacity,
                   memory_limit=memory_limit, container=container,
                   shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MapSize", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result

# Expose the wrapper as tf.raw_ops.MapSize.
MapSize = tf_export("raw_ops.MapSize")(_ops.to_raw_op(map_size))

1650 

1651 

def map_size_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for MapSize: normalize attrs and run the op."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # MapSize consumes no tensors; the state lives in the shared container.
  flat_inputs = []
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  outputs = _execute.execute(b"MapSize", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MapSize", flat_inputs, attr_tuple, outputs)
  size_tensor, = outputs
  return size_tensor

1680 

1681 

def map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage (key, values) in the underlying container which behaves like a hashtable.

  Args:
    key: A `Tensor` of type `int64`. int64
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors whose data types
      should adhere to the types listed in `dtypes`.
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "MapStage", name, key, indices, values, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return map_stage_eager_fallback(
          key, indices, values, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # MapStage produces no outputs; return the created Operation itself.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MapStage", key=key, indices=indices, values=values, dtypes=dtypes,
                    capacity=capacity, memory_limit=memory_limit,
                    container=container, shared_name=shared_name, name=name)
  return _op
# Expose the wrapper as tf.raw_ops.MapStage.
MapStage = tf_export("raw_ops.MapStage")(_ops.to_raw_op(map_stage))

1749 

1750 

def map_stage_eager_fallback(key, indices, values, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for MapStage: stage (key, values) into the container."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # The staged values may have heterogeneous dtypes; their actual dtypes
  # are carried in the op's "fake_dtypes" attr.
  _attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flat_inputs = [key, indices] + list(values)
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "fake_dtypes", _attr_fake_dtypes, "container",
                container, "shared_name", shared_name)
  # Zero outputs: MapStage is executed purely for its side effect.
  _execute.execute(b"MapStage", 0, inputs=flat_inputs, attrs=attr_tuple,
                   ctx=ctx, name=name)
  return None

1780 

1781 

def map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the values associated with the key

  from the underlying container. If the underlying container
  does not contain this key, the op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "MapUnstage", name, key, indices, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return map_unstage_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MapUnstage", key=key, indices=indices, dtypes=dtypes,
                      capacity=capacity, memory_limit=memory_limit,
                      container=container, shared_name=shared_name, name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    # Re-read the attrs off the created op so the recorded values are the
    # canonicalized ones.
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MapUnstage", _inputs_flat, _attrs, _result)
  return _result

# Expose the wrapper as tf.raw_ops.MapUnstage.
MapUnstage = tf_export("raw_ops.MapUnstage")(_ops.to_raw_op(map_unstage))

1857 

1858 

def map_unstage_eager_fallback(key, indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for MapUnstage: pop the values stored under key."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flat_inputs = [key, indices]
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per requested dtype.
  outputs = _execute.execute(b"MapUnstage", len(dtypes), inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MapUnstage", flat_inputs, attr_tuple, outputs)
  return outputs

1888 

# Structured result type for MapUnstageNoKey: the popped key tensor plus the
# list of value tensors associated with it.
_MapUnstageNoKeyOutput = collections.namedtuple(
    "MapUnstageNoKey",
    ["key", "values"])

1892 

1893 

def map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns a random (key, value)

  from the underlying container. If the underlying container
  does not contain elements, the op will block until it does.

  Args:
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (key, values).

    key: A `Tensor` of type `int64`.
    values: A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "MapUnstageNoKey", name, indices, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      _result = _MapUnstageNoKeyOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return map_unstage_no_key_eager_fallback(
          indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MapUnstageNoKey", indices=indices, dtypes=dtypes, capacity=capacity,
                           memory_limit=memory_limit, container=container,
                           shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MapUnstageNoKey", _inputs_flat, _attrs, _result)
  # Regroup the flat output list as (key, [values...]).
  _result = _result[:1] + [_result[1:]]
  _result = _MapUnstageNoKeyOutput._make(_result)
  return _result

# Expose the wrapper as tf.raw_ops.MapUnstageNoKey.
MapUnstageNoKey = tf_export("raw_ops.MapUnstageNoKey")(_ops.to_raw_op(map_unstage_no_key))

1972 

1973 

def map_unstage_no_key_eager_fallback(indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for MapUnstageNoKey: pop a random (key, values)."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flat_inputs = [indices]
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  # One output per requested dtype plus one extra for the key tensor.
  outputs = _execute.execute(b"MapUnstageNoKey", len(dtypes) + 1,
                             inputs=flat_inputs, attrs=attr_tuple, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MapUnstageNoKey", flat_inputs, attr_tuple,
                             outputs)
  # Regroup the flat output list as (key, [values...]).
  return _MapUnstageNoKeyOutput._make(outputs[:1] + [outputs[1:]])

2005 

2006 

def ordered_map_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes all elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "OrderedMapClear", name, "capacity", capacity, "memory_limit",
          memory_limit, "dtypes", dtypes, "container", container, "shared_name",
          shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_clear_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # OrderedMapClear produces no outputs; return the created Operation.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapClear", dtypes=dtypes, capacity=capacity,
                           memory_limit=memory_limit, container=container,
                           shared_name=shared_name, name=name)
  return _op
# Expose the wrapper as tf.raw_ops.OrderedMapClear.
OrderedMapClear = tf_export("raw_ops.OrderedMapClear")(_ops.to_raw_op(ordered_map_clear))

2064 

2065 

def ordered_map_clear_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for OrderedMapClear: empty the shared container."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # No tensor inputs and zero outputs: executed purely for its side effect.
  flat_inputs = []
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  _execute.execute(b"OrderedMapClear", 0, inputs=flat_inputs,
                   attrs=attr_tuple, ctx=ctx, name=name)
  return None

2091 

2092 

def ordered_map_incomplete_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of incomplete elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "OrderedMapIncompleteSize", name, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_incomplete_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_incomplete_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapIncompleteSize", dtypes=dtypes, capacity=capacity,
                                    memory_limit=memory_limit,
                                    container=container,
                                    shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OrderedMapIncompleteSize", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element list.
  _result, = _result
  return _result

# Expose the wrapper as tf.raw_ops.OrderedMapIncompleteSize.
OrderedMapIncompleteSize = tf_export("raw_ops.OrderedMapIncompleteSize")(_ops.to_raw_op(ordered_map_incomplete_size))

2162 

2163 

def ordered_map_incomplete_size_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for OrderedMapIncompleteSize."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_incomplete_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Substitute defaults, then coerce each attr to its canonical type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  # No tensor inputs; the count comes from the shared container state.
  flat_inputs = []
  attr_tuple = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
                dtypes, "container", container, "shared_name", shared_name)
  outputs = _execute.execute(b"OrderedMapIncompleteSize", 1,
                             inputs=flat_inputs, attrs=attr_tuple, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("OrderedMapIncompleteSize", flat_inputs,
                             attr_tuple, outputs)
  size_tensor, = outputs
  return size_tensor

2193 

2194 

def ordered_map_peek(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified key. If the

  underlying container does not contain this key
  this op will block until it does. This Op is optimized for
  performance.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; fall back to the Python
    # implementation when the fast path declines these inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "OrderedMapPeek", name, key, indices, "capacity", capacity,
          "memory_limit", memory_limit, "dtypes", dtypes, "container",
          container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_peek_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize every attr, then register the op node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapPeek", key=key, indices=indices, dtypes=dtypes,
                          capacity=capacity, memory_limit=memory_limit,
                          container=container, shared_name=shared_name,
                          name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    # Re-read the attrs off the created op so the recorded values are the
    # canonicalized ones.
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OrderedMapPeek", _inputs_flat, _attrs, _result)
  return _result

# Expose the wrapper as tf.raw_ops.OrderedMapPeek.
OrderedMapPeek = tf_export("raw_ops.OrderedMapPeek")(_ops.to_raw_op(ordered_map_peek))

2272 

2273 

def ordered_map_peek_eager_fallback(key, indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Python eager fallback for OrderedMapPeek (see `ordered_map_peek`)."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per requested dtype.
  _result = _execute.execute(b"OrderedMapPeek", len(dtypes),
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OrderedMapPeek", _inputs_flat, _attrs, _result)
  return _result

2304 

2305 

def ordered_map_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: fast path first (this op has no tensor inputs, only attrs);
    # fall back to the Python eager path on _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OrderedMapSize", name, "capacity", capacity, "memory_limit",
        memory_limit, "dtypes", dtypes, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapSize", dtypes=dtypes, capacity=capacity,
                          memory_limit=memory_limit, container=container,
                          shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OrderedMapSize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.OrderedMapSize.
OrderedMapSize = tf_export("raw_ops.OrderedMapSize")(_ops.to_raw_op(ordered_map_size))

2374 

2375 

def ordered_map_size_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Python eager fallback for OrderedMapSize (see `ordered_map_size`).

  Validates and normalizes the op attributes, runs the op through
  `_execute.execute`, and unwraps the single int32 size output.
  """
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Fill in defaults, then coerce each attribute to its canonical attr type.
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  op_inputs = []  # OrderedMapSize takes no tensor inputs.
  op_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
              dtypes, "container", container, "shared_name", shared_name)
  outputs = _execute.execute(b"OrderedMapSize", 1, inputs=op_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("OrderedMapSize", op_inputs, op_attrs, outputs)
  size, = outputs
  return size

2404 

2405 

def ordered_map_stage(key, indices, values, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage (key, values) in the underlying container which behaves like a ordered

  associative container. Elements are ordered by key.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    values: A list of `Tensor` objects. A list of tensors to insert; `dtypes`
      lists the data types that inserted values should adhere to.
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: fast path first, then the Python eager fallback on
    # _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OrderedMapStage", name, key, indices, values, "capacity",
        capacity, "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_stage_eager_fallback(
          key, indices, values, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapStage", key=key, indices=indices, values=values,
                           dtypes=dtypes, capacity=capacity,
                           memory_limit=memory_limit, container=container,
                           shared_name=shared_name, name=name)
  # Stage has no outputs; return the created Operation itself.
  return _op
# Exported as tf.raw_ops.OrderedMapStage.
OrderedMapStage = tf_export("raw_ops.OrderedMapStage")(_ops.to_raw_op(ordered_map_stage))

2476 

2477 

def ordered_map_stage_eager_fallback(key, indices, values, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Python eager fallback for OrderedMapStage (see `ordered_map_stage`)."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_stage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # The actual dtypes of `values` feed the op's "fake_dtypes" attr, distinct
  # from the declared "dtypes" attr.
  _attr_fake_dtypes, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices] + list(values)
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "fake_dtypes", _attr_fake_dtypes, "container", container,
  "shared_name", shared_name)
  # Zero outputs: staging is a pure side effect on the container.
  _result = _execute.execute(b"OrderedMapStage", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None
  return _result

2507 

2508 

def ordered_map_unstage(key, indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the values associated with the key

  from the underlying container. If the underlying container
  does not contain this key, the op will block until it does.

  Args:
    key: A `Tensor` of type `int64`.
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: fast path first, then the Python eager fallback on
    # _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OrderedMapUnstage", name, key, indices, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_unstage_eager_fallback(
          key, indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapUnstage", key=key, indices=indices, dtypes=dtypes,
                             capacity=capacity, memory_limit=memory_limit,
                             container=container, shared_name=shared_name,
                             name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OrderedMapUnstage", _inputs_flat, _attrs, _result)
  return _result

# Exported as tf.raw_ops.OrderedMapUnstage.
OrderedMapUnstage = tf_export("raw_ops.OrderedMapUnstage")(_ops.to_raw_op(ordered_map_unstage))

2585 

2586 

def ordered_map_unstage_eager_fallback(key, indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Python eager fallback for OrderedMapUnstage (see `ordered_map_unstage`)."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  key = _ops.convert_to_tensor(key, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [key, indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per requested dtype.
  _result = _execute.execute(b"OrderedMapUnstage", len(dtypes),
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OrderedMapUnstage", _inputs_flat, _attrs, _result)
  return _result

2617 

2618_OrderedMapUnstageNoKeyOutput = collections.namedtuple( 

2619 "OrderedMapUnstageNoKey", 

2620 ["key", "values"]) 

2621 

2622 

def ordered_map_unstage_no_key(indices, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes and returns the (key, value) element with the smallest

  key from the underlying container. If the underlying container
  does not contain elements, the op will block until it does.

  Args:
    indices: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (key, values).

    key: A `Tensor` of type `int64`.
    values: A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: fast path first, then the Python eager fallback on
    # _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "OrderedMapUnstageNoKey", name, indices, "capacity", capacity,
        "memory_limit", memory_limit, "dtypes", dtypes, "container",
        container, "shared_name", shared_name)
      _result = _OrderedMapUnstageNoKeyOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ordered_map_unstage_no_key_eager_fallback(
          indices, capacity=capacity, memory_limit=memory_limit,
          dtypes=dtypes, container=container, shared_name=shared_name,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "OrderedMapUnstageNoKey", indices=indices, dtypes=dtypes,
                                  capacity=capacity,
                                  memory_limit=memory_limit,
                                  container=container,
                                  shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result)
  # Repack the flat outputs: first tensor is the key, the rest are values.
  _result = _result[:1] + [_result[1:]]
  _result = _OrderedMapUnstageNoKeyOutput._make(_result)
  return _result

# Exported as tf.raw_ops.OrderedMapUnstageNoKey.
OrderedMapUnstageNoKey = tf_export("raw_ops.OrderedMapUnstageNoKey")(_ops.to_raw_op(ordered_map_unstage_no_key))

2703 

2704 

def ordered_map_unstage_no_key_eager_fallback(indices, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Python eager fallback for OrderedMapUnstageNoKey (see
  `ordered_map_unstage_no_key`)."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'ordered_map_unstage_no_key' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  # Normalize optional attributes to their declared defaults and attr types.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  _inputs_flat = [indices]
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  # len(dtypes) value outputs plus one key output.
  _result = _execute.execute(b"OrderedMapUnstageNoKey", len(dtypes) + 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "OrderedMapUnstageNoKey", _inputs_flat, _attrs, _result)
  # Repack the flat outputs: first tensor is the key, the rest are values.
  _result = _result[:1] + [_result[1:]]
  _result = _OrderedMapUnstageNoKeyOutput._make(_result)
  return _result

2736 

2737 

def padding_fifo_queue(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements in first-in first-out order.

  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
  size of any given element in the minibatch. See below for details.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types.
      Shapes of fixed rank but variable size are allowed by setting
      any shape dimension to -1. In this case, the inputs' shape may vary along
      the given dimension, and DequeueMany will pad the given dimension with
      zeros up to the maximum shape of all elements in the given batch.
      If the length of this attr is 0, different queue elements may have
      different ranks and shapes, but only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed output cannot be represented in eager mode; graph mode only.
    raise RuntimeError("padding_fifo_queue op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'padding_fifo_queue' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  # Normalize optional attributes to their declared defaults and attr types.
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'padding_fifo_queue' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "PaddingFIFOQueue", component_types=component_types, shapes=shapes,
                            capacity=capacity, container=container,
                            shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "PaddingFIFOQueue", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.PaddingFIFOQueue.
PaddingFIFOQueue = tf_export("raw_ops.PaddingFIFOQueue")(_ops.to_raw_op(padding_fifo_queue))

2815 

2816 

def padding_fifo_queue_eager_fallback(component_types, shapes, capacity, container, shared_name, name, ctx):
  """Eager fallback stub for PaddingFIFOQueue.

  Always raises: the op's 'handle' output is a reference type, which eager
  execution cannot represent, so there is no eager implementation.
  """
  raise RuntimeError(
      "padding_fifo_queue op does not support eager execution. Arg 'handle' is a ref.")

2819 

def padding_fifo_queue_v2(component_types, shapes=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements in first-in first-out order.

  Variable-size shapes are allowed by setting the corresponding shape dimensions
  to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
  size of any given element in the minibatch. See below for details.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types.
      Shapes of fixed rank but variable size are allowed by setting
      any shape dimension to -1. In this case, the inputs' shape may vary along
      the given dimension, and DequeueMany will pad the given dimension with
      zeros up to the maximum shape of all elements in the given batch.
      If the length of this attr is 0, different queue elements may have
      different ranks and shapes, but only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: fast path first (no tensor inputs, only attrs); fall back
    # to the Python eager path on _FallbackException.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "PaddingFIFOQueueV2", name, "component_types", component_types,
        "shapes", shapes, "capacity", capacity, "container", container,
        "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return padding_fifo_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  # Normalize optional attributes to their declared defaults and attr types.
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "PaddingFIFOQueueV2", component_types=component_types, shapes=shapes,
                              capacity=capacity, container=container,
                              shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "PaddingFIFOQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.PaddingFIFOQueueV2.
PaddingFIFOQueueV2 = tf_export("raw_ops.PaddingFIFOQueueV2")(_ops.to_raw_op(padding_fifo_queue_v2))

2912 

2913 

def padding_fifo_queue_v2_eager_fallback(component_types, shapes, capacity, container, shared_name, name, ctx):
  """Python eager fallback for PaddingFIFOQueueV2 (see `padding_fifo_queue_v2`)."""
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  # Normalize optional attributes to their declared defaults and attr types.
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'padding_fifo_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []  # This op has no tensor inputs, only attrs.
  _attrs = ("component_types", component_types, "shapes", shapes, "capacity",
  capacity, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"PaddingFIFOQueueV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "PaddingFIFOQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

2946 

2947 

def parallel_dynamic_stitch(indices, data, name=None):
  r"""Interleave the values from the `data` tensors into a single tensor.

  Builds a merged tensor such that

  ```python
      merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
  ```

  For example, if each `indices[m]` is scalar or vector, we have

  ```python
      # Scalar indices:
      merged[indices[m], ...] = data[m][...]

      # Vector indices:
      merged[indices[m][i], ...] = data[m][i, ...]
  ```

  Each `data[i].shape` must start with the corresponding `indices[i].shape`,
  and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
  must have `data[i].shape = indices[i].shape + constant`. In terms of this
  `constant`, the output shape is

      merged.shape = [max(indices)] + constant

  Values may be merged in parallel, so if an index appears in both `indices[m][i]`
  and `indices[n][j]`, the result may be invalid. This differs from the normal
  DynamicStitch operator that defines the behavior in that case.

  For example:

  ```python
      indices[0] = 6
      indices[1] = [4, 1]
      indices[2] = [[5, 2], [0, 3]]
      data[0] = [61, 62]
      data[1] = [[41, 42], [11, 12]]
      data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
      merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
                [51, 52], [61, 62]]
  ```

  This method can be used to merge partitions created by `dynamic_partition`
  as illustrated on the following example:

  ```python
      # Apply function (increments x_i) on elements for which a certain condition
      # apply (x_i != -1 in this example).
      x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
      condition_mask=tf.not_equal(x,tf.constant(-1.))
      partitioned_data = tf.dynamic_partition(
          x, tf.cast(condition_mask, tf.int32) , 2)
      partitioned_data[1] = partitioned_data[1] + 1.0
      condition_indices = tf.dynamic_partition(
          tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
      x = tf.dynamic_stitch(condition_indices, partitioned_data)
      # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
      # unchanged.
  ```

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
  </div>

  Args:
    indices: A list of at least 1 `Tensor` objects with type `int32`.
    data: A list with the same length as `indices` of `Tensor` objects with the same type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "ParallelDynamicStitch", name, indices, data)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the Python fallback.
    try:
      return parallel_dynamic_stitch_eager_fallback(
          indices, data, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(data, (list, tuple)):
    raise TypeError(
        "Expected list for 'data' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % data)
  if len(data) != _attr_N:
    raise ValueError(
        "List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(data), _attr_N))
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ParallelDynamicStitch", indices=indices, data=data, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ParallelDynamicStitch", _inputs_flat, _attrs, _result)
  _result, = _result  # Single output tensor.
  return _result

# Expose the wrapper as `tf.raw_ops.ParallelDynamicStitch`.
ParallelDynamicStitch = tf_export("raw_ops.ParallelDynamicStitch")(_ops.to_raw_op(parallel_dynamic_stitch))

3064 

3065 

def parallel_dynamic_stitch_eager_fallback(indices, data, name, ctx):
  """Eager-mode fallback for ParallelDynamicStitch (slow path)."""
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(data, (list, tuple)):
    raise TypeError(
        "Expected list for 'data' argument to "
        "'parallel_dynamic_stitch' Op, not %r." % data)
  if len(data) != _attr_N:
    raise ValueError(
        "List argument 'data' to 'parallel_dynamic_stitch' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(data), _attr_N))
  # Infer the common dtype T for all `data` tensors and convert them eagerly.
  _attr_T, data = _execute.args_to_matching_eager(list(data), ctx, [])
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int32)
  _inputs_flat = list(indices) + list(data)
  _attrs = ("N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"ParallelDynamicStitch", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ParallelDynamicStitch", _inputs_flat, _attrs, _result)
  _result, = _result  # Single output tensor.
  return _result

3092 

3093 

def priority_queue(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements sorted by the first component value.

  Note that the PriorityQueue requires the first component of any element
  to be a scalar int64, in addition to the other elements declared by
  component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
  and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
  entry in their input (resp. output) lists.

  Args:
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
      The type of each component in a value.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed outputs cannot exist under eager execution; graph-mode only.
    raise RuntimeError("priority_queue op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'priority_queue' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if component_types is None:
    component_types = []
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'priority_queue' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "PriorityQueue", shapes=shapes, component_types=component_types,
                         capacity=capacity, container=container,
                         shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "PriorityQueue", _inputs_flat, _attrs, _result)
  _result, = _result  # Single output: the queue handle.
  return _result

# Expose the wrapper as `tf.raw_ops.PriorityQueue`.
PriorityQueue = tf_export("raw_ops.PriorityQueue")(_ops.to_raw_op(priority_queue))

3169 

3170 

def priority_queue_eager_fallback(shapes, component_types, capacity, container, shared_name, name, ctx):
  """Eager fallback stub for PriorityQueue.

  The op produces a ref-typed handle, and ref tensors do not exist under
  eager execution, so this path unconditionally fails.
  """
  message = ("priority_queue op does not support eager execution."
             " Arg 'handle' is a ref.")
  raise RuntimeError(message)

3173 

def priority_queue_v2(shapes, component_types=[], capacity=-1, container="", shared_name="", name=None):
  r"""A queue that produces elements sorted by the first component value.

  Note that the PriorityQueue requires the first component of any element
  to be a scalar int64, in addition to the other elements declared by
  component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
  and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
  entry in their input (resp. output) lists.

  Args:
    shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    component_types: An optional list of `tf.DTypes`. Defaults to `[]`.
      The type of each component in a value.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "PriorityQueueV2", name, "component_types", component_types,
          "shapes", shapes, "capacity", capacity, "container", container,
          "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the Python fallback.
    try:
      return priority_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'priority_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if component_types is None:
    component_types = []
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'priority_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "PriorityQueueV2", shapes=shapes, component_types=component_types,
                           capacity=capacity, container=container,
                           shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "PriorityQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Single output: the queue resource handle.
  return _result

# Expose the wrapper as `tf.raw_ops.PriorityQueueV2`.
PriorityQueueV2 = tf_export("raw_ops.PriorityQueueV2")(_ops.to_raw_op(priority_queue_v2))

3264 

3265 

def priority_queue_v2_eager_fallback(shapes, component_types, capacity, container, shared_name, name, ctx):
  """Eager-mode fallback for the PriorityQueueV2 op.

  Validates and canonicalizes the attrs exactly like the graph path, then
  runs the op directly via `_execute.execute` and returns its single output.
  """
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'priority_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if component_types is None:
    component_types = []
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'priority_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []  # The op takes no tensor inputs; everything is an attr.
  _attrs = ("component_types", component_types, "shapes", shapes, "capacity",
  capacity, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"PriorityQueueV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "PriorityQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the single output tensor.
  return _result

3298 

3299 

def queue_close(handle, cancel_pending_enqueues=False, name=None):
  r"""Closes the given queue.

  This operation signals that no more elements will be enqueued in the
  given queue. Subsequent Enqueue(Many) operations will fail.
  Subsequent Dequeue(Many) operations will continue to succeed if
  sufficient elements remain in the queue. Subsequent Dequeue(Many)
  operations that would block will fail immediately.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
      If true, all pending enqueue requests that are
      blocked on the given queue will be canceled.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs cannot exist under eager execution; graph-mode only.
    raise RuntimeError("queue_close op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if cancel_pending_enqueues is None:
    cancel_pending_enqueues = False
  cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueClose", handle=handle,
                      cancel_pending_enqueues=cancel_pending_enqueues,
                      name=name)
  return _op  # No outputs; return the Operation itself.
# Expose the wrapper as `tf.raw_ops.QueueClose`.
QueueClose = tf_export("raw_ops.QueueClose")(_ops.to_raw_op(queue_close))

3333 

3334 

def queue_close_eager_fallback(handle, cancel_pending_enqueues, name, ctx):
  """Eager fallback stub for QueueClose.

  QueueClose takes a ref-typed queue handle, and ref tensors do not exist
  under eager execution, so this path unconditionally fails.
  """
  message = ("queue_close op does not support eager execution."
             " Arg 'handle' is a ref.")
  raise RuntimeError(message)

3337 

def queue_close_v2(handle, cancel_pending_enqueues=False, name=None):
  r"""Closes the given queue.

  This operation signals that no more elements will be enqueued in the
  given queue. Subsequent Enqueue(Many) operations will fail.
  Subsequent Dequeue(Many) operations will continue to succeed if
  sufficient elements remain in the queue. Subsequent Dequeue(Many)
  operations that would block will fail immediately.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    cancel_pending_enqueues: An optional `bool`. Defaults to `False`.
      If true, all pending enqueue requests that are
      blocked on the given queue will be canceled.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "QueueCloseV2", name, handle, "cancel_pending_enqueues",
          cancel_pending_enqueues)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the Python fallback.
    try:
      return queue_close_v2_eager_fallback(
          handle, cancel_pending_enqueues=cancel_pending_enqueues, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if cancel_pending_enqueues is None:
    cancel_pending_enqueues = False
  cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueCloseV2", handle=handle,
                        cancel_pending_enqueues=cancel_pending_enqueues,
                        name=name)
  return _op  # No outputs; return the Operation itself.
# Expose the wrapper as `tf.raw_ops.QueueCloseV2`.
QueueCloseV2 = tf_export("raw_ops.QueueCloseV2")(_ops.to_raw_op(queue_close_v2))

3385 

3386 

def queue_close_v2_eager_fallback(handle, cancel_pending_enqueues, name, ctx):
  """Eager-mode fallback for the QueueCloseV2 op (zero outputs)."""
  if cancel_pending_enqueues is None:
    cancel_pending_enqueues = False
  cancel_pending_enqueues = _execute.make_bool(cancel_pending_enqueues, "cancel_pending_enqueues")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  _inputs_flat = [handle]
  _attrs = ("cancel_pending_enqueues", cancel_pending_enqueues)
  _result = _execute.execute(b"QueueCloseV2", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # Op has no outputs.
  return _result

3398 

3399 

def queue_dequeue(handle, component_types, timeout_ms=-1, name=None):
  r"""Dequeues a tuple of one or more tensors from the given queue.

  This operation has k outputs, where k is the number of components
  in the tuples stored in the given queue, and output i is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until an element
  has been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is empty, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs cannot exist under eager execution; graph-mode only.
    raise RuntimeError("queue_dequeue op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeue", handle=handle, component_types=component_types,
                        timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeue", _inputs_flat, _attrs, _result)
  return _result  # List of tensors, one per component type (not unpacked).

# Expose the wrapper as `tf.raw_ops.QueueDequeue`.
QueueDequeue = tf_export("raw_ops.QueueDequeue")(_ops.to_raw_op(queue_dequeue))

3449 

3450 

def queue_dequeue_eager_fallback(handle, component_types, timeout_ms, name, ctx):
  """Eager fallback stub for QueueDequeue.

  QueueDequeue takes a ref-typed queue handle, and ref tensors do not exist
  under eager execution, so this path unconditionally fails.
  """
  message = ("queue_dequeue op does not support eager execution."
             " Arg 'handle' is a ref.")
  raise RuntimeError(message)

3453 

def queue_dequeue_many(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  If the queue is closed and there are fewer than `n` elements, then an
  OutOfRange error is returned.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor. All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until `n` elements
  have been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs cannot exist under eager execution; graph-mode only.
    raise RuntimeError("queue_dequeue_many op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_many' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeueMany", handle=handle, n=n,
                            component_types=component_types,
                            timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeueMany", _inputs_flat, _attrs, _result)
  return _result  # List of tensors, one per component type (not unpacked).

# Expose the wrapper as `tf.raw_ops.QueueDequeueMany`.
QueueDequeueMany = tf_export("raw_ops.QueueDequeueMany")(_ops.to_raw_op(queue_dequeue_many))

3512 

3513 

def queue_dequeue_many_eager_fallback(handle, n, component_types, timeout_ms, name, ctx):
  """Eager fallback stub for QueueDequeueMany.

  QueueDequeueMany takes a ref-typed queue handle, and ref tensors do not
  exist under eager execution, so this path unconditionally fails.
  """
  message = ("queue_dequeue_many op does not support eager execution."
             " Arg 'handle' is a ref.")
  raise RuntimeError(message)

3516 

def queue_dequeue_many_v2(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  If the queue is closed and there are fewer than `n` elements, then an
  OutOfRange error is returned.

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor. All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until `n` elements
  have been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch straight to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "QueueDequeueManyV2", name, handle, n, "component_types",
          component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the Python fallback.
    try:
      return queue_dequeue_many_v2_eager_fallback(
          handle, n, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_many_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeueManyV2", handle=handle, n=n,
                              component_types=component_types,
                              timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  # Generator-emitted guard for the zero-output case; presumably unreachable
  # here since component_types must have length >= 1 — TODO confirm.
  if not _result:
    return _op
  if _execute.must_record_gradient():
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeueManyV2", _inputs_flat, _attrs, _result)
  return _result  # List of tensors, one per component type (not unpacked).

# Expose the wrapper as `tf.raw_ops.QueueDequeueManyV2`.
QueueDequeueManyV2 = tf_export("raw_ops.QueueDequeueManyV2")(_ops.to_raw_op(queue_dequeue_many_v2))

3591 

3592 

def queue_dequeue_many_v2_eager_fallback(handle, n, component_types, timeout_ms, name, ctx):
  """Eager-mode fallback for QueueDequeueManyV2.

  Canonicalizes attrs, converts `handle`/`n` to tensors, and executes the op
  with one output per entry in `component_types`.
  """
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_many_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  n = _ops.convert_to_tensor(n, _dtypes.int32)
  _inputs_flat = [handle, n]
  _attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  _result = _execute.execute(b"QueueDequeueManyV2", len(component_types),
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "QueueDequeueManyV2", _inputs_flat, _attrs, _result)
  return _result  # List of tensors, one per component type (not unpacked).

3613 

3614 

def queue_dequeue_up_to(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  This operation is not supported by all queues. If a queue does not support
  DequeueUpTo, then an Unimplemented error is returned.

  If the queue is closed and there are more than 0 but less than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, less than `n` elements are returned immediately. If
  the queue is closed and there are 0 elements left in the queue, then
  an OutOfRange error is returned just like in QueueDequeueMany.
  Otherwise the behavior is identical to QueueDequeueMany:

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor. All of the components
  in the dequeued tuple will have size `n` in the 0th dimension.

  This operation has k outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The ref-typed `handle` cannot be represented as an eager tensor.
    raise RuntimeError("queue_dequeue_up_to op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_up_to' Op, not %r." % component_types)
  # Canonicalize attr values before building the op.
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeueUpTo", handle=handle, n=n,
                            component_types=component_types,
                            timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read the attrs from the created op so the recorded values match it.
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeueUpTo", _inputs_flat, _attrs, _result)
  return _result

QueueDequeueUpTo = tf_export("raw_ops.QueueDequeueUpTo")(_ops.to_raw_op(queue_dequeue_up_to))

3677 

3678 

def queue_dequeue_up_to_eager_fallback(handle, n, component_types, timeout_ms, name, ctx):
  """Eager-mode stub: QueueDequeueUpTo takes a ref-typed handle, so eager is unsupported."""
  del handle, n, component_types, timeout_ms, name, ctx  # unused; generated call protocol
  raise RuntimeError("queue_dequeue_up_to op does not support eager execution. Arg 'handle' is a ref.")

3681 

def queue_dequeue_up_to_v2(handle, n, component_types, timeout_ms=-1, name=None):
  r"""Dequeues `n` tuples of one or more tensors from the given queue.

  This operation is not supported by all queues. If a queue does not support
  DequeueUpTo, then an Unimplemented error is returned.

  If the queue is closed and there are more than 0 but less than `n`
  elements remaining, then instead of returning an OutOfRange error like
  QueueDequeueMany, less than `n` elements are returned immediately. If
  the queue is closed and there are 0 elements left in the queue, then
  an OutOfRange error is returned just like in QueueDequeueMany.
  Otherwise the behavior is identical to QueueDequeueMany:

  This operation concatenates queue-element component tensors along the
  0th dimension to make a single component tensor. All of the components
  in the dequeued tuple will have size n in the 0th dimension.

  This operation has `k` outputs, where `k` is the number of components in
  the tuples stored in the given queue, and output `i` is the ith
  component of the dequeued tuple.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    n: A `Tensor` of type `int32`. The number of tuples to dequeue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue has fewer than n elements, this operation
      will block for up to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueDequeueUpToV2", name, handle, n, "component_types",
        component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_dequeue_up_to_v2_eager_fallback(
          handle, n, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_up_to_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeueUpToV2", handle=handle, n=n,
                              component_types=component_types,
                              timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    # Attrs are re-read from the op so they reflect canonicalized values.
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeueUpToV2", _inputs_flat, _attrs, _result)
  return _result

QueueDequeueUpToV2 = tf_export("raw_ops.QueueDequeueUpToV2")(_ops.to_raw_op(queue_dequeue_up_to_v2))

3760 

3761 

def queue_dequeue_up_to_v2_eager_fallback(handle, n, component_types, timeout_ms, name, ctx):
  """Slow-path eager execution of QueueDequeueUpToV2 (used when the fast path bails out)."""
  # Coerce the attrs to canonical form, mirroring the graph path.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_up_to_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types")
                     for _t in component_types]
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource),
            _ops.convert_to_tensor(n, _dtypes.int32)]
  attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  # One output per requested component type.
  outputs = _execute.execute(b"QueueDequeueUpToV2", len(component_types),
                             inputs=inputs, attrs=attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueDequeueUpToV2", inputs, attrs, outputs)
  return outputs

3782 

3783 

def queue_dequeue_v2(handle, component_types, timeout_ms=-1, name=None):
  r"""Dequeues a tuple of one or more tensors from the given queue.

  This operation has k outputs, where k is the number of components
  in the tuples stored in the given queue, and output i is the ith
  component of the dequeued tuple.

  N.B. If the queue is empty, this operation will block until an element
  has been dequeued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a tuple.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is empty, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `component_types`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueDequeueV2", name, handle, "component_types",
        component_types, "timeout_ms", timeout_ms)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_dequeue_v2_eager_fallback(
          handle, component_types=component_types, timeout_ms=timeout_ms,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueDequeueV2", handle=handle, component_types=component_types,
                          timeout_ms=timeout_ms, name=name)
  _result = _outputs[:]
  if not _result:
    return _op
  if _execute.must_record_gradient():
    # Attrs are re-read from the op so they reflect canonicalized values.
    _attrs = ("component_types", _op.get_attr("component_types"),
              "timeout_ms", _op._get_attr_int("timeout_ms"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueDequeueV2", _inputs_flat, _attrs, _result)
  return _result

QueueDequeueV2 = tf_export("raw_ops.QueueDequeueV2")(_ops.to_raw_op(queue_dequeue_v2))

3849 

3850 

def queue_dequeue_v2_eager_fallback(handle, component_types, timeout_ms, name, ctx):
  """Slow-path eager execution of QueueDequeueV2 (used when the fast path bails out)."""
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'queue_dequeue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types")
                     for _t in component_types]
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  attrs = ("component_types", component_types, "timeout_ms", timeout_ms)
  # One output per requested component type.
  outputs = _execute.execute(b"QueueDequeueV2", len(component_types),
                             inputs=inputs, attrs=attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueDequeueV2", inputs, attrs, outputs)
  return outputs

3870 

3871 

def queue_enqueue(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues a tuple of one or more tensors in the given queue.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  element has been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is full, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed queue handles only exist in graph mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("queue_enqueue op does not support eager execution. Arg 'handle' is a ref.")
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  # Build the graph node; the op itself (not a tensor) is the result.
  _, _, enqueue_op, _ = _op_def_library._apply_op_helper(
      "QueueEnqueue", handle=handle, components=components,
      timeout_ms=timeout_ms, name=name)
  return enqueue_op

QueueEnqueue = tf_export("raw_ops.QueueEnqueue")(_ops.to_raw_op(queue_enqueue))

3907 

3908 

def queue_enqueue_eager_fallback(handle, components, timeout_ms, name, ctx):
  """Eager-mode stub: QueueEnqueue takes a ref-typed handle, so eager is unsupported."""
  del handle, components, timeout_ms, name, ctx  # unused; generated call protocol
  raise RuntimeError("queue_enqueue op does not support eager execution. Arg 'handle' is a ref.")

3911 

def queue_enqueue_many(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues zero or more tuples of one or more tensors in the given queue.

  This operation slices each component tensor along the 0th dimension to
  make multiple queue elements. All of the tuple components must have the
  same size in the 0th dimension.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  elements have been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should
      be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is too full, this operation will block for up
      to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  # Ref-typed queue handles only exist in graph mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("queue_enqueue_many op does not support eager execution. Arg 'handle' is a ref.")
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  # Build the graph node; the op itself (not a tensor) is the result.
  _, _, enqueue_op, _ = _op_def_library._apply_op_helper(
      "QueueEnqueueMany", handle=handle, components=components,
      timeout_ms=timeout_ms, name=name)
  return enqueue_op

QueueEnqueueMany = tf_export("raw_ops.QueueEnqueueMany")(_ops.to_raw_op(queue_enqueue_many))

3952 

3953 

def queue_enqueue_many_eager_fallback(handle, components, timeout_ms, name, ctx):
  """Eager-mode stub: QueueEnqueueMany takes a ref-typed handle, so eager is unsupported."""
  del handle, components, timeout_ms, name, ctx  # unused; generated call protocol
  raise RuntimeError("queue_enqueue_many op does not support eager execution. Arg 'handle' is a ref.")

3956 

def queue_enqueue_many_v2(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues zero or more tuples of one or more tensors in the given queue.

  This operation slices each component tensor along the 0th dimension to
  make multiple queue elements. All of the tuple components must have the
  same size in the 0th dimension.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  elements have been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should
      be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is too full, this operation will block for up
      to timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueEnqueueManyV2", name, handle, components, "timeout_ms",
        timeout_ms)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_enqueue_many_v2_eager_fallback(
          handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueEnqueueManyV2", handle=handle, components=components,
                              timeout_ms=timeout_ms, name=name)
  # The op has no outputs; callers receive the Operation itself.
  return _op

QueueEnqueueManyV2 = tf_export("raw_ops.QueueEnqueueManyV2")(_ops.to_raw_op(queue_enqueue_many_v2))

4010 

4011 

def queue_enqueue_many_v2_eager_fallback(handle, components, timeout_ms, name, ctx):
  """Slow-path eager execution of QueueEnqueueManyV2; returns None (op has no outputs)."""
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  # Infer the Tcomponents attr from the (possibly mixed-dtype) component list.
  component_dtypes, components = _execute.convert_to_mixed_eager_tensors(
      components, ctx)
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  inputs.extend(components)
  attrs = ("Tcomponents", component_dtypes, "timeout_ms", timeout_ms)
  _execute.execute(b"QueueEnqueueManyV2", 0, inputs=inputs, attrs=attrs,
                   ctx=ctx, name=name)
  return None

4024 

4025 

def queue_enqueue_v2(handle, components, timeout_ms=-1, name=None):
  r"""Enqueues a tuple of one or more tensors in the given queue.

  The components input has k elements, which correspond to the components of
  tuples stored in the given queue.

  N.B. If the queue is full, this operation will block until the given
  element has been enqueued (or 'timeout_ms' elapses, if specified).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    components: A list of `Tensor` objects.
      One or more tensors from which the enqueued tensors should be taken.
    timeout_ms: An optional `int`. Defaults to `-1`.
      If the queue is full, this operation will block for up to
      timeout_ms milliseconds.
      Note: This option is not supported yet.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueEnqueueV2", name, handle, components, "timeout_ms",
        timeout_ms)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_enqueue_v2_eager_fallback(
          handle, components, timeout_ms=timeout_ms, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if timeout_ms is None:
    timeout_ms = -1
  timeout_ms = _execute.make_int(timeout_ms, "timeout_ms")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueEnqueueV2", handle=handle, components=components,
                          timeout_ms=timeout_ms, name=name)
  # The op has no outputs; callers receive the Operation itself.
  return _op

QueueEnqueueV2 = tf_export("raw_ops.QueueEnqueueV2")(_ops.to_raw_op(queue_enqueue_v2))

4074 

4075 

def queue_enqueue_v2_eager_fallback(handle, components, timeout_ms, name, ctx):
  """Slow-path eager execution of QueueEnqueueV2; returns None (op has no outputs)."""
  timeout_ms = _execute.make_int(-1 if timeout_ms is None else timeout_ms,
                                 "timeout_ms")
  # Infer the Tcomponents attr from the (possibly mixed-dtype) component list.
  component_dtypes, components = _execute.convert_to_mixed_eager_tensors(
      components, ctx)
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  inputs.extend(components)
  attrs = ("Tcomponents", component_dtypes, "timeout_ms", timeout_ms)
  _execute.execute(b"QueueEnqueueV2", 0, inputs=inputs, attrs=attrs,
                   ctx=ctx, name=name)
  return None

4088 

4089 

def queue_is_closed(handle, name=None):
  r"""Returns true if queue is closed.

  This operation returns true if the queue is closed and false if the queue
  is open.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed queue handles only exist in graph mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("queue_is_closed op does not support eager execution. Arg 'handle' is a ref.")
  # Build the QueueIsClosed node and return its single bool output.
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "QueueIsClosed", handle=handle, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueIsClosed", op.inputs, (), results)
  return results[0]

QueueIsClosed = tf_export("raw_ops.QueueIsClosed")(_ops.to_raw_op(queue_is_closed))

4120 

4121 

def queue_is_closed_eager_fallback(handle, name, ctx):
  """Eager-mode stub: QueueIsClosed takes a ref-typed handle, so eager is unsupported."""
  del handle, name, ctx  # unused; generated call protocol
  raise RuntimeError("queue_is_closed op does not support eager execution. Arg 'handle' is a ref.")

4124 

def queue_is_closed_v2(handle, name=None):
  r"""Returns true if queue is closed.

  This operation returns true if the queue is closed and false if the queue
  is open.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueIsClosedV2", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_is_closed_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueIsClosedV2", handle=handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueIsClosedV2", _inputs_flat, _attrs, _result)
  # Unpack the single bool output.
  _result, = _result
  return _result

QueueIsClosedV2 = tf_export("raw_ops.QueueIsClosedV2")(_ops.to_raw_op(queue_is_closed_v2))

4167 

4168 

def queue_is_closed_v2_eager_fallback(handle, name, ctx):
  """Slow-path eager execution of QueueIsClosedV2; returns the scalar bool tensor."""
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  results = _execute.execute(b"QueueIsClosedV2", 1, inputs=inputs, attrs=None,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueIsClosedV2", inputs, None, results)
  return results[0]

4180 

4181 

def queue_size(handle, name=None):
  r"""Computes the number of elements in the given queue.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  ctx = _context._context or _context.context()
  # Ref-typed queue handles only exist in graph mode.
  if ctx._thread_local_data.is_eager:
    raise RuntimeError("queue_size op does not support eager execution. Arg 'handle' is a ref.")
  # Build the QueueSize node and return its single int32 output.
  _, _, op, outputs = _op_def_library._apply_op_helper(
      "QueueSize", handle=handle, name=name)
  results = list(outputs)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueSize", op.inputs, (), results)
  return results[0]

QueueSize = tf_export("raw_ops.QueueSize")(_ops.to_raw_op(queue_size))

4209 

4210 

def queue_size_eager_fallback(handle, name, ctx):
  """Eager-mode stub: QueueSize takes a ref-typed handle, so eager is unsupported."""
  del handle, name, ctx  # unused; generated call protocol
  raise RuntimeError("queue_size op does not support eager execution. Arg 'handle' is a ref.")

4213 

def queue_size_v2(handle, name=None):
  r"""Computes the number of elements in the given queue.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a queue.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: dispatch directly to the C++ eager runtime.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "QueueSizeV2", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the Python slow path below
    try:
      return queue_size_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QueueSizeV2", handle=handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QueueSizeV2", _inputs_flat, _attrs, _result)
  # Unpack the single int32 output.
  _result, = _result
  return _result

QueueSizeV2 = tf_export("raw_ops.QueueSizeV2")(_ops.to_raw_op(queue_size_v2))

4253 

4254 

def queue_size_v2_eager_fallback(handle, name, ctx):
  """Slow-path eager execution of QueueSizeV2; returns the scalar int32 size tensor."""
  inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  results = _execute.execute(b"QueueSizeV2", 1, inputs=inputs, attrs=None,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("QueueSizeV2", inputs, None, results)
  return results[0]

4266 

4267 

def random_shuffle_queue(component_types, shapes=[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
  r"""A queue that randomizes the order of elements.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    min_after_dequeue: An optional `int`. Defaults to `0`.
      Dequeue will block unless there would be this
      many elements after the dequeue or the queue is closed. This
      ensures a minimum level of mixing of elements.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op's handle output is ref-typed and cannot exist in eager mode.
    raise RuntimeError("random_shuffle_queue op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'random_shuffle_queue' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  # Normalize each optional attr: substitute its default when None, then
  # coerce to the attr's canonical Python type.
  if shapes is None:
    shapes = []
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'random_shuffle_queue' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if min_after_dequeue is None:
    min_after_dequeue = 0
  min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RandomShuffleQueue", component_types=component_types, shapes=shapes,
                              capacity=capacity,
                              min_after_dequeue=min_after_dequeue, seed=seed,
                              seed2=seed2, container=container,
                              shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are re-read from the op so recorded values match it exactly.
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "min_after_dequeue",
              _op._get_attr_int("min_after_dequeue"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RandomShuffleQueue", _inputs_flat, _attrs, _result)
  # Unpack the single handle output.
  _result, = _result
  return _result

RandomShuffleQueue = tf_export("raw_ops.RandomShuffleQueue")(_ops.to_raw_op(random_shuffle_queue))

4359 

4360 

def random_shuffle_queue_eager_fallback(component_types, shapes, capacity, min_after_dequeue, seed, seed2, container, shared_name, name, ctx):
  """Eager-mode stub: RandomShuffleQueue's handle is ref-typed, so eager is unsupported."""
  del component_types, shapes, capacity, min_after_dequeue  # unused
  del seed, seed2, container, shared_name, name, ctx  # unused
  raise RuntimeError("random_shuffle_queue op does not support eager execution. Arg 'handle' is a ref.")

4363 

def random_shuffle_queue_v2(component_types, shapes=None, capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container="", shared_name="", name=None):
  r"""A queue that randomizes the order of elements.

  Args:
    component_types: A list of `tf.DTypes` that has length `>= 1`.
      The type of each component in a value.
    shapes: An optional list of shapes (each a `tf.TensorShape` or list of `ints`). Defaults to `[]`.
      The shape of each component in a value. The length of this attr must
      be either 0 or the same as the length of component_types. If the length of
      this attr is 0, the shapes of queue elements are not constrained, and
      only one element may be dequeued at a time.
    capacity: An optional `int`. Defaults to `-1`.
      The upper bound on the number of elements in this queue.
      Negative numbers mean no limit.
    min_after_dequeue: An optional `int`. Defaults to `0`.
      Dequeue will block unless there would be this
      many elements after the dequeue or the queue is closed. This
      ensures a minimum level of mixing of elements.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 is set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, a random seed is used.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this queue will be shared under the given name
      across multiple sessions.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  # Fix: the generated signature used a mutable default argument
  # (`shapes=[]`). `None` is the safe sentinel; normalize it up front so the
  # eager fast path, the eager fallback, and the graph path all observe the
  # same `[]` default as before (backward compatible for all callers).
  if shapes is None:
    shapes = []
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RandomShuffleQueueV2", name, "component_types",
        component_types, "shapes", shapes, "capacity", capacity,
        "min_after_dequeue", min_after_dequeue, "seed", seed, "seed2", seed2,
        "container", container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return random_shuffle_queue_v2_eager_fallback(
          component_types=component_types, shapes=shapes, capacity=capacity,
          min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(_t, "component_types") for _t in component_types]
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(_s, "shapes") for _s in shapes]
  if capacity is None:
    capacity = -1
  capacity = _execute.make_int(capacity, "capacity")
  if min_after_dequeue is None:
    min_after_dequeue = 0
  min_after_dequeue = _execute.make_int(min_after_dequeue, "min_after_dequeue")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RandomShuffleQueueV2", component_types=component_types,
                                shapes=shapes, capacity=capacity,
                                min_after_dequeue=min_after_dequeue,
                                seed=seed, seed2=seed2, container=container,
                                shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("component_types", _op.get_attr("component_types"), "shapes",
              _op.get_attr("shapes"), "capacity",
              _op._get_attr_int("capacity"), "min_after_dequeue",
              _op._get_attr_int("min_after_dequeue"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"),
              "container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RandomShuffleQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposed to users as tf.raw_ops.RandomShuffleQueueV2.
RandomShuffleQueueV2 = tf_export("raw_ops.RandomShuffleQueueV2")(_ops.to_raw_op(random_shuffle_queue_v2))

4472 

4473 

def random_shuffle_queue_v2_eager_fallback(component_types, shapes, capacity, min_after_dequeue, seed, seed2, container, shared_name, name, ctx):
  """Slow-path eager execution of RandomShuffleQueueV2.

  Validates and canonicalizes every attr (substituting the op's registered
  default when the caller passed None), then executes the op directly via
  _execute.execute and returns the single resource-handle output.
  """
  if not isinstance(component_types, (list, tuple)):
    raise TypeError(
        "Expected list for 'component_types' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % component_types)
  component_types = [_execute.make_type(dtype, "component_types")
                     for dtype in component_types]
  shapes = [] if shapes is None else shapes
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'random_shuffle_queue_v2' Op, not %r." % shapes)
  shapes = [_execute.make_shape(shape, "shapes") for shape in shapes]
  capacity = _execute.make_int(-1 if capacity is None else capacity,
                               "capacity")
  min_after_dequeue = _execute.make_int(
      0 if min_after_dequeue is None else min_after_dequeue,
      "min_after_dequeue")
  seed = _execute.make_int(0 if seed is None else seed, "seed")
  seed2 = _execute.make_int(0 if seed2 is None else seed2, "seed2")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str("" if shared_name is None else shared_name,
                                  "shared_name")
  _inputs_flat = []  # This op has no tensor inputs; everything is an attr.
  _attrs = ("component_types", component_types, "shapes", shapes,
            "capacity", capacity, "min_after_dequeue", min_after_dequeue,
            "seed", seed, "seed2", seed2, "container", container,
            "shared_name", shared_name)
  _result = _execute.execute(b"RandomShuffleQueueV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RandomShuffleQueueV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

4516 

4517 

def record_input(file_pattern, file_random_seed=301, file_shuffle_shift_ratio=0, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type="", name=None):
  r"""Emits randomized records.

  Args:
    file_pattern: A `string`. Glob pattern for the data files.
    file_random_seed: An optional `int`. Defaults to `301`.
      Random seeds used to produce randomized records.
    file_shuffle_shift_ratio: An optional `float`. Defaults to `0`.
      Shifts the list of files after the list is randomly
      shuffled.
    file_buffer_size: An optional `int`. Defaults to `10000`.
      The randomization shuffling buffer.
    file_parallelism: An optional `int`. Defaults to `16`.
      How many sstables are opened and concurrently iterated over.
    batch_size: An optional `int`. Defaults to `32`. The batch size.
    compression_type: An optional `string`. Defaults to `""`.
      The type of compression for the file. Currently ZLIB and
      GZIP are supported. Defaults to none.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:  # Eager mode: try the C fast path, then the Python fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RecordInput", name, "file_pattern", file_pattern,
        "file_random_seed", file_random_seed, "file_shuffle_shift_ratio",
        file_shuffle_shift_ratio, "file_buffer_size", file_buffer_size,
        "file_parallelism", file_parallelism, "batch_size", batch_size,
        "compression_type", compression_type)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return record_input_eager_fallback(
          file_pattern=file_pattern, file_random_seed=file_random_seed,
          file_shuffle_shift_ratio=file_shuffle_shift_ratio,
          file_buffer_size=file_buffer_size,
          file_parallelism=file_parallelism, batch_size=batch_size,
          compression_type=compression_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  file_pattern = _execute.make_str(file_pattern, "file_pattern")
  if file_random_seed is None:
    file_random_seed = 301
  file_random_seed = _execute.make_int(file_random_seed, "file_random_seed")
  if file_shuffle_shift_ratio is None:
    file_shuffle_shift_ratio = 0
  file_shuffle_shift_ratio = _execute.make_float(file_shuffle_shift_ratio, "file_shuffle_shift_ratio")
  if file_buffer_size is None:
    file_buffer_size = 10000
  file_buffer_size = _execute.make_int(file_buffer_size, "file_buffer_size")
  if file_parallelism is None:
    file_parallelism = 16
  file_parallelism = _execute.make_int(file_parallelism, "file_parallelism")
  if batch_size is None:
    batch_size = 32
  batch_size = _execute.make_int(batch_size, "batch_size")
  if compression_type is None:
    compression_type = ""
  compression_type = _execute.make_str(compression_type, "compression_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RecordInput", file_pattern=file_pattern,
                       file_random_seed=file_random_seed,
                       file_shuffle_shift_ratio=file_shuffle_shift_ratio,
                       file_buffer_size=file_buffer_size,
                       file_parallelism=file_parallelism,
                       batch_size=batch_size,
                       compression_type=compression_type, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("file_pattern", _op.get_attr("file_pattern"),
              "file_random_seed", _op._get_attr_int("file_random_seed"),
              "file_shuffle_shift_ratio",
              _op.get_attr("file_shuffle_shift_ratio"), "file_buffer_size",
              _op._get_attr_int("file_buffer_size"), "file_parallelism",
              _op._get_attr_int("file_parallelism"), "batch_size",
              _op._get_attr_int("batch_size"), "compression_type",
              _op.get_attr("compression_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RecordInput", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposed to users as tf.raw_ops.RecordInput.
RecordInput = tf_export("raw_ops.RecordInput")(_ops.to_raw_op(record_input))

4610 

4611 

def record_input_eager_fallback(file_pattern, file_random_seed, file_shuffle_shift_ratio, file_buffer_size, file_parallelism, batch_size, compression_type, name, ctx):
  """Slow-path eager execution of RecordInput.

  Canonicalizes each attr (substituting the op's registered default when the
  caller passed None), then executes the op directly and returns its single
  string output.
  """
  file_pattern = _execute.make_str(file_pattern, "file_pattern")
  if file_random_seed is None:
    file_random_seed = 301
  file_random_seed = _execute.make_int(file_random_seed, "file_random_seed")
  if file_shuffle_shift_ratio is None:
    file_shuffle_shift_ratio = 0
  file_shuffle_shift_ratio = _execute.make_float(file_shuffle_shift_ratio, "file_shuffle_shift_ratio")
  if file_buffer_size is None:
    file_buffer_size = 10000
  file_buffer_size = _execute.make_int(file_buffer_size, "file_buffer_size")
  if file_parallelism is None:
    file_parallelism = 16
  file_parallelism = _execute.make_int(file_parallelism, "file_parallelism")
  if batch_size is None:
    batch_size = 32
  batch_size = _execute.make_int(batch_size, "batch_size")
  if compression_type is None:
    compression_type = ""
  compression_type = _execute.make_str(compression_type, "compression_type")
  _inputs_flat = []  # RecordInput has no tensor inputs; everything is an attr.
  _attrs = ("file_pattern", file_pattern, "file_random_seed",
  file_random_seed, "file_shuffle_shift_ratio", file_shuffle_shift_ratio,
  "file_buffer_size", file_buffer_size, "file_parallelism", file_parallelism,
  "batch_size", batch_size, "compression_type", compression_type)
  _result = _execute.execute(b"RecordInput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RecordInput", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

4644 

4645 

def resource_accumulator_apply_gradient(handle, local_step, gradient, name=None):
  r"""Applies a gradient to a given accumulator.

  Does not add if local_step is lesser than the accumulator's global_step.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a accumulator.
    local_step: A `Tensor` of type `int64`.
      The local_step value at which the gradient was computed.
    gradient: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A tensor of the gradient to be accumulated.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  ctx = _context._context or _context.context()
  if ctx._thread_local_data.is_eager:
    # Eager mode: attempt the C fast path, then the Python fallback.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          ctx, "ResourceAccumulatorApplyGradient", name, handle, local_step,
          gradient)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return resource_accumulator_apply_gradient_eager_fallback(
          handle, local_step, gradient, name=name, ctx=ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: add the op to the current graph and return the Operation.
  _, _, created_op, _ = _op_def_library._apply_op_helper(
      "ResourceAccumulatorApplyGradient", handle=handle,
      local_step=local_step, gradient=gradient, name=name)
  return created_op
# Raw-op export: exposed to users as tf.raw_ops.ResourceAccumulatorApplyGradient.
ResourceAccumulatorApplyGradient = tf_export("raw_ops.ResourceAccumulatorApplyGradient")(_ops.to_raw_op(resource_accumulator_apply_gradient))

4686 

4687 

def resource_accumulator_apply_gradient_eager_fallback(handle, local_step, gradient, name, ctx):
  """Slow-path eager execution of ResourceAccumulatorApplyGradient."""
  # Dtypes accepted for the gradient, in the op registration's order.
  allowed_types = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  _attr_dtype, (gradient,) = _execute.args_to_matching_eager(
      [gradient], ctx, allowed_types)
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  local_step = _ops.convert_to_tensor(local_step, _dtypes.int64)
  op_inputs = [handle, local_step, gradient]
  op_attrs = ("dtype", _attr_dtype)
  # The op has no outputs (num_outputs=0); it is executed for its side effect.
  _execute.execute(b"ResourceAccumulatorApplyGradient", 0, inputs=op_inputs,
                   attrs=op_attrs, ctx=ctx, name=name)
  return None

4699 

4700 

def resource_accumulator_num_accumulated(handle, name=None):
  r"""Returns the number of gradients aggregated in the given accumulators.

  Args:
    handle: A `Tensor` of type `resource`. The handle to an accumulator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:  # Eager mode: try the C fast path, then the Python fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceAccumulatorNumAccumulated", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return resource_accumulator_num_accumulated_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceAccumulatorNumAccumulated", handle=handle, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()  # This op has no attrs.
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceAccumulatorNumAccumulated", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposed to users as tf.raw_ops.ResourceAccumulatorNumAccumulated.
ResourceAccumulatorNumAccumulated = tf_export("raw_ops.ResourceAccumulatorNumAccumulated")(_ops.to_raw_op(resource_accumulator_num_accumulated))

4740 

4741 

def resource_accumulator_num_accumulated_eager_fallback(handle, name, ctx):
  """Slow-path eager execution of ResourceAccumulatorNumAccumulated."""
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  op_inputs = [handle]
  op_attrs = None  # This op has no attrs.
  outputs = _execute.execute(b"ResourceAccumulatorNumAccumulated", 1,
                             inputs=op_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceAccumulatorNumAccumulated", op_inputs, op_attrs, outputs)
  num_accumulated, = outputs
  return num_accumulated

4754 

4755 

def resource_accumulator_set_global_step(handle, new_global_step, name=None):
  r"""Updates the accumulator with a new value for global_step.

  Logs warning if the accumulator's value is already higher than
  new_global_step.

  Args:
    handle: A `Tensor` of type `resource`. The handle to an accumulator.
    new_global_step: A `Tensor` of type `int64`.
      The new global_step value to set.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:  # Eager mode: try the C fast path, then the Python fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceAccumulatorSetGlobalStep", name, handle,
        new_global_step)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return resource_accumulator_set_global_step_eager_fallback(
          handle, new_global_step, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceAccumulatorSetGlobalStep", handle=handle,
                                            new_global_step=new_global_step,
                                            name=name)
  return _op
# Raw-op export: exposed to users as tf.raw_ops.ResourceAccumulatorSetGlobalStep.
ResourceAccumulatorSetGlobalStep = tf_export("raw_ops.ResourceAccumulatorSetGlobalStep")(_ops.to_raw_op(resource_accumulator_set_global_step))

4795 

4796 

def resource_accumulator_set_global_step_eager_fallback(handle, new_global_step, name, ctx):
  """Slow-path eager execution of ResourceAccumulatorSetGlobalStep."""
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  new_global_step = _ops.convert_to_tensor(new_global_step, _dtypes.int64)
  op_inputs = [handle, new_global_step]
  # The op has no outputs (num_outputs=0); it is executed for its side effect.
  _execute.execute(b"ResourceAccumulatorSetGlobalStep", 0, inputs=op_inputs,
                   attrs=None, ctx=ctx, name=name)
  return None

4807 

4808 

def resource_accumulator_take_gradient(handle, num_required, dtype, name=None):
  r"""Extracts the average gradient in the given ConditionalAccumulator.

  The op blocks until sufficient (i.e., more than num_required)
  gradients have been accumulated. If the accumulator has already
  aggregated more than num_required gradients, it returns the average of
  the accumulated gradients. Also automatically increments the recorded
  global_step in the accumulator by 1, and resets the aggregate to 0.

  Args:
    handle: A `Tensor` of type `resource`. The handle to an accumulator.
    num_required: A `Tensor` of type `int32`.
      Number of gradients required before we return an aggregate.
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The data type of accumulated gradients. Needs to correspond to the type
      of the accumulator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:  # Eager mode: try the C fast path, then the Python fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceAccumulatorTakeGradient", name, handle, num_required,
        "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return resource_accumulator_take_gradient_eager_fallback(
          handle, num_required, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceAccumulatorTakeGradient", handle=handle,
                                           num_required=num_required,
                                           dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceAccumulatorTakeGradient", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposed to users as tf.raw_ops.ResourceAccumulatorTakeGradient.
ResourceAccumulatorTakeGradient = tf_export("raw_ops.ResourceAccumulatorTakeGradient")(_ops.to_raw_op(resource_accumulator_take_gradient))

4863 

4864 

def resource_accumulator_take_gradient_eager_fallback(handle, num_required, dtype, name, ctx):
  """Slow-path eager execution of ResourceAccumulatorTakeGradient."""
  dtype = _execute.make_type(dtype, "dtype")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  num_required = _ops.convert_to_tensor(num_required, _dtypes.int32)
  op_inputs = [handle, num_required]
  op_attrs = ("dtype", dtype)
  outputs = _execute.execute(b"ResourceAccumulatorTakeGradient", 1,
                             inputs=op_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceAccumulatorTakeGradient", op_inputs, op_attrs, outputs)
  gradient, = outputs
  return gradient

4879 

4880 

def resource_conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
  r"""A conditional accumulator for aggregating gradients.

  The accumulator accepts gradients marked with local_step greater or
  equal to the most recent global_step known to the accumulator. The
  average can be extracted from the accumulator, provided sufficient
  gradients have been accumulated. Extracting the average automatically
  resets the aggregate to 0, and increments the global_step recorded by
  the accumulator.
  This is a resource version of ConditionalAccumulator that will work in TF2.0
  with tf.cond version 2.

  Args:
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The type of the value being accumulated.
    shape: A `tf.TensorShape` or list of `ints`.
      The shape of the values, can be [], in which case shape is unknown.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator will be shared under the
      given name across multiple sessions.
    reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:  # Eager mode: try the C fast path, then the Python fallback.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ResourceConditionalAccumulator", name, "dtype", dtype, "shape",
        shape, "container", container, "shared_name", shared_name,
        "reduction_type", reduction_type)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # The C fast path rejected the arguments; retry via the Python path.
      pass
    try:
      return resource_conditional_accumulator_eager_fallback(
          dtype=dtype, shape=shape, container=container,
          shared_name=shared_name, reduction_type=reduction_type, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if reduction_type is None:
    reduction_type = "MEAN"
  reduction_type = _execute.make_str(reduction_type, "reduction_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ResourceConditionalAccumulator", dtype=dtype, shape=shape,
                                          container=container,
                                          shared_name=shared_name,
                                          reduction_type=reduction_type,
                                          name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op so the recorded values reflect any
    # canonicalization performed by _apply_op_helper.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
              _op.get_attr("shape"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"), "reduction_type",
              _op.get_attr("reduction_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ResourceConditionalAccumulator", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Raw-op export: exposed to users as tf.raw_ops.ResourceConditionalAccumulator.
ResourceConditionalAccumulator = tf_export("raw_ops.ResourceConditionalAccumulator")(_ops.to_raw_op(resource_conditional_accumulator))

4961 

4962 

def resource_conditional_accumulator_eager_fallback(dtype, shape, container, shared_name, reduction_type, name, ctx):
  """Slow-path eager execution of ResourceConditionalAccumulator.

  Canonicalizes each attr (substituting the op's registered default when the
  caller passed None), then executes the op directly and returns its single
  resource-handle output.
  """
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if reduction_type is None:
    reduction_type = "MEAN"
  reduction_type = _execute.make_str(reduction_type, "reduction_type")
  _inputs_flat = []  # This op has no tensor inputs; everything is an attr.
  _attrs = ("dtype", dtype, "shape", shape, "container", container,
  "shared_name", shared_name, "reduction_type", reduction_type)
  _result = _execute.execute(b"ResourceConditionalAccumulator", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ResourceConditionalAccumulator", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

4986 

4987 

def sparse_accumulator_apply_gradient(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name=None):
  r"""Applies a sparse gradient to a given accumulator.

  Does not add if local_step is smaller than the accumulator's
  global_step.

  Args:
    handle: A `Tensor` of type mutable `string`. The handle to a accumulator.
    local_step: A `Tensor` of type `int64`.
      The local_step value at which the sparse gradient was computed.
    gradient_indices: A `Tensor` of type `int64`.
      Indices of the sparse gradient to be accumulated. Must be a
      vector.
    gradient_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      Values are the non-zero slices of the gradient, and must have
      the same first dimension as indices, i.e., the nnz represented by indices and
      values must be consistent.
    gradient_shape: A `Tensor` of type `int64`.
      Shape of the sparse gradient to be accumulated.
    has_known_shape: A `bool`.
      Boolean indicating whether gradient_shape is unknown, in which
      case the input is ignored during validation.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs cannot exist under eager execution, so the op is
    # graph-only.
    raise RuntimeError("sparse_accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  has_known_shape = _execute.make_bool(has_known_shape, "has_known_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseAccumulatorApplyGradient", handle=handle,
                                          local_step=local_step,
                                          gradient_indices=gradient_indices,
                                          gradient_values=gradient_values,
                                          gradient_shape=gradient_shape,
                                          has_known_shape=has_known_shape,
                                          name=name)
  return _op
# Raw-op export: exposed to users as tf.raw_ops.SparseAccumulatorApplyGradient.
SparseAccumulatorApplyGradient = tf_export("raw_ops.SparseAccumulatorApplyGradient")(_ops.to_raw_op(sparse_accumulator_apply_gradient))

5031 

5032 

def sparse_accumulator_apply_gradient_eager_fallback(handle, local_step, gradient_indices, gradient_values, gradient_shape, has_known_shape, name, ctx):
  """Eager fallback for SparseAccumulatorApplyGradient; always fails.

  The op takes a reference-typed accumulator handle, and ref tensors cannot
  exist under eager execution, so this fallback unconditionally raises.
  """
  error_message = "sparse_accumulator_apply_gradient op does not support eager execution. Arg 'handle' is a ref."
  raise RuntimeError(error_message)

5035_SparseAccumulatorTakeGradientOutput = collections.namedtuple( 

5036 "SparseAccumulatorTakeGradient", 

5037 ["indices", "values", "shape"]) 

5038 

5039 

def sparse_accumulator_take_gradient(handle, num_required, dtype, name=None):
  r"""Extracts the average sparse gradient in a SparseConditionalAccumulator.

  The op blocks until sufficient (i.e., more than num_required)
  gradients have been accumulated. If the accumulator has already
  aggregated more than num_required gradients, it will return its
  average of the accumulated gradients. Also automatically increments
  the recorded global_step in the accumulator by 1, and resets the
  aggregate to 0.

  Args:
    handle: A `Tensor` of type mutable `string`.
      The handle to a SparseConditionalAccumulator.
    num_required: A `Tensor` of type `int32`.
      Number of gradients required before we return an aggregate.
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The data type of accumulated gradients. Needs to correspond to the type
      of the accumulator.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, values, shape).

    indices: A `Tensor` of type `int64`.
    values: A `Tensor` of type `dtype`.
    shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The handle input is ref-typed, which only exists in graph mode.
    raise RuntimeError("sparse_accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseAccumulatorTakeGradient", handle=handle,
                                         num_required=num_required,
                                         dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseAccumulatorTakeGradient", _inputs_flat, _attrs, _result)
  # Package the three output tensors as a named (indices, values, shape) tuple.
  _result = _SparseAccumulatorTakeGradientOutput._make(_result)
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.SparseAccumulatorTakeGradient.
SparseAccumulatorTakeGradient = tf_export("raw_ops.SparseAccumulatorTakeGradient")(_ops.to_raw_op(sparse_accumulator_take_gradient))

5087 

5088 

def sparse_accumulator_take_gradient_eager_fallback(handle, num_required, dtype, name, ctx):
  """Eager-mode fallback stub: the ref-typed `handle` rules out eager execution."""
  raise RuntimeError("sparse_accumulator_take_gradient op does not support eager execution. Arg 'handle' is a ref.")

5091 

def sparse_conditional_accumulator(dtype, shape, container="", shared_name="", reduction_type="MEAN", name=None):
  r"""A conditional accumulator for aggregating sparse gradients.

  The accumulator accepts gradients marked with local_step greater or
  equal to the most recent global_step known to the accumulator. The
  average can be extracted from the accumulator, provided sufficient
  gradients have been accumulated. Extracting the average automatically
  resets the aggregate to 0, and increments the global_step recorded by
  the accumulator.

  Args:
    dtype: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`.
      The type of the value being accumulated.
    shape: A `tf.TensorShape` or list of `ints`. The shape of the values.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      If non-empty, this accumulator will be shared under the given name
      across multiple sessions.
    reduction_type: An optional `string` from: `"MEAN", "SUM"`. Defaults to `"MEAN"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op produces a ref-typed handle, which only exists in graph mode.
    raise RuntimeError("sparse_conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  shape = _execute.make_shape(shape, "shape")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  if reduction_type is None:
    reduction_type = "MEAN"
  reduction_type = _execute.make_str(reduction_type, "reduction_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseConditionalAccumulator", dtype=dtype, shape=shape,
                                        container=container,
                                        shared_name=shared_name,
                                        reduction_type=reduction_type,
                                        name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape",
              _op.get_attr("shape"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"), "reduction_type",
              _op.get_attr("reduction_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseConditionalAccumulator", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.SparseConditionalAccumulator.
SparseConditionalAccumulator = tf_export("raw_ops.SparseConditionalAccumulator")(_ops.to_raw_op(sparse_conditional_accumulator))

5153 

5154 

def sparse_conditional_accumulator_eager_fallback(dtype, shape, container, shared_name, reduction_type, name, ctx):
  """Eager-mode fallback stub: the ref-typed handle rules out eager execution."""
  raise RuntimeError("sparse_conditional_accumulator op does not support eager execution. Arg 'handle' is a ref.")

5157 

def _stack(elem_type, stack_name="", name=None):
  r"""Deprecated, use StackV2.

  Args:
    elem_type: A `tf.DType`.
    stack_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The op produces a ref-typed handle, which only exists in graph mode.
    raise RuntimeError("stack op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  elem_type = _execute.make_type(elem_type, "elem_type")
  if stack_name is None:
    stack_name = ""
  stack_name = _execute.make_str(stack_name, "stack_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Stack", elem_type=elem_type, stack_name=stack_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("elem_type", _op._get_attr_type("elem_type"), "stack_name",
              _op.get_attr("stack_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Stack", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.Stack.
Stack = tf_export("raw_ops.Stack")(_ops.to_raw_op(_stack))

5191 

5192 

5193def _stack_eager_fallback(elem_type, stack_name, name, ctx): 

5194 raise RuntimeError("stack op does not support eager execution. Arg 'handle' is a ref.") 

5195 

def stack_close(handle, name=None):
  r"""Deprecated, use StackCloseV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The handle input is ref-typed, which only exists in graph mode.
    raise RuntimeError("stack_close op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackClose", handle=handle, name=name)
  # The op has no outputs; return the Operation itself.
  return _op

# Module-level raw-op endpoint, exported as tf.raw_ops.StackClose.
StackClose = tf_export("raw_ops.StackClose")(_ops.to_raw_op(stack_close))

5215 

5216 

def stack_close_eager_fallback(handle, name, ctx):
  """Eager-mode fallback stub: the ref-typed `handle` rules out eager execution."""
  raise RuntimeError("stack_close op does not support eager execution. Arg 'handle' is a ref.")

5219 

def stack_close_v2(handle, name=None):
  r"""Delete the stack from its resource container.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StackCloseV2", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stack_close_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackCloseV2", handle=handle, name=name)
  # The op has no outputs; return the Operation itself.
  return _op

# Module-level raw-op endpoint, exported as tf.raw_ops.StackCloseV2.
StackCloseV2 = tf_export("raw_ops.StackCloseV2")(_ops.to_raw_op(stack_close_v2))

5251 

5252 

def stack_close_v2_eager_fallback(handle, name, ctx):
  """Eagerly executes StackCloseV2; the op has no outputs, so returns None."""
  resource_handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  _execute.execute(b"StackCloseV2", 0, inputs=[resource_handle], attrs=None,
                   ctx=ctx, name=name)
  return None

5261 

5262 

def stack_pop(handle, elem_type, name=None):
  r"""Deprecated, use StackPopV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    elem_type: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `elem_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The handle input is ref-typed, which only exists in graph mode.
    raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  elem_type = _execute.make_type(elem_type, "elem_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackPop", handle=handle, elem_type=elem_type, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("elem_type", _op._get_attr_type("elem_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StackPop", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StackPop.
StackPop = tf_export("raw_ops.StackPop")(_ops.to_raw_op(stack_pop))

5292 

5293 

def stack_pop_eager_fallback(handle, elem_type, name, ctx):
  """Eager-mode fallback stub: the ref-typed `handle` rules out eager execution."""
  raise RuntimeError("stack_pop op does not support eager execution. Arg 'handle' is a ref.")

5296 

def stack_pop_v2(handle, elem_type, name=None):
  r"""Pop the element at the top of the stack.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    elem_type: A `tf.DType`. The type of the elem that is popped.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `elem_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StackPopV2", name, handle, "elem_type", elem_type)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stack_pop_v2_eager_fallback(
          handle, elem_type=elem_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  elem_type = _execute.make_type(elem_type, "elem_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackPopV2", handle=handle, elem_type=elem_type, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("elem_type", _op._get_attr_type("elem_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StackPopV2", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StackPopV2.
StackPopV2 = tf_export("raw_ops.StackPopV2")(_ops.to_raw_op(stack_pop_v2))

5338 

5339 

def stack_pop_v2_eager_fallback(handle, elem_type, name, ctx):
  """Eagerly executes StackPopV2 and returns the popped tensor."""
  elem_type = _execute.make_type(elem_type, "elem_type")
  flat_inputs = [_ops.convert_to_tensor(handle, _dtypes.resource)]
  op_attrs = ("elem_type", elem_type)
  outputs = _execute.execute(b"StackPopV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("StackPopV2", flat_inputs, op_attrs, outputs)
  # Exactly one output tensor is produced.
  popped, = outputs
  return popped

5352 

5353 

def stack_push(handle, elem, swap_memory=False, name=None):
  r"""Deprecated, use StackPushV2.

  Args:
    handle: A `Tensor` of type mutable `string`.
    elem: A `Tensor`.
    swap_memory: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `elem`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # The handle input is ref-typed, which only exists in graph mode.
    raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  if swap_memory is None:
    swap_memory = False
  swap_memory = _execute.make_bool(swap_memory, "swap_memory")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackPush", handle=handle, elem=elem, swap_memory=swap_memory,
                     name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "swap_memory",
              _op._get_attr_bool("swap_memory"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StackPush", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StackPush.
StackPush = tf_export("raw_ops.StackPush")(_ops.to_raw_op(stack_push))

5388 

5389 

def stack_push_eager_fallback(handle, elem, swap_memory, name, ctx):
  """Eager-mode fallback stub: the ref-typed `handle` rules out eager execution."""
  raise RuntimeError("stack_push op does not support eager execution. Arg 'handle' is a ref.")

5392 

def stack_push_v2(handle, elem, swap_memory=False, name=None):
  r"""Push an element onto the stack.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a stack.
    elem: A `Tensor`. The tensor to be pushed onto the stack.
    swap_memory: An optional `bool`. Defaults to `False`.
      Swap `elem` to CPU. Default to false.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `elem`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StackPushV2", name, handle, elem, "swap_memory", swap_memory)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stack_push_v2_eager_fallback(
          handle, elem, swap_memory=swap_memory, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if swap_memory is None:
    swap_memory = False
  swap_memory = _execute.make_bool(swap_memory, "swap_memory")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackPushV2", handle=handle, elem=elem, swap_memory=swap_memory,
                       name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "swap_memory",
              _op._get_attr_bool("swap_memory"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StackPushV2", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StackPushV2.
StackPushV2 = tf_export("raw_ops.StackPushV2")(_ops.to_raw_op(stack_push_v2))

5440 

5441 

def stack_push_v2_eager_fallback(handle, elem, swap_memory, name, ctx):
  """Eagerly executes StackPushV2 and returns the pushed tensor."""
  swap_memory = _execute.make_bool(
      False if swap_memory is None else swap_memory, "swap_memory")
  # Infer the element dtype attr T from the pushed value.
  elem_dtype, (elem,) = _execute.args_to_matching_eager([elem], ctx, [])
  resource_handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flat_inputs = [resource_handle, elem]
  op_attrs = ("T", elem_dtype, "swap_memory", swap_memory)
  outputs = _execute.execute(b"StackPushV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("StackPushV2", flat_inputs, op_attrs, outputs)
  # Exactly one output tensor is produced.
  pushed, = outputs
  return pushed

5457 

5458 

def stack_v2(max_size, elem_type, stack_name="", name=None):
  r"""A stack that produces elements in first-in last-out order.

  Args:
    max_size: A `Tensor` of type `int32`.
      The maximum size of the stack if non-negative. If negative, the stack
      size is unlimited.
    elem_type: A `tf.DType`. The type of the elements on the stack.
    stack_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary stack resource. Default
      value is the name of the 'Stack' op (which is guaranteed unique).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `resource`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StackV2", name, max_size, "elem_type", elem_type, "stack_name",
        stack_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stack_v2_eager_fallback(
          max_size, elem_type=elem_type, stack_name=stack_name, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  elem_type = _execute.make_type(elem_type, "elem_type")
  if stack_name is None:
    stack_name = ""
  stack_name = _execute.make_str(stack_name, "stack_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StackV2", max_size=max_size, elem_type=elem_type,
                   stack_name=stack_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("elem_type", _op._get_attr_type("elem_type"), "stack_name",
              _op.get_attr("stack_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StackV2", _inputs_flat, _attrs, _result)
  # Single output; unpack from the output list.
  _result, = _result
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StackV2.
StackV2 = tf_export("raw_ops.StackV2")(_ops.to_raw_op(stack_v2))

5512 

5513 

def stack_v2_eager_fallback(max_size, elem_type, stack_name, name, ctx):
  """Eagerly executes StackV2 and returns the new stack's resource handle."""
  elem_type = _execute.make_type(elem_type, "elem_type")
  stack_name = _execute.make_str(
      "" if stack_name is None else stack_name, "stack_name")
  flat_inputs = [_ops.convert_to_tensor(max_size, _dtypes.int32)]
  op_attrs = ("elem_type", elem_type, "stack_name", stack_name)
  outputs = _execute.execute(b"StackV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("StackV2", flat_inputs, op_attrs, outputs)
  # Exactly one output tensor is produced.
  stack_handle, = outputs
  return stack_handle

5529 

5530 

def stage(values, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Stage values similar to a lightweight Enqueue.

  The basic functionality of this Op is similar to a queue with many
  fewer capabilities and options. This Op is optimized for performance.

  Args:
    values: A list of `Tensor` objects. a list of tensors
      dtypes: A list of data types that inserted values should adhere to.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
      Maximum number of elements in the Staging Area. If > 0, inserts
      on the container will block when the capacity is reached.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
      The maximum number of bytes allowed for Tensors in the Staging Area.
      If > 0, inserts will block until sufficient space is available.
    container: An optional `string`. Defaults to `""`.
      If non-empty, this queue is placed in the given container. Otherwise,
      a default container is used.
    shared_name: An optional `string`. Defaults to `""`.
      It is necessary to match this name to the matching Unstage Op.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Stage", name, values, "capacity", capacity, "memory_limit",
        memory_limit, "container", container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stage_eager_fallback(
          values, capacity=capacity, memory_limit=memory_limit,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Stage", values=values, capacity=capacity, memory_limit=memory_limit,
                 container=container, shared_name=shared_name, name=name)
  # The op has no outputs; return the Operation itself.
  return _op

# Module-level raw-op endpoint, exported as tf.raw_ops.Stage.
Stage = tf_export("raw_ops.Stage")(_ops.to_raw_op(stage))

5592 

5593 

def stage_eager_fallback(values, capacity, memory_limit, container, shared_name, name, ctx):
  """Eagerly executes Stage; the op has no outputs, so returns None."""
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  # Infer the dtypes attr from the heterogeneous value list.
  dtype_attr, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  flat_inputs = list(values)
  op_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
              dtype_attr, "container", container, "shared_name", shared_name)
  _execute.execute(b"Stage", 0, inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                   name=name)
  return None

5615 

5616 

def stage_clear(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op removes all elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StageClear", name, "capacity", capacity, "memory_limit",
        memory_limit, "dtypes", dtypes, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stage_clear_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StageClear", dtypes=dtypes, capacity=capacity,
                      memory_limit=memory_limit, container=container,
                      shared_name=shared_name, name=name)
  # The op has no outputs; return the Operation itself.
  return _op

# Module-level raw-op endpoint, exported as tf.raw_ops.StageClear.
StageClear = tf_export("raw_ops.StageClear")(_ops.to_raw_op(stage_clear))

5674 

5675 

def stage_clear_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eagerly executes StageClear; the op has no outputs, so returns None."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_clear' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(dt, "dtypes") for dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  op_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
              dtypes, "container", container, "shared_name", shared_name)
  # StageClear takes no tensor inputs; everything is carried in attrs.
  _execute.execute(b"StageClear", 0, inputs=[], attrs=op_attrs, ctx=ctx,
                   name=name)
  return None

5701 

5702 

def stage_peek(index, dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op peeks at the values at the specified index. If the

  underlying container does not contain sufficient elements
  this op will block until it does. This Op is optimized for
  performance.

  Args:
    index: A `Tensor` of type `int32`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager executor.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StagePeek", name, index, "capacity", capacity, "memory_limit",
        memory_limit, "dtypes", dtypes, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fall through to the slower Python-level eager fallback below.
      pass
    try:
      return stage_peek_eager_fallback(
          index, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StagePeek", index=index, dtypes=dtypes, capacity=capacity,
                     memory_limit=memory_limit, container=container,
                     shared_name=shared_name, name=name)
  _result = _outputs[:]
  if not _result:
    # With an empty dtypes list the op has no outputs; return the Operation.
    return _op
  if _execute.must_record_gradient():
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StagePeek", _inputs_flat, _attrs, _result)
  return _result

# Module-level raw-op endpoint, exported as tf.raw_ops.StagePeek.
StagePeek = tf_export("raw_ops.StagePeek")(_ops.to_raw_op(stage_peek))

5777 

5778 

def stage_peek_eager_fallback(index, dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eagerly executes StagePeek; returns the list of peeked tensors."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_peek' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(dt, "dtypes") for dt in dtypes]
  capacity = _execute.make_int(0 if capacity is None else capacity,
                               "capacity")
  memory_limit = _execute.make_int(
      0 if memory_limit is None else memory_limit, "memory_limit")
  container = _execute.make_str("" if container is None else container,
                                "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  flat_inputs = [_ops.convert_to_tensor(index, _dtypes.int32)]
  op_attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
              dtypes, "container", container, "shared_name", shared_name)
  # One output tensor per entry in the dtypes attr.
  outputs = _execute.execute(b"StagePeek", len(dtypes), inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("StagePeek", flat_inputs, op_attrs, outputs)
  return outputs

5807 

5808 

def stage_size(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op returns the number of elements in the underlying container.

  Args:
    dtypes: A list of `tf.DTypes`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: hand the call straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StageSize", name, "capacity", capacity, "memory_limit",
        memory_limit, "dtypes", dtypes, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return stage_size_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StageSize", dtypes=dtypes, capacity=capacity,
                     memory_limit=memory_limit, container=container,
                     shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Re-read attrs from the created op so recorded values are canonical.
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StageSize", _inputs_flat, _attrs, _result)
  _result, = _result  # single int32 output
  return _result

StageSize = tf_export("raw_ops.StageSize")(_ops.to_raw_op(stage_size))

5877 

5878 

def stage_size_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Eager-mode slow path for StageSize: canonicalize attrs, then execute."""
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'stage_size' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []  # StageSize takes no tensor inputs; everything is attrs.
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  _result = _execute.execute(b"StageSize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StageSize", _inputs_flat, _attrs, _result)
  _result, = _result  # single int32 output
  return _result

5907 

5908 

def tensor_array(size, dtype, dynamic_size=False, clear_after_read=True, tensor_array_name="", element_shape=None, name=None):
  r"""Graph-mode wrapper for the TensorArray op.

  The op's `handle` output is a ref tensor, so this wrapper cannot run under
  eager execution and raises `RuntimeError` there.

  Args:
    size: A `Tensor` of type `int32`.
    dtype: A `tf.DType`.
    dynamic_size: An optional `bool`. Defaults to `False`.
    clear_after_read: An optional `bool`. Defaults to `True`.
    tensor_array_name: An optional `string`. Defaults to `""`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArray", size=size, dtype=dtype, dynamic_size=dynamic_size,
                       clear_after_read=clear_after_read,
                       tensor_array_name=tensor_array_name,
                       element_shape=element_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "dynamic_size",
              _op._get_attr_bool("dynamic_size"), "clear_after_read",
              _op._get_attr_bool("clear_after_read"), "tensor_array_name",
              _op.get_attr("tensor_array_name"), "element_shape",
              _op.get_attr("element_shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArray", _inputs_flat, _attrs, _result)
  _result, = _result  # single ref-string handle output
  return _result

TensorArray = tf_export("raw_ops.TensorArray")(_ops.to_raw_op(tensor_array))

5961 

5962 

def tensor_array_eager_fallback(size, dtype, dynamic_size, clear_after_read, tensor_array_name, element_shape, name, ctx):
  """Eager fallback for TensorArray; always fails because the op's
  'handle' output is a ref tensor, which only exists in graph mode."""
  del size, dtype, dynamic_size, clear_after_read  # unused: op cannot run eagerly
  del tensor_array_name, element_shape, name, ctx
  raise RuntimeError("tensor_array op does not support eager execution. Arg 'handle' is a ref.")

5965 

def tensor_array_close(handle, name=None):
  r"""Graph-mode wrapper for the TensorArrayClose op ('handle' is a ref).

  Args:
    handle: A `Tensor` of type mutable `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array_close op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayClose", handle=handle, name=name)
  return _op
TensorArrayClose = tf_export("raw_ops.TensorArrayClose")(_ops.to_raw_op(tensor_array_close))

5985 

5986 

def tensor_array_close_eager_fallback(handle, name, ctx):
  """Eager fallback for TensorArrayClose; always fails because the op's
  'handle' argument is a ref tensor, which only exists in graph mode."""
  del handle, name, ctx  # unused: op cannot run eagerly
  raise RuntimeError("tensor_array_close op does not support eager execution. Arg 'handle' is a ref.")

5989 

def tensor_array_close_v2(handle, name=None):
  r"""Deprecated. Use TensorArrayCloseV3

  Args:
    handle: A `Tensor` of type `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayCloseV2", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_close_v2_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayCloseV2", handle=handle, name=name)
  return _op
TensorArrayCloseV2 = tf_export("raw_ops.TensorArrayCloseV2")(_ops.to_raw_op(tensor_array_close_v2))

6021 

6022 

def tensor_array_close_v2_eager_fallback(handle, name, ctx):
  """Eager-mode slow path for TensorArrayCloseV2."""
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  _inputs_flat = [handle]
  _attrs = None
  _result = _execute.execute(b"TensorArrayCloseV2", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # the op has no outputs
  return _result

6031 

6032 

def tensor_array_close_v3(handle, name=None):
  r"""Delete the TensorArray from its resource container.

  This enables the user to close and release the resource in the middle
  of a step/run.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayCloseV3", name, handle)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_close_v3_eager_fallback(
          handle, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayCloseV3", handle=handle, name=name)
  return _op
TensorArrayCloseV3 = tf_export("raw_ops.TensorArrayCloseV3")(_ops.to_raw_op(tensor_array_close_v3))

6068 

6069 

def tensor_array_close_v3_eager_fallback(handle, name, ctx):
  """Eager-mode slow path for TensorArrayCloseV3."""
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  _inputs_flat = [handle]
  _attrs = None
  _result = _execute.execute(b"TensorArrayCloseV3", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  _result = None  # the op has no outputs
  return _result

6078 

6079_TensorArrayConcatOutput = collections.namedtuple( 

6080 "TensorArrayConcat", 

6081 ["value", "lengths"]) 

6082 

6083 

def tensor_array_concat(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Graph-mode wrapper for the TensorArrayConcat op ('handle' is a ref).

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array_concat op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape_except0 is None:
    element_shape_except0 = None  # generator-emitted no-op; make_shape accepts None
  element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayConcat", handle=handle, flow_in=flow_in, dtype=dtype,
                             element_shape_except0=element_shape_except0,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape_except0",
              _op.get_attr("element_shape_except0"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayConcat", _inputs_flat, _attrs, _result)
  _result = _TensorArrayConcatOutput._make(_result)
  return _result

TensorArrayConcat = tf_export("raw_ops.TensorArrayConcat")(_ops.to_raw_op(tensor_array_concat))

6124 

6125 

def tensor_array_concat_eager_fallback(handle, flow_in, dtype, element_shape_except0, name, ctx):
  """Eager fallback for TensorArrayConcat; always fails because the op's
  'handle' argument is a ref tensor, which only exists in graph mode."""
  del handle, flow_in, dtype, element_shape_except0, name, ctx  # unused
  raise RuntimeError("tensor_array_concat op does not support eager execution. Arg 'handle' is a ref.")

6128_TensorArrayConcatV2Output = collections.namedtuple( 

6129 "TensorArrayConcatV2", 

6130 ["value", "lengths"]) 

6131 

6132 

def tensor_array_concat_v2(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Deprecated. Use TensorArrayConcatV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayConcatV2", name, handle, flow_in, "dtype", dtype,
        "element_shape_except0", element_shape_except0)
      _result = _TensorArrayConcatV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_concat_v2_eager_fallback(
          handle, flow_in, dtype=dtype,
          element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape_except0 is None:
    element_shape_except0 = None  # generator-emitted no-op; make_shape accepts None
  element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayConcatV2", handle=handle, flow_in=flow_in, dtype=dtype,
                               element_shape_except0=element_shape_except0,
                               name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape_except0",
              _op.get_attr("element_shape_except0"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayConcatV2", _inputs_flat, _attrs, _result)
  _result = _TensorArrayConcatV2Output._make(_result)
  return _result

TensorArrayConcatV2 = tf_export("raw_ops.TensorArrayConcatV2")(_ops.to_raw_op(tensor_array_concat_v2))

6188 

6189 

def tensor_array_concat_v2_eager_fallback(handle, flow_in, dtype, element_shape_except0, name, ctx):
  """Eager-mode slow path for TensorArrayConcatV2."""
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape_except0 is None:
    element_shape_except0 = None  # generator-emitted no-op; make_shape accepts None
  element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  _attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
  _result = _execute.execute(b"TensorArrayConcatV2", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayConcatV2", _inputs_flat, _attrs, _result)
  _result = _TensorArrayConcatV2Output._make(_result)
  return _result

6206 

6207_TensorArrayConcatV3Output = collections.namedtuple( 

6208 "TensorArrayConcatV3", 

6209 ["value", "lengths"]) 

6210 

6211 

def tensor_array_concat_v3(handle, flow_in, dtype, element_shape_except0=None, name=None):
  r"""Concat the elements from the TensorArray into value `value`.

  Takes `T` elements of shapes

  ```
  (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
  ```

  and concatenates them into a Tensor of shape:

  ```
  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)
  ```

  All elements must have the same shape (excepting the first dimension).

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    element_shape_except0: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known,
      excluding the first dimension. Used to validate the shapes of
      TensorArray elements. If this shape is not fully specified, concatenating
      zero-size TensorArrays is an error.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (value, lengths).

    value: A `Tensor` of type `dtype`.
    lengths: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayConcatV3", name, handle, flow_in, "dtype", dtype,
        "element_shape_except0", element_shape_except0)
      _result = _TensorArrayConcatV3Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_concat_v3_eager_fallback(
          handle, flow_in, dtype=dtype,
          element_shape_except0=element_shape_except0, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape_except0 is None:
    element_shape_except0 = None  # generator-emitted no-op; make_shape accepts None
  element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayConcatV3", handle=handle, flow_in=flow_in, dtype=dtype,
                               element_shape_except0=element_shape_except0,
                               name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape_except0",
              _op.get_attr("element_shape_except0"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayConcatV3", _inputs_flat, _attrs, _result)
  _result = _TensorArrayConcatV3Output._make(_result)
  return _result

TensorArrayConcatV3 = tf_export("raw_ops.TensorArrayConcatV3")(_ops.to_raw_op(tensor_array_concat_v3))

6286 

6287 

def tensor_array_concat_v3_eager_fallback(handle, flow_in, dtype, element_shape_except0, name, ctx):
  """Eager-mode slow path for TensorArrayConcatV3."""
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape_except0 is None:
    element_shape_except0 = None  # generator-emitted no-op; make_shape accepts None
  element_shape_except0 = _execute.make_shape(element_shape_except0, "element_shape_except0")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  _attrs = ("dtype", dtype, "element_shape_except0", element_shape_except0)
  _result = _execute.execute(b"TensorArrayConcatV3", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayConcatV3", _inputs_flat, _attrs, _result)
  _result = _TensorArrayConcatV3Output._make(_result)
  return _result

6304 

6305 

def tensor_array_gather(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Graph-mode wrapper for the TensorArrayGather op ('handle' is a ref).

  Args:
    handle: A `Tensor` of type mutable `string`.
    indices: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGather", handle=handle, indices=indices, flow_in=flow_in,
                             dtype=dtype, element_shape=element_shape,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGather", _inputs_flat, _attrs, _result)
  _result, = _result  # single output tensor
  return _result

TensorArrayGather = tf_export("raw_ops.TensorArrayGather")(_ops.to_raw_op(tensor_array_gather))

6344 

6345 

def tensor_array_gather_eager_fallback(handle, indices, flow_in, dtype, element_shape, name, ctx):
  """Eager fallback for TensorArrayGather; always fails because the op's
  'handle' argument is a ref tensor, which only exists in graph mode."""
  del handle, indices, flow_in, dtype, element_shape, name, ctx  # unused
  raise RuntimeError("tensor_array_gather op does not support eager execution. Arg 'handle' is a ref.")

6348 

def tensor_array_gather_v2(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Deprecated. Use TensorArrayGatherV3

  Args:
    handle: A `Tensor` of type `string`.
    indices: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayGatherV2", name, handle, indices, flow_in, "dtype",
        dtype, "element_shape", element_shape)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_gather_v2_eager_fallback(
          handle, indices, flow_in, dtype=dtype, element_shape=element_shape,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGatherV2", handle=handle, indices=indices,
                               flow_in=flow_in, dtype=dtype,
                               element_shape=element_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGatherV2", _inputs_flat, _attrs, _result)
  _result, = _result  # single output tensor
  return _result

TensorArrayGatherV2 = tf_export("raw_ops.TensorArrayGatherV2")(_ops.to_raw_op(tensor_array_gather_v2))

6401 

6402 

def tensor_array_gather_v2_eager_fallback(handle, indices, flow_in, dtype, element_shape, name, ctx):
  """Eager-mode slow path for TensorArrayGatherV2."""
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, indices, flow_in]
  _attrs = ("dtype", dtype, "element_shape", element_shape)
  _result = _execute.execute(b"TensorArrayGatherV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayGatherV2", _inputs_flat, _attrs, _result)
  _result, = _result  # single output tensor
  return _result

6420 

6421 

def tensor_array_gather_v3(handle, indices, flow_in, dtype, element_shape=None, name=None):
  r"""Gather specific elements from the TensorArray into output `value`.

  All elements selected by `indices` must have the same shape.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    indices: A `Tensor` of type `int32`.
      The locations in the TensorArray from which to read tensor elements.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known. Used to
      validate the shapes of TensorArray elements. If this shape is not
      fully specified, gathering zero-size TensorArrays is an error.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayGatherV3", name, handle, indices, flow_in, "dtype",
        dtype, "element_shape", element_shape)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_gather_v3_eager_fallback(
          handle, indices, flow_in, dtype=dtype, element_shape=element_shape,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGatherV3", handle=handle, indices=indices,
                               flow_in=flow_in, dtype=dtype,
                               element_shape=element_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGatherV3", _inputs_flat, _attrs, _result)
  _result, = _result  # single output tensor
  return _result

TensorArrayGatherV3 = tf_export("raw_ops.TensorArrayGatherV3")(_ops.to_raw_op(tensor_array_gather_v3))

6481 

6482 

def tensor_array_gather_v3_eager_fallback(handle, indices, flow_in, dtype, element_shape, name, ctx):
  """Eager-mode slow path for TensorArrayGatherV3."""
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # generator-emitted no-op; make_shape accepts None
  element_shape = _execute.make_shape(element_shape, "element_shape")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, indices, flow_in]
  _attrs = ("dtype", dtype, "element_shape", element_shape)
  _result = _execute.execute(b"TensorArrayGatherV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayGatherV3", _inputs_flat, _attrs, _result)
  _result, = _result  # single output tensor
  return _result

6500 

6501 

def tensor_array_grad(handle, flow_in, source, name=None):
  r"""Graph-mode wrapper for the TensorArrayGrad op ('grad_handle' is a ref).

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    source: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type mutable `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array_grad op does not support eager execution. Arg 'grad_handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  source = _execute.make_str(source, "source")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGrad", handle=handle, flow_in=flow_in, source=source,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("source", _op.get_attr("source"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGrad", _inputs_flat, _attrs, _result)
  _result, = _result  # single ref-string handle output
  return _result

TensorArrayGrad = tf_export("raw_ops.TensorArrayGrad")(_ops.to_raw_op(tensor_array_grad))

6533 

6534 

def tensor_array_grad_eager_fallback(handle, flow_in, source, name, ctx):
  """Eager fallback for TensorArrayGrad; always fails because the op's
  'grad_handle' output is a ref tensor, which only exists in graph mode."""
  del handle, flow_in, source, name, ctx  # unused: op cannot run eagerly
  raise RuntimeError("tensor_array_grad op does not support eager execution. Arg 'grad_handle' is a ref.")

6537 

def tensor_array_grad_v2(handle, flow_in, source, name=None):
  r"""Deprecated. Use TensorArrayGradV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    source: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch straight to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayGradV2", name, handle, flow_in, "source", source)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path refused the call; retry via the Python slow path below.
      pass
    try:
      return tensor_array_grad_v2_eager_fallback(
          handle, flow_in, source=source, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  source = _execute.make_str(source, "source")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGradV2", handle=handle, flow_in=flow_in, source=source,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("source", _op.get_attr("source"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGradV2", _inputs_flat, _attrs, _result)
  _result, = _result  # single string handle output
  return _result

TensorArrayGradV2 = tf_export("raw_ops.TensorArrayGradV2")(_ops.to_raw_op(tensor_array_grad_v2))

6581 

6582 

def tensor_array_grad_v2_eager_fallback(handle, flow_in, source, name, ctx):
  """Python eager-path implementation of TensorArrayGradV2."""
  source = _execute.make_str(source, "source")
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  op_inputs = [handle, flow_in]
  op_attrs = ("source", source)
  outputs = _execute.execute(b"TensorArrayGradV2", 1, inputs=op_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayGradV2", op_inputs, op_attrs, outputs)
  # Single output: the gradient TensorArray's handle.
  grad_handle, = outputs
  return grad_handle

6596 

# Structured return type for the two outputs of TensorArrayGradV3.
_TensorArrayGradV3Output = collections.namedtuple(
    "TensorArrayGradV3",
    ["grad_handle", "flow_out"])


def tensor_array_grad_v3(handle, flow_in, source, name=None):
  r"""Creates a TensorArray for storing the gradients of values in the given handle.

  If the given TensorArray gradient already exists, returns a reference to it.

  Locks the size of the original TensorArray by disabling its dynamic size flag.

  **A note about the input flow_in:**

  The handle flow_in forces the execution of the gradient lookup to occur
  only after certain other operations have occurred. For example, when
  the forward TensorArray is dynamically sized, writes to this TensorArray
  may resize the object. The gradient TensorArray is statically sized based
  on the size of the forward TensorArray when this operation executes.
  Furthermore, the size of the forward TensorArray is frozen by this call.
  As a result, the flow is used to ensure that the call to generate the gradient
  TensorArray only happens after all writes are executed.

  In the case of dynamically sized TensorArrays, gradient computation should
  only be performed on read operations that have themselves been chained via
  flow to occur only after all writes have executed. That way the final size
  of the forward TensorArray is known when this operation is called.

  **A note about the source attribute:**

  TensorArray gradient calls use an accumulator TensorArray object. If
  multiple gradients are calculated and run in the same session, the multiple
  gradient nodes may accidentally flow through the same accumulator TensorArray.
  This double counts and generally breaks the TensorArray gradient flow.

  The solution is to identify which gradient call this particular
  TensorArray gradient is being called in. This is performed by identifying
  a unique string (e.g. "gradients", "gradients_1", ...) from the input
  gradient Tensor's name. This string is used as a suffix when creating
  the TensorArray gradient object here (the attribute `source`).

  The attribute `source` is added as a suffix to the forward TensorArray's
  name when performing the creation / lookup, so that each separate gradient
  calculation gets its own TensorArray accumulator.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to the forward TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    source: A `string`.
      The gradient source string, used to decide which gradient TensorArray
      to return.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_handle, flow_out).

    grad_handle: A `Tensor` of type `resource`.
    flow_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayGradV3", name, handle, flow_in, "source", source)
      _result = _TensorArrayGradV3Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_grad_v3_eager_fallback(
          handle, flow_in, source=source, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  source = _execute.make_str(source, "source")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGradV3", handle=handle, flow_in=flow_in, source=source,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("source", _op.get_attr("source"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGradV3", _inputs_flat, _attrs, _result)
  # Two outputs: wrap as (grad_handle, flow_out).
  _result = _TensorArrayGradV3Output._make(_result)
  return _result

# Expose the op under tf.raw_ops.
TensorArrayGradV3 = tf_export("raw_ops.TensorArrayGradV3")(_ops.to_raw_op(tensor_array_grad_v3))

6690 

6691 

def tensor_array_grad_v3_eager_fallback(handle, flow_in, source, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  source = _execute.make_str(source, "source")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  _attrs = ("source", source)
  # Op has 2 outputs: (grad_handle, flow_out).
  _result = _execute.execute(b"TensorArrayGradV3", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayGradV3", _inputs_flat, _attrs, _result)
  _result = _TensorArrayGradV3Output._make(_result)
  return _result

6705 

# Structured return type for the two outputs of TensorArrayGradWithShape.
_TensorArrayGradWithShapeOutput = collections.namedtuple(
    "TensorArrayGradWithShape",
    ["grad_handle", "flow_out"])


def tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend, source, name=None):
  r"""Creates a TensorArray for storing multiple gradients of values in the given handle.

  Similar to TensorArrayGradV3. However it creates an accumulator with an
  expanded shape compared to the input TensorArray whose gradient is being
  computed. This enables multiple gradients for the same TensorArray to be
  calculated using the same accumulator.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to the forward TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    shape_to_prepend: A `Tensor` of type `int32`.
      An int32 vector representing a shape. Elements in the gradient accumulator will
      have shape which is this shape_to_prepend value concatenated with shape of the
      elements in the TensorArray corresponding to the input handle.
    source: A `string`.
      The gradient source string, used to decide which gradient TensorArray
      to return.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (grad_handle, flow_out).

    grad_handle: A `Tensor` of type `resource`.
    flow_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayGradWithShape", name, handle, flow_in,
        shape_to_prepend, "source", source)
      _result = _TensorArrayGradWithShapeOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_grad_with_shape_eager_fallback(
          handle, flow_in, shape_to_prepend, source=source, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  source = _execute.make_str(source, "source")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayGradWithShape", handle=handle, flow_in=flow_in,
                                    shape_to_prepend=shape_to_prepend,
                                    source=source, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("source", _op.get_attr("source"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayGradWithShape", _inputs_flat, _attrs, _result)
  # Two outputs: wrap as (grad_handle, flow_out).
  _result = _TensorArrayGradWithShapeOutput._make(_result)
  return _result

# Expose the op under tf.raw_ops.
TensorArrayGradWithShape = tf_export("raw_ops.TensorArrayGradWithShape")(_ops.to_raw_op(tensor_array_grad_with_shape))

6774 

6775 

def tensor_array_grad_with_shape_eager_fallback(handle, flow_in, shape_to_prepend, source, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  source = _execute.make_str(source, "source")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  shape_to_prepend = _ops.convert_to_tensor(shape_to_prepend, _dtypes.int32)
  _inputs_flat = [handle, flow_in, shape_to_prepend]
  _attrs = ("source", source)
  # Op has 2 outputs: (grad_handle, flow_out).
  _result = _execute.execute(b"TensorArrayGradWithShape", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayGradWithShape", _inputs_flat, _attrs, _result)
  _result = _TensorArrayGradWithShapeOutput._make(_result)
  return _result

6791 

6792 

def tensor_array_pack(handle, flow_in, dtype, element_shape=None, name=None):
  r"""Packs all elements of a V1 (ref-based) TensorArray into a single tensor.

  Graph-mode only: the ref-typed `handle` cannot be represented in eager
  mode, so eager execution raises immediately.

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If executed eagerly.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    raise RuntimeError("tensor_array_pack op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  # NOTE(review): the generated no-op branch
  # `if element_shape is None: element_shape = None` was removed; a None
  # element_shape reached make_shape unchanged either way.
  element_shape = _execute.make_shape(element_shape, "element_shape")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayPack", handle=handle, flow_in=flow_in, dtype=dtype,
                           element_shape=element_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayPack", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayPack = tf_export("raw_ops.TensorArrayPack")(_ops.to_raw_op(tensor_array_pack))

6829 

6830 

def tensor_array_pack_eager_fallback(handle, flow_in, dtype, element_shape, name, ctx):
  # Kept only for interface parity with other generated ops;
  # TensorArrayPack can never run eagerly because its handle is a ref.
  raise RuntimeError("tensor_array_pack op does not support eager execution. Arg 'handle' is a ref.")

6833 

def tensor_array_read(handle, index, flow_in, dtype, name=None):
  r"""Reads element `index` from a V1 (ref-based) TensorArray.

  Graph-mode only: the ref-typed `handle` cannot be represented in eager
  mode.  See `tensor_array_read_v3` for the op's semantics.

  Args:
    handle: A `Tensor` of type mutable `string`.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.

  Raises:
    RuntimeError: If executed eagerly.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref inputs are unsupported in eager mode, so fail fast.
    raise RuntimeError("tensor_array_read op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayRead", handle=handle, index=index, flow_in=flow_in,
                           dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayRead", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayRead = tf_export("raw_ops.TensorArrayRead")(_ops.to_raw_op(tensor_array_read))

6866 

6867 

def tensor_array_read_eager_fallback(handle, index, flow_in, dtype, name, ctx):
  # Kept only for interface parity with other generated ops;
  # TensorArrayRead can never run eagerly because its handle is a ref.
  raise RuntimeError("tensor_array_read op does not support eager execution. Arg 'handle' is a ref.")

6870 

def tensor_array_read_v2(handle, index, flow_in, dtype, name=None):
  r"""Deprecated. Use TensorArrayReadV3

  Args:
    handle: A `Tensor` of type `string`.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
    dtype: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayReadV2", name, handle, index, flow_in, "dtype",
        dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_read_v2_eager_fallback(
          handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayReadV2", handle=handle, index=index, flow_in=flow_in,
                             dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayReadV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayReadV2 = tf_export("raw_ops.TensorArrayReadV2")(_ops.to_raw_op(tensor_array_read_v2))

6916 

6917 

def tensor_array_read_v2_eager_fallback(handle, index, flow_in, dtype, name, ctx):
  """Python eager-path implementation of TensorArrayReadV2."""
  dtype = _execute.make_type(dtype, "dtype")
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  op_inputs = [handle, index, flow_in]
  op_attrs = ("dtype", dtype)
  outputs = _execute.execute(b"TensorArrayReadV2", 1, inputs=op_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayReadV2", op_inputs, op_attrs, outputs)
  # Single output: the element read from the TensorArray.
  value, = outputs
  return value

6932 

6933 

def tensor_array_read_v3(handle, index, flow_in, dtype, name=None):
  r"""Read an element from the TensorArray into output `value`.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    index: A `Tensor` of type `int32`.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    dtype: A `tf.DType`. The type of the elem that is returned.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayReadV3", name, handle, index, flow_in, "dtype",
        dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_read_v3_eager_fallback(
          handle, index, flow_in, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayReadV3", handle=handle, index=index, flow_in=flow_in,
                             dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayReadV3", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayReadV3 = tf_export("raw_ops.TensorArrayReadV3")(_ops.to_raw_op(tensor_array_read_v3))

6980 

6981 

def tensor_array_read_v3_eager_fallback(handle, index, flow_in, dtype, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  dtype = _execute.make_type(dtype, "dtype")
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, index, flow_in]
  _attrs = ("dtype", dtype)
  _result = _execute.execute(b"TensorArrayReadV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayReadV3", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

6996 

6997 

def tensor_array_scatter(handle, indices, value, flow_in, name=None):
  r"""Scatters `value` rows into a V1 (ref-based) TensorArray at `indices`.

  Graph-mode only: the ref-typed `handle` cannot be represented in eager
  mode.  See `tensor_array_scatter_v3` for the op's semantics.

  Args:
    handle: A `Tensor` of type mutable `string`.
    indices: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.

  Raises:
    RuntimeError: If executed eagerly.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref inputs are unsupported in eager mode, so fail fast.
    raise RuntimeError("tensor_array_scatter op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayScatter", handle=handle, indices=indices, value=value,
                              flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayScatter", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayScatter = tf_export("raw_ops.TensorArrayScatter")(_ops.to_raw_op(tensor_array_scatter))

7029 

7030 

def tensor_array_scatter_eager_fallback(handle, indices, value, flow_in, name, ctx):
  # Kept only for interface parity with other generated ops;
  # TensorArrayScatter can never run eagerly because its handle is a ref.
  raise RuntimeError("tensor_array_scatter op does not support eager execution. Arg 'handle' is a ref.")

7033 

def tensor_array_scatter_v2(handle, indices, value, flow_in, name=None):
  r"""Deprecated. Use TensorArrayScatterV3

  Args:
    handle: A `Tensor` of type `string`.
    indices: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayScatterV2", name, handle, indices, value, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_scatter_v2_eager_fallback(
          handle, indices, value, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayScatterV2", handle=handle, indices=indices, value=value,
                                flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayScatterV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayScatterV2 = tf_export("raw_ops.TensorArrayScatterV2")(_ops.to_raw_op(tensor_array_scatter_v2))

7077 

7078 

def tensor_array_scatter_v2_eager_fallback(handle, indices, value, flow_in, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  # Infer the "T" attr from the (single) polymorphic input `value`.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, indices, value, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArrayScatterV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayScatterV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

7093 

7094 

def tensor_array_scatter_v3(handle, indices, value, flow_in, name=None):
  r"""Scatter the data from the input value into specific TensorArray elements.

  `indices` must be a vector, its length must match the first dim of `value`.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    indices: A `Tensor` of type `int32`.
      The locations at which to write the tensor elements.
    value: A `Tensor`. The concatenated tensor to write to the TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayScatterV3", name, handle, indices, value, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_scatter_v3_eager_fallback(
          handle, indices, value, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayScatterV3", handle=handle, indices=indices, value=value,
                                flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayScatterV3", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArrayScatterV3 = tf_export("raw_ops.TensorArrayScatterV3")(_ops.to_raw_op(tensor_array_scatter_v3))

7142 

7143 

def tensor_array_scatter_v3_eager_fallback(handle, indices, value, flow_in, name, ctx):
  """Python eager-path implementation of TensorArrayScatterV3."""
  # Infer the "T" attr from the polymorphic `value` input.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  indices = _ops.convert_to_tensor(indices, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  op_inputs = [handle, indices, value, flow_in]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"TensorArrayScatterV3", 1, inputs=op_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayScatterV3", op_inputs, op_attrs, outputs)
  # Single output: the chained flow scalar.
  flow_out, = outputs
  return flow_out

7158 

7159 

def tensor_array_size(handle, flow_in, name=None):
  r"""Returns the current size of a V1 (ref-based) TensorArray.

  Graph-mode only: the ref-typed `handle` cannot be represented in eager
  mode.  See `tensor_array_size_v3` for the op's semantics.

  Args:
    handle: A `Tensor` of type mutable `string`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  Raises:
    RuntimeError: If executed eagerly.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref inputs are unsupported in eager mode, so fail fast.
    raise RuntimeError("tensor_array_size op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySize", handle=handle, flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Op has no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySize", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArraySize = tf_export("raw_ops.TensorArraySize")(_ops.to_raw_op(tensor_array_size))

7188 

7189 

def tensor_array_size_eager_fallback(handle, flow_in, name, ctx):
  # Kept only for interface parity with other generated ops;
  # TensorArraySize can never run eagerly because its handle is a ref.
  raise RuntimeError("tensor_array_size op does not support eager execution. Arg 'handle' is a ref.")

7192 

def tensor_array_size_v2(handle, flow_in, name=None):
  r"""Deprecated. Use TensorArraySizeV3

  Args:
    handle: A `Tensor` of type `string`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArraySizeV2", name, handle, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_size_v2_eager_fallback(
          handle, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySizeV2", handle=handle, flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Op has no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySizeV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArraySizeV2 = tf_export("raw_ops.TensorArraySizeV2")(_ops.to_raw_op(tensor_array_size_v2))

7233 

7234 

def tensor_array_size_v2_eager_fallback(handle, flow_in, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  # Op has no attrs.
  _attrs = None
  _result = _execute.execute(b"TensorArraySizeV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArraySizeV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

7247 

7248 

def tensor_array_size_v3(handle, flow_in, name=None):
  r"""Get the current size of the TensorArray.

  Args:
    handle: A `Tensor` of type `resource`.
      The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArraySizeV3", name, handle, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path declined; try the Python eager fallback below.
    try:
      return tensor_array_size_v3_eager_fallback(
          handle, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode (or symbolic fallback): build the op node.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySizeV3", handle=handle, flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Op has no attrs.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySizeV3", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the op under tf.raw_ops.
TensorArraySizeV3 = tf_export("raw_ops.TensorArraySizeV3")(_ops.to_raw_op(tensor_array_size_v3))

7291 

7292 

def tensor_array_size_v3_eager_fallback(handle, flow_in, name, ctx):
  # Python eager fallback used when the C++ fast path declines.
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, flow_in]
  # Op has no attrs.
  _attrs = None
  _result = _execute.execute(b"TensorArraySizeV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArraySizeV3", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

7305 

7306 

def tensor_array_split(handle, value, lengths, flow_in, name=None):
  r"""Splits `value` into TensorArray elements (legacy ref-based V1 op).

  Graph-mode only: `handle` is a mutable (reference-typed) string, which eager
  execution cannot represent. Presumably the semantics match
  `tensor_array_split_v3` — confirm against the op registry.

  Args:
    handle: A `Tensor` of type mutable `string`.
    value: A `Tensor`.
    lengths: A `Tensor` of type `int64`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs have no eager representation; fail loudly.
    raise RuntimeError("tensor_array_split op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySplit", handle=handle, value=value, lengths=lengths,
                            flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySplit", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7336 

# Export the raw-op endpoint: tf.raw_ops.TensorArraySplit dispatches to tensor_array_split.
TensorArraySplit = tf_export("raw_ops.TensorArraySplit")(_ops.to_raw_op(tensor_array_split))

7338 

7339 

def tensor_array_split_eager_fallback(handle, value, lengths, flow_in, name, ctx):
  """Eager fallback stub: the ref-typed TensorArraySplit op cannot run eagerly."""
  raise RuntimeError("tensor_array_split op does not support eager execution. Arg 'handle' is a ref.")

7342 

def tensor_array_split_v2(handle, value, lengths, flow_in, name=None):
  r"""Deprecated. Use TensorArraySplitV3

  Args:
    handle: A `Tensor` of type `string`.
    value: A `Tensor`.
    lengths: A `Tensor` of type `int64`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArraySplitV2", name, handle, value, lengths, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_split_v2_eager_fallback(
          handle, value, lengths, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySplitV2", handle=handle, value=value, lengths=lengths,
                              flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySplitV2", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7384 

# Export the raw-op endpoint: tf.raw_ops.TensorArraySplitV2 dispatches to tensor_array_split_v2.
TensorArraySplitV2 = tf_export("raw_ops.TensorArraySplitV2")(_ops.to_raw_op(tensor_array_split_v2))

7386 

7387 

def tensor_array_split_v2_eager_fallback(handle, value, lengths, flow_in, name, ctx):
  """Slow-path eager execution of TensorArraySplitV2 via _execute.execute."""
  # Infer the "T" attr from `value`; all other inputs have fixed dtypes.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, value, lengths, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArraySplitV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArraySplitV2", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7402 

7403 

def tensor_array_split_v3(handle, value, lengths, flow_in, name=None):
  r"""Split the data from the input value into TensorArray elements.

  Assuming that `lengths` takes on values

  ```
  (n0, n1, ..., n(T-1))
  ```

  and that `value` has shape

  ```
  (n0 + n1 + ... + n(T-1) x d0 x d1 x ...),
  ```

  this splits values into a TensorArray with T tensors.

  TensorArray index t will be the subtensor of values with starting position

  ```
  (n0 + n1 + ... + n(t-1), 0, 0, ...)
  ```

  and having size

  ```
  nt x d0 x d1 x ...
  ```

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    value: A `Tensor`. The concatenated tensor to write to the TensorArray.
    lengths: A `Tensor` of type `int64`.
      The vector of lengths, how to split the rows of value into the
      TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArraySplitV3", name, handle, value, lengths, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_split_v3_eager_fallback(
          handle, value, lengths, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArraySplitV3", handle=handle, value=value, lengths=lengths,
                              flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArraySplitV3", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7474 

# Export the raw-op endpoint: tf.raw_ops.TensorArraySplitV3 dispatches to tensor_array_split_v3.
TensorArraySplitV3 = tf_export("raw_ops.TensorArraySplitV3")(_ops.to_raw_op(tensor_array_split_v3))

7476 

7477 

def tensor_array_split_v3_eager_fallback(handle, value, lengths, flow_in, name, ctx):
  """Slow-path eager execution of TensorArraySplitV3 via _execute.execute."""
  # Infer the "T" attr from `value`; all other inputs have fixed dtypes.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  lengths = _ops.convert_to_tensor(lengths, _dtypes.int64)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, value, lengths, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArraySplitV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArraySplitV3", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7492 

7493 

def tensor_array_unpack(handle, value, flow_in, name=None):
  r"""Unpacks `value` into TensorArray elements (legacy ref-based V1 op).

  Graph-mode only: `handle` is a mutable (reference-typed) string, which eager
  execution cannot represent.

  Args:
    handle: A `Tensor` of type mutable `string`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs have no eager representation; fail loudly.
    raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayUnpack", handle=handle, value=value, flow_in=flow_in,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayUnpack", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7522 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayUnpack dispatches to tensor_array_unpack.
TensorArrayUnpack = tf_export("raw_ops.TensorArrayUnpack")(_ops.to_raw_op(tensor_array_unpack))

7524 

7525 

def tensor_array_unpack_eager_fallback(handle, value, flow_in, name, ctx):
  """Eager fallback stub: the ref-typed TensorArrayUnpack op cannot run eagerly."""
  raise RuntimeError("tensor_array_unpack op does not support eager execution. Arg 'handle' is a ref.")

7528 

def tensor_array_v2(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, tensor_array_name="", name=None):
  r"""Deprecated. Use TensorArrayV3

  Args:
    size: A `Tensor` of type `int32`.
    dtype: A `tf.DType`.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
    dynamic_size: An optional `bool`. Defaults to `False`.
    clear_after_read: An optional `bool`. Defaults to `True`.
    tensor_array_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayV2", name, size, "dtype", dtype, "element_shape",
        element_shape, "dynamic_size", dynamic_size, "clear_after_read",
        clear_after_read, "tensor_array_name", tensor_array_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_v2_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize/validate attrs before building the node.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # make_shape treats None as an unknown shape
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayV2", size=size, dtype=dtype, element_shape=element_shape,
                         dynamic_size=dynamic_size,
                         clear_after_read=clear_after_read,
                         tensor_array_name=tensor_array_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the final attr values back from the created op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op._get_attr_bool("dynamic_size"), "clear_after_read",
              _op._get_attr_bool("clear_after_read"), "tensor_array_name",
              _op.get_attr("tensor_array_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayV2", _inputs_flat, _attrs, _result)
  # Single output (the string handle).
  _result, = _result
  return _result

7595 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayV2 dispatches to tensor_array_v2.
TensorArrayV2 = tf_export("raw_ops.TensorArrayV2")(_ops.to_raw_op(tensor_array_v2))

7597 

7598 

def tensor_array_v2_eager_fallback(size, dtype, element_shape, dynamic_size, clear_after_read, tensor_array_name, name, ctx):
  """Slow-path eager execution of TensorArrayV2 via _execute.execute."""
  # Normalize/validate attrs exactly as the graph path does.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # make_shape treats None as an unknown shape
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  _inputs_flat = [size]
  _attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
  dynamic_size, "clear_after_read", clear_after_read, "tensor_array_name",
  tensor_array_name)
  _result = _execute.execute(b"TensorArrayV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayV2", _inputs_flat, _attrs, _result)
  # Single output (the string handle).
  _result, = _result
  return _result

7625 

# Named result type for TensorArrayV3, which returns two tensors:
# the resource `handle` and the float32 `flow` scalar.
_TensorArrayV3Output = collections.namedtuple(
    "TensorArrayV3",
    ["handle", "flow"])

7629 

7630 

def tensor_array_v3(size, dtype, element_shape=None, dynamic_size=False, clear_after_read=True, identical_element_shapes=False, tensor_array_name="", name=None):
  r"""An array of Tensors of given size.

  Write data via Write and read via Read or Pack.

  Args:
    size: A `Tensor` of type `int32`. The size of the array.
    dtype: A `tf.DType`. The type of the elements on the tensor_array.
    element_shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.
      The expected shape of an element, if known. Used to
      validate the shapes of TensorArray elements. If this shape is not
      fully specified, gathering zero-size TensorArrays is an error.
    dynamic_size: An optional `bool`. Defaults to `False`.
      A boolean that determines whether writes to the TensorArray
      are allowed to grow the size. By default, this is not allowed.
    clear_after_read: An optional `bool`. Defaults to `True`.
      If true (default), Tensors in the TensorArray are cleared
      after being read. This disables multiple read semantics but allows early
      release of memory.
    identical_element_shapes: An optional `bool`. Defaults to `False`.
      If true (default is false), then all
      elements in the TensorArray will be expected to have identical shapes.
      This allows certain behaviors, like dynamically checking for
      consistent shapes on write, and being able to fill in properly
      shaped zero tensors on stack -- even if the element_shape attribute
      is not fully defined.
    tensor_array_name: An optional `string`. Defaults to `""`.
      Overrides the name used for the temporary tensor_array
      resource. Default value is the name of the 'TensorArray' op (which
      is guaranteed unique).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (handle, flow).

    handle: A `Tensor` of type `resource`.
    flow: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayV3", name, size, "dtype", dtype, "element_shape",
        element_shape, "dynamic_size", dynamic_size, "clear_after_read",
        clear_after_read, "identical_element_shapes",
        identical_element_shapes, "tensor_array_name", tensor_array_name)
      _result = _TensorArrayV3Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_v3_eager_fallback(
          size, dtype=dtype, element_shape=element_shape,
          dynamic_size=dynamic_size, clear_after_read=clear_after_read,
          identical_element_shapes=identical_element_shapes,
          tensor_array_name=tensor_array_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize/validate attrs before building the node.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # make_shape treats None as an unknown shape
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if identical_element_shapes is None:
    identical_element_shapes = False
  identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayV3", size=size, dtype=dtype, element_shape=element_shape,
                         dynamic_size=dynamic_size,
                         clear_after_read=clear_after_read,
                         identical_element_shapes=identical_element_shapes,
                         tensor_array_name=tensor_array_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Read the final attr values back from the created op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "element_shape",
              _op.get_attr("element_shape"), "dynamic_size",
              _op._get_attr_bool("dynamic_size"), "clear_after_read",
              _op._get_attr_bool("clear_after_read"),
              "identical_element_shapes",
              _op._get_attr_bool("identical_element_shapes"),
              "tensor_array_name", _op.get_attr("tensor_array_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayV3", _inputs_flat, _attrs, _result)
  # Wrap the two outputs (handle, flow) in the named tuple.
  _result = _TensorArrayV3Output._make(_result)
  return _result

7729 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayV3 dispatches to tensor_array_v3.
TensorArrayV3 = tf_export("raw_ops.TensorArrayV3")(_ops.to_raw_op(tensor_array_v3))

7731 

7732 

def tensor_array_v3_eager_fallback(size, dtype, element_shape, dynamic_size, clear_after_read, identical_element_shapes, tensor_array_name, name, ctx):
  """Slow-path eager execution of TensorArrayV3 via _execute.execute."""
  # Normalize/validate attrs exactly as the graph path does.
  dtype = _execute.make_type(dtype, "dtype")
  if element_shape is None:
    element_shape = None  # make_shape treats None as an unknown shape
  element_shape = _execute.make_shape(element_shape, "element_shape")
  if dynamic_size is None:
    dynamic_size = False
  dynamic_size = _execute.make_bool(dynamic_size, "dynamic_size")
  if clear_after_read is None:
    clear_after_read = True
  clear_after_read = _execute.make_bool(clear_after_read, "clear_after_read")
  if identical_element_shapes is None:
    identical_element_shapes = False
  identical_element_shapes = _execute.make_bool(identical_element_shapes, "identical_element_shapes")
  if tensor_array_name is None:
    tensor_array_name = ""
  tensor_array_name = _execute.make_str(tensor_array_name, "tensor_array_name")
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  _inputs_flat = [size]
  _attrs = ("dtype", dtype, "element_shape", element_shape, "dynamic_size",
  dynamic_size, "clear_after_read", clear_after_read,
  "identical_element_shapes", identical_element_shapes, "tensor_array_name",
  tensor_array_name)
  # TensorArrayV3 has two outputs: (handle, flow).
  _result = _execute.execute(b"TensorArrayV3", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayV3", _inputs_flat, _attrs, _result)
  _result = _TensorArrayV3Output._make(_result)
  return _result

7763 

7764 

def tensor_array_write(handle, index, value, flow_in, name=None):
  r"""Writes `value` at `index` into the TensorArray (legacy ref-based V1 op).

  Graph-mode only: `handle` is a mutable (reference-typed) string, which eager
  execution cannot represent.

  Args:
    handle: A `Tensor` of type mutable `string`.
    index: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Ref-typed inputs have no eager representation; fail loudly.
    raise RuntimeError("tensor_array_write op does not support eager execution. Arg 'handle' is a ref.")
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayWrite", handle=handle, index=index, value=value,
                            flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayWrite", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7794 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayWrite dispatches to tensor_array_write.
TensorArrayWrite = tf_export("raw_ops.TensorArrayWrite")(_ops.to_raw_op(tensor_array_write))

7796 

7797 

def tensor_array_write_eager_fallback(handle, index, value, flow_in, name, ctx):
  """Eager fallback stub: the ref-typed TensorArrayWrite op cannot run eagerly."""
  raise RuntimeError("tensor_array_write op does not support eager execution. Arg 'handle' is a ref.")

7800 

def tensor_array_write_v2(handle, index, value, flow_in, name=None):
  r"""Deprecated. Use TensorArrayWriteV3

  (The generated deprecation note previously pointed at TensorArrayGradV3;
  the resource-based successor of this write op is TensorArrayWriteV3.)

  Args:
    handle: A `Tensor` of type `string`.
    index: A `Tensor` of type `int32`.
    value: A `Tensor`.
    flow_in: A `Tensor` of type `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayWriteV2", name, handle, index, value, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_write_v2_eager_fallback(
          handle, index, value, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayWriteV2", handle=handle, index=index, value=value,
                              flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayWriteV2", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7842 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayWriteV2 dispatches to tensor_array_write_v2.
TensorArrayWriteV2 = tf_export("raw_ops.TensorArrayWriteV2")(_ops.to_raw_op(tensor_array_write_v2))

7844 

7845 

def tensor_array_write_v2_eager_fallback(handle, index, value, flow_in, name, ctx):
  """Slow-path eager execution of TensorArrayWriteV2 via _execute.execute."""
  # Infer the "T" attr from `value`; all other inputs have fixed dtypes.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.string)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, index, value, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArrayWriteV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayWriteV2", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7860 

7861 

def tensor_array_write_v3(handle, index, value, flow_in, name=None):
  r"""Push an element onto the tensor_array.

  Args:
    handle: A `Tensor` of type `resource`. The handle to a TensorArray.
    index: A `Tensor` of type `int32`.
      The position to write to inside the TensorArray.
    value: A `Tensor`. The tensor to write to the TensorArray.
    flow_in: A `Tensor` of type `float32`.
      A float scalar that enforces proper chaining of operations.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TensorArrayWriteV3", name, handle, index, value, flow_in)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return tensor_array_write_v3_eager_fallback(
          handle, index, value, flow_in, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TensorArrayWriteV3", handle=handle, index=index, value=value,
                              flow_in=flow_in, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "T" is the dtype attr of `value`, read back from the created op.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TensorArrayWriteV3", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7905 

# Export the raw-op endpoint: tf.raw_ops.TensorArrayWriteV3 dispatches to tensor_array_write_v3.
TensorArrayWriteV3 = tf_export("raw_ops.TensorArrayWriteV3")(_ops.to_raw_op(tensor_array_write_v3))

7907 

7908 

def tensor_array_write_v3_eager_fallback(handle, index, value, flow_in, name, ctx):
  """Slow-path eager execution of TensorArrayWriteV3 via _execute.execute."""
  # Infer the "T" attr from `value`; all other inputs have fixed dtypes.
  _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [])
  handle = _ops.convert_to_tensor(handle, _dtypes.resource)
  index = _ops.convert_to_tensor(index, _dtypes.int32)
  flow_in = _ops.convert_to_tensor(flow_in, _dtypes.float32)
  _inputs_flat = [handle, index, value, flow_in]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TensorArrayWriteV3", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TensorArrayWriteV3", _inputs_flat, _attrs, _result)
  # Single output (the updated flow scalar).
  _result, = _result
  return _result

7923 

7924 

def unstage(dtypes, capacity=0, memory_limit=0, container="", shared_name="", name=None):
  r"""Op is similar to a lightweight Dequeue.

  The basic functionality is similar to dequeue with many fewer
  capabilities and options.  This Op is optimized for performance.

  Args:
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    capacity: An optional `int` that is `>= 0`. Defaults to `0`.
    memory_limit: An optional `int` that is `>= 0`. Defaults to `0`.
    container: An optional `string`. Defaults to `""`.
    shared_name: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Unstage", name, "capacity", capacity, "memory_limit",
        memory_limit, "dtypes", dtypes, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # fall through to the slower Python eager path
    try:
      return unstage_eager_fallback(
          capacity=capacity, memory_limit=memory_limit, dtypes=dtypes,
          container=container, shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize/validate attrs before building the node.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Unstage", dtypes=dtypes, capacity=capacity,
                   memory_limit=memory_limit, container=container,
                   shared_name=shared_name, name=name)
  _result = _outputs[:]
  if not _result:
    # No outputs: return the Operation itself (generated-code convention).
    return _op
  if _execute.must_record_gradient():
    # Read the final attr values back from the created op.
    _attrs = ("capacity", _op._get_attr_int("capacity"), "memory_limit",
              _op._get_attr_int("memory_limit"), "dtypes",
              _op.get_attr("dtypes"), "container", _op.get_attr("container"),
              "shared_name", _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Unstage", _inputs_flat, _attrs, _result)
  # Variable-length output list: return as-is (no unwrapping).
  return _result

7995 

# Export the raw-op endpoint: tf.raw_ops.Unstage dispatches to unstage.
Unstage = tf_export("raw_ops.Unstage")(_ops.to_raw_op(unstage))

7997 

7998 

def unstage_eager_fallback(dtypes, capacity, memory_limit, container, shared_name, name, ctx):
  """Slow-path eager execution of Unstage via _execute.execute."""
  # Normalize/validate attrs exactly as the graph path does.
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'unstage' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  if capacity is None:
    capacity = 0
  capacity = _execute.make_int(capacity, "capacity")
  if memory_limit is None:
    memory_limit = 0
  memory_limit = _execute.make_int(memory_limit, "memory_limit")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _inputs_flat = []
  _attrs = ("capacity", capacity, "memory_limit", memory_limit, "dtypes",
  dtypes, "container", container, "shared_name", shared_name)
  # The number of outputs equals the number of requested dtypes.
  _result = _execute.execute(b"Unstage", len(dtypes), inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Unstage", _inputs_flat, _attrs, _result)
  return _result

8026