Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_tpu_ops.py: 7%

3237 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

24def all_to_all(input, group_assignment, concat_dimension, split_dimension, split_count, name=None): 

25 r"""An Op to exchange data across TPU replicas. 

26 

27 On each replica, the input is split into `split_count` blocks along 

28 `split_dimension` and sent to the other replicas given group_assignment. After 

29 receiving `split_count` - 1 blocks from other replicas, we concatenate the 

30 blocks along `concat_dimension` as the output. 

31 

32 For example, suppose there are 2 TPU replicas: 

33 replica 0 receives input: `[[A, B]]` 

34 replica 1 receives input: `[[C, D]]` 

35 

36 group_assignment=`[[0, 1]]` 

37 concat_dimension=0 

38 split_dimension=1 

39 split_count=2 

40 

41 replica 0's output: `[[A], [C]]` 

42 replica 1's output: `[[B], [D]]` 

43 

44 Args: 

45 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`. 

46 The local input to the all-to-all exchange. 

47 group_assignment: A `Tensor` of type `int32`. An int32 tensor with shape 

48 [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the 

49 replica ids in the ith subgroup. 

50 concat_dimension: An `int`. The dimension number to concatenate. 

51 split_dimension: An `int`. The dimension number to split. 

52 split_count: An `int`. 

53 The number of splits; this number must equal the sub-group 

54 size (group_assignment.get_shape()[1]). 

55 name: A name for the operation (optional). 

56 

57 Returns: 

58 A `Tensor`. Has the same type as `input`. 

59 """ 

60 _ctx = _context._context or _context.context() 

61 tld = _ctx._thread_local_data 

62 if tld.is_eager: 

63 try: 

64 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

65 _ctx, "AllToAll", name, input, group_assignment, "concat_dimension", 

66 concat_dimension, "split_dimension", split_dimension, "split_count", 

67 split_count) 

68 return _result 

69 except _core._NotOkStatusException as e: 

70 _ops.raise_from_not_ok_status(e, name) 

71 except _core._FallbackException: 

72 pass 

73 try: 

74 return all_to_all_eager_fallback( 

75 input, group_assignment, concat_dimension=concat_dimension, 

76 split_dimension=split_dimension, split_count=split_count, name=name, 

77 ctx=_ctx) 

78 except _core._SymbolicException: 

79 pass # Add nodes to the TensorFlow graph. 

80 # Add nodes to the TensorFlow graph. 

81 concat_dimension = _execute.make_int(concat_dimension, "concat_dimension") 

82 split_dimension = _execute.make_int(split_dimension, "split_dimension") 

83 split_count = _execute.make_int(split_count, "split_count") 

84 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

85 "AllToAll", input=input, group_assignment=group_assignment, 

86 concat_dimension=concat_dimension, 

87 split_dimension=split_dimension, split_count=split_count, 

88 name=name) 

89 _result = _outputs[:] 

90 if _execute.must_record_gradient(): 

91 _attrs = ("T", _op._get_attr_type("T"), "concat_dimension", 

92 _op._get_attr_int("concat_dimension"), "split_dimension", 

93 _op._get_attr_int("split_dimension"), "split_count", 

94 _op._get_attr_int("split_count")) 

95 _inputs_flat = _op.inputs 

96 _execute.record_gradient( 

97 "AllToAll", _inputs_flat, _attrs, _result) 

98 _result, = _result 

99 return _result 

100 

101AllToAll = tf_export("raw_ops.AllToAll")(_ops.to_raw_op(all_to_all)) 

102 

103 

104def all_to_all_eager_fallback(input, group_assignment, concat_dimension, split_dimension, split_count, name, ctx): 

105 concat_dimension = _execute.make_int(concat_dimension, "concat_dimension") 

106 split_dimension = _execute.make_int(split_dimension, "split_dimension") 

107 split_count = _execute.make_int(split_count, "split_count") 

108 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ]) 

109 group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) 

110 _inputs_flat = [input, group_assignment] 

111 _attrs = ("T", _attr_T, "concat_dimension", concat_dimension, 

112 "split_dimension", split_dimension, "split_count", split_count) 

113 _result = _execute.execute(b"AllToAll", 1, inputs=_inputs_flat, 

114 attrs=_attrs, ctx=ctx, name=name) 

115 if _execute.must_record_gradient(): 

116 _execute.record_gradient( 

117 "AllToAll", _inputs_flat, _attrs, _result) 

118 _result, = _result 

119 return _result 
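
# Example (illustrative sketch, not generated): how the all_to_all wrapper is
# typically invoked from within a TPU replica context. The two-replica layout
# mirrors the docstring example above; `replica_local_input` is a hypothetical
# per-replica tensor, and the op fails at runtime on non-TPU devices.
def _example_all_to_all(replica_local_input):
  # Split along dimension 1 into 2 blocks, exchange within group [0, 1], and
  # concatenate the received blocks along dimension 0.
  return all_to_all(
      input=replica_local_input,
      group_assignment=[[0, 1]],
      concat_dimension=0,
      split_dimension=1,
      split_count=2)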

120 

121 

122def assign_variable_xla_concat_nd(resource, inputs, num_concats, paddings=[], name=None): 

123 r"""Concats input tensor across all dimensions. 

124 

125 An op which merges slices the input tensor based on the given num_splits 

126 attribute, strips paddings optionally, and writes the merged tensor without 

127 paddings to the resource variable. 

128 

129 This op may be generated via the TPU bridge. 

130 

131 For example, with `input` tensor: 

132 ``` 

133 [[0, 1], 

134 [4, 5]] 

135 [[2, 3], 

136 [6, 7]] 

137 [[8, 9], 

138 [12, 13]] 

139 [[10, 11], 

140 [14, 15]] 

141 ``` 

142 `num_concats`: 

143 ``` 

144 [2, 2] 

145 ``` 

146 and `paddings`: 

147 ``` 

148 [1, 1] 

149 ``` 

150 the expected `outputs` is: 

151 ``` 

152 [[0, 1, 2], 

153 [4, 5, 6], 

154 [8, 9, 10]] 

155 ``` 

156 

157 Args: 

158 resource: A `Tensor` of type `resource`. 

159 Resource variable for the concatenated input tensors across all dimensions. 

160 inputs: A list of at least 1 `Tensor` objects with the same type. 

161 Input tensor slices in row-major order to merge across all dimensions. All 

162 inputs must have the same shape. 

163 num_concats: A list of `ints`. Number of ways to merge per dimension. 

164 paddings: An optional list of `ints`. Defaults to `[]`. 

165 Optional list of right paddings per dimension to strip from the final merged 

166 tensor. These paddings must not exceed the dimension size of the merged result 

167 prior to stripping paddings. 

168 name: A name for the operation (optional). 

178 

179 Returns: 

180 The created Operation. 

181 """ 

182 _ctx = _context._context or _context.context() 

183 tld = _ctx._thread_local_data 

184 if tld.is_eager: 

185 try: 

186 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

187 _ctx, "AssignVariableXlaConcatND", name, resource, inputs, 

188 "num_concats", num_concats, "paddings", paddings) 

189 return _result 

190 except _core._NotOkStatusException as e: 

191 _ops.raise_from_not_ok_status(e, name) 

192 except _core._FallbackException: 

193 pass 

194 try: 

195 return assign_variable_xla_concat_nd_eager_fallback( 

196 resource, inputs, num_concats=num_concats, paddings=paddings, 

197 name=name, ctx=_ctx) 

198 except _core._SymbolicException: 

199 pass # Add nodes to the TensorFlow graph. 

200 # Add nodes to the TensorFlow graph. 

201 if not isinstance(inputs, (list, tuple)): 

202 raise TypeError( 

203 "Expected list for 'inputs' argument to " 

204 "'assign_variable_xla_concat_nd' Op, not %r." % inputs) 

205 _attr_N = len(inputs) 

206 if not isinstance(num_concats, (list, tuple)): 

207 raise TypeError( 

208 "Expected list for 'num_concats' argument to " 

209 "'assign_variable_xla_concat_nd' Op, not %r." % num_concats) 

210 num_concats = [_execute.make_int(_i, "num_concats") for _i in num_concats] 

211 if paddings is None: 

212 paddings = [] 

213 if not isinstance(paddings, (list, tuple)): 

214 raise TypeError( 

215 "Expected list for 'paddings' argument to " 

216 "'assign_variable_xla_concat_nd' Op, not %r." % paddings) 

217 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

218 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

219 "AssignVariableXlaConcatND", resource=resource, inputs=inputs, 

220 num_concats=num_concats, 

221 paddings=paddings, name=name) 

222 return _op 

223AssignVariableXlaConcatND = tf_export("raw_ops.AssignVariableXlaConcatND")(_ops.to_raw_op(assign_variable_xla_concat_nd)) 

224 

225 

226def assign_variable_xla_concat_nd_eager_fallback(resource, inputs, num_concats, paddings, name, ctx): 

227 if not isinstance(inputs, (list, tuple)): 

228 raise TypeError( 

229 "Expected list for 'inputs' argument to " 

230 "'assign_variable_xla_concat_nd' Op, not %r." % inputs) 

231 _attr_N = len(inputs) 

232 if not isinstance(num_concats, (list, tuple)): 

233 raise TypeError( 

234 "Expected list for 'num_concats' argument to " 

235 "'assign_variable_xla_concat_nd' Op, not %r." % num_concats) 

236 num_concats = [_execute.make_int(_i, "num_concats") for _i in num_concats] 

237 if paddings is None: 

238 paddings = [] 

239 if not isinstance(paddings, (list, tuple)): 

240 raise TypeError( 

241 "Expected list for 'paddings' argument to " 

242 "'assign_variable_xla_concat_nd' Op, not %r." % paddings) 

243 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

244 _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) 

245 resource = _ops.convert_to_tensor(resource, _dtypes.resource) 

246 _inputs_flat = [resource] + list(inputs) 

247 _attrs = ("T", _attr_T, "N", _attr_N, "num_concats", num_concats, 

248 "paddings", paddings) 

249 _result = _execute.execute(b"AssignVariableXlaConcatND", 0, 

250 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

251 name=name) 

252 _result = None 

253 return _result 
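
# Example (illustrative sketch, not generated): mirrors the docstring example.
# Four equally shaped 2x2 slices, given in row-major order, are merged into a
# [2, 2] grid and one element of right padding is stripped per dimension before
# the 3x3 result is written to the variable. `variable` and `slices` are
# hypothetical; the op is normally emitted by the TPU bridge rather than
# called by hand.
def _example_assign_variable_xla_concat_nd(variable, slices):
  return assign_variable_xla_concat_nd(
      resource=variable.handle,
      inputs=slices,
      num_concats=[2, 2],
      paddings=[1, 1])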

254 

255 

256def collective_permute(input, source_target_pairs, name=None): 

257 r"""An Op to permute tensors across replicated TPU instances. 

258 

259 Each instance supplies its own input. 

260 

261 For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing 

262 source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: 

263 `[D, A, B, C]`. 

264 

265 Args: 

266 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

267 The local input to be permuted. Currently only supports float and 

268 bfloat16. 

269 source_target_pairs: A `Tensor` of type `int32`. 

270 A tensor with shape [num_pairs, 2]. 

271 name: A name for the operation (optional). 

272 

273 Returns: 

274 A `Tensor`. Has the same type as `input`. 

275 """ 

276 _ctx = _context._context or _context.context() 

277 tld = _ctx._thread_local_data 

278 if tld.is_eager: 

279 try: 

280 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

281 _ctx, "CollectivePermute", name, input, source_target_pairs) 

282 return _result 

283 except _core._NotOkStatusException as e: 

284 _ops.raise_from_not_ok_status(e, name) 

285 except _core._FallbackException: 

286 pass 

287 try: 

288 return collective_permute_eager_fallback( 

289 input, source_target_pairs, name=name, ctx=_ctx) 

290 except _core._SymbolicException: 

291 pass # Add nodes to the TensorFlow graph. 

292 # Add nodes to the TensorFlow graph. 

293 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

294 "CollectivePermute", input=input, 

295 source_target_pairs=source_target_pairs, 

296 name=name) 

297 _result = _outputs[:] 

298 if _execute.must_record_gradient(): 

299 _attrs = ("T", _op._get_attr_type("T")) 

300 _inputs_flat = _op.inputs 

301 _execute.record_gradient( 

302 "CollectivePermute", _inputs_flat, _attrs, _result) 

303 _result, = _result 

304 return _result 

305 

306CollectivePermute = tf_export("raw_ops.CollectivePermute")(_ops.to_raw_op(collective_permute)) 

307 

308 

309def collective_permute_eager_fallback(input, source_target_pairs, name, ctx): 

310 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

311 source_target_pairs = _ops.convert_to_tensor(source_target_pairs, _dtypes.int32) 

312 _inputs_flat = [input, source_target_pairs] 

313 _attrs = ("T", _attr_T) 

314 _result = _execute.execute(b"CollectivePermute", 1, inputs=_inputs_flat, 

315 attrs=_attrs, ctx=ctx, name=name) 

316 if _execute.must_record_gradient(): 

317 _execute.record_gradient( 

318 "CollectivePermute", _inputs_flat, _attrs, _result) 

319 _result, = _result 

320 return _result 
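
# Example (illustrative sketch, not generated): the docstring scenario with
# four replicas [A, B, C, D]. The source/target pairs rotate the tensors so
# each replica receives its predecessor's value, i.e. the outputs are
# [D, A, B, C]. `replica_local_input` is a hypothetical per-replica tensor.
def _example_collective_permute(replica_local_input):
  return collective_permute(
      input=replica_local_input,
      source_target_pairs=[[0, 1], [1, 2], [2, 3], [3, 0]])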

321 

322 

323def configure_distributed_tpu(embedding_config="", tpu_embedding_config="", is_global_init=False, enable_whole_mesh_compilations=False, compilation_failure_closes_chips=True, tpu_cancellation_closes_chips=0, name=None): 

324 r"""Sets up the centralized structures for a distributed TPU system. 

325 

326 Args: 

327 embedding_config: An optional `string`. Defaults to `""`. 

328 Reserved. Do not use. 

329 tpu_embedding_config: An optional `string`. Defaults to `""`. 

330 Serialized tensorflow.tpu.TPUEmbeddingConfiguration that 

331 describes the embedding lookups of the program. 

332 is_global_init: An optional `bool`. Defaults to `False`. 

333 Reserved. Do not use. 

334 enable_whole_mesh_compilations: An optional `bool`. Defaults to `False`. 

335 compilation_failure_closes_chips: An optional `bool`. Defaults to `True`. 

336 tpu_cancellation_closes_chips: An optional `int`. Defaults to `0`. 

337 name: A name for the operation (optional). 

338 

339 Returns: 

340 A `Tensor` of type `string`. 

341 """ 

342 _ctx = _context._context or _context.context() 

343 tld = _ctx._thread_local_data 

344 if tld.is_eager: 

345 try: 

346 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

347 _ctx, "ConfigureDistributedTPU", name, "embedding_config", 

348 embedding_config, "tpu_embedding_config", tpu_embedding_config, 

349 "is_global_init", is_global_init, "enable_whole_mesh_compilations", 

350 enable_whole_mesh_compilations, "compilation_failure_closes_chips", 

351 compilation_failure_closes_chips, "tpu_cancellation_closes_chips", 

352 tpu_cancellation_closes_chips) 

353 return _result 

354 except _core._NotOkStatusException as e: 

355 _ops.raise_from_not_ok_status(e, name) 

356 except _core._FallbackException: 

357 pass 

358 try: 

359 return configure_distributed_tpu_eager_fallback( 

360 embedding_config=embedding_config, 

361 tpu_embedding_config=tpu_embedding_config, 

362 is_global_init=is_global_init, 

363 enable_whole_mesh_compilations=enable_whole_mesh_compilations, 

364 compilation_failure_closes_chips=compilation_failure_closes_chips, 

365 tpu_cancellation_closes_chips=tpu_cancellation_closes_chips, 

366 name=name, ctx=_ctx) 

367 except _core._SymbolicException: 

368 pass # Add nodes to the TensorFlow graph. 

369 # Add nodes to the TensorFlow graph. 

370 if embedding_config is None: 

371 embedding_config = "" 

372 embedding_config = _execute.make_str(embedding_config, "embedding_config") 

373 if tpu_embedding_config is None: 

374 tpu_embedding_config = "" 

375 tpu_embedding_config = _execute.make_str(tpu_embedding_config, "tpu_embedding_config") 

376 if is_global_init is None: 

377 is_global_init = False 

378 is_global_init = _execute.make_bool(is_global_init, "is_global_init") 

379 if enable_whole_mesh_compilations is None: 

380 enable_whole_mesh_compilations = False 

381 enable_whole_mesh_compilations = _execute.make_bool(enable_whole_mesh_compilations, "enable_whole_mesh_compilations") 

382 if compilation_failure_closes_chips is None: 

383 compilation_failure_closes_chips = True 

384 compilation_failure_closes_chips = _execute.make_bool(compilation_failure_closes_chips, "compilation_failure_closes_chips") 

385 if tpu_cancellation_closes_chips is None: 

386 tpu_cancellation_closes_chips = 0 

387 tpu_cancellation_closes_chips = _execute.make_int(tpu_cancellation_closes_chips, "tpu_cancellation_closes_chips") 

388 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

389 "ConfigureDistributedTPU", embedding_config=embedding_config, 

390 tpu_embedding_config=tpu_embedding_config, 

391 is_global_init=is_global_init, 

392 enable_whole_mesh_compilations=enable_whole_mesh_compilations, 

393 compilation_failure_closes_chips=compilation_failure_closes_chips, 

394 tpu_cancellation_closes_chips=tpu_cancellation_closes_chips, 

395 name=name) 

396 _result = _outputs[:] 

397 if _execute.must_record_gradient(): 

398 _attrs = ("embedding_config", _op.get_attr("embedding_config"), 

399 "tpu_embedding_config", _op.get_attr("tpu_embedding_config"), 

400 "is_global_init", _op._get_attr_bool("is_global_init"), 

401 "enable_whole_mesh_compilations", 

402 _op._get_attr_bool("enable_whole_mesh_compilations"), 

403 "compilation_failure_closes_chips", 

404 _op._get_attr_bool("compilation_failure_closes_chips"), 

405 "tpu_cancellation_closes_chips", 

406 _op._get_attr_int("tpu_cancellation_closes_chips")) 

407 _inputs_flat = _op.inputs 

408 _execute.record_gradient( 

409 "ConfigureDistributedTPU", _inputs_flat, _attrs, _result) 

410 _result, = _result 

411 return _result 

412 

413ConfigureDistributedTPU = tf_export("raw_ops.ConfigureDistributedTPU")(_ops.to_raw_op(configure_distributed_tpu)) 

414 

415 

416def configure_distributed_tpu_eager_fallback(embedding_config, tpu_embedding_config, is_global_init, enable_whole_mesh_compilations, compilation_failure_closes_chips, tpu_cancellation_closes_chips, name, ctx): 

417 if embedding_config is None: 

418 embedding_config = "" 

419 embedding_config = _execute.make_str(embedding_config, "embedding_config") 

420 if tpu_embedding_config is None: 

421 tpu_embedding_config = "" 

422 tpu_embedding_config = _execute.make_str(tpu_embedding_config, "tpu_embedding_config") 

423 if is_global_init is None: 

424 is_global_init = False 

425 is_global_init = _execute.make_bool(is_global_init, "is_global_init") 

426 if enable_whole_mesh_compilations is None: 

427 enable_whole_mesh_compilations = False 

428 enable_whole_mesh_compilations = _execute.make_bool(enable_whole_mesh_compilations, "enable_whole_mesh_compilations") 

429 if compilation_failure_closes_chips is None: 

430 compilation_failure_closes_chips = True 

431 compilation_failure_closes_chips = _execute.make_bool(compilation_failure_closes_chips, "compilation_failure_closes_chips") 

432 if tpu_cancellation_closes_chips is None: 

433 tpu_cancellation_closes_chips = 0 

434 tpu_cancellation_closes_chips = _execute.make_int(tpu_cancellation_closes_chips, "tpu_cancellation_closes_chips") 

435 _inputs_flat = [] 

436 _attrs = ("embedding_config", embedding_config, "tpu_embedding_config", 

437 tpu_embedding_config, "is_global_init", is_global_init, 

438 "enable_whole_mesh_compilations", enable_whole_mesh_compilations, 

439 "compilation_failure_closes_chips", compilation_failure_closes_chips, 

440 "tpu_cancellation_closes_chips", tpu_cancellation_closes_chips) 

441 _result = _execute.execute(b"ConfigureDistributedTPU", 1, 

442 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

443 name=name) 

444 if _execute.must_record_gradient(): 

445 _execute.record_gradient( 

446 "ConfigureDistributedTPU", _inputs_flat, _attrs, _result) 

447 _result, = _result 

448 return _result 
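
# Example (illustrative sketch, not generated): ConfigureDistributedTPU is
# normally issued once on the TPU system device during initialization (e.g. by
# tf.tpu.experimental.initialize_tpu_system); the returned string tensor holds
# a serialized description of the TPU topology. The values below are the op's
# documented defaults.
def _example_configure_distributed_tpu():
  return configure_distributed_tpu(
      embedding_config="",
      tpu_embedding_config="",
      is_global_init=False)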

449 

450 

451def configure_tpu_embedding(config, name=None): 

452 r"""Sets up TPUEmbedding in a distributed TPU system. 

453 

454 Args: 

455 config: A `string`. 

456 Serialized tensorflow.tpu.TPUEmbeddingConfiguration that 

457 describes the embedding lookups of the program. 

458 name: A name for the operation (optional). 

459 

460 Returns: 

461 The created Operation. 

462 """ 

463 _ctx = _context._context or _context.context() 

464 tld = _ctx._thread_local_data 

465 if tld.is_eager: 

466 try: 

467 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

468 _ctx, "ConfigureTPUEmbedding", name, "config", config) 

469 return _result 

470 except _core._NotOkStatusException as e: 

471 _ops.raise_from_not_ok_status(e, name) 

472 except _core._FallbackException: 

473 pass 

474 try: 

475 return configure_tpu_embedding_eager_fallback( 

476 config=config, name=name, ctx=_ctx) 

477 except _core._SymbolicException: 

478 pass # Add nodes to the TensorFlow graph. 

479 # Add nodes to the TensorFlow graph. 

480 config = _execute.make_str(config, "config") 

481 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

482 "ConfigureTPUEmbedding", config=config, name=name) 

483 return _op 

484ConfigureTPUEmbedding = tf_export("raw_ops.ConfigureTPUEmbedding")(_ops.to_raw_op(configure_tpu_embedding)) 

485 

486 

487def configure_tpu_embedding_eager_fallback(config, name, ctx): 

488 config = _execute.make_str(config, "config") 

489 _inputs_flat = [] 

490 _attrs = ("config", config) 

491 _result = _execute.execute(b"ConfigureTPUEmbedding", 0, inputs=_inputs_flat, 

492 attrs=_attrs, ctx=ctx, name=name) 

493 _result = None 

494 return _result 
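
# Example (illustrative sketch, not generated): `config` is a serialized
# tensorflow.tpu.TPUEmbeddingConfiguration proto. `embedding_config_proto` is
# a hypothetical, already-populated proto; the op requires an initialized
# distributed TPU system.
def _example_configure_tpu_embedding(embedding_config_proto):
  return configure_tpu_embedding(
      config=embedding_config_proto.SerializeToString())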

495 

496 

497def cross_replica_sum(input, group_assignment, name=None): 

498 r"""An Op to sum inputs across replicated TPU instances. 

499 

500 Each instance supplies its own input. 

501 

502 For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`. 

503 Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0, 

504 and `B, D, F, H` as group 1. Thus we get the outputs: 

505 `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`. 

506 

507 Args: 

508 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `uint32`. 

509 The local input to the sum. 

510 group_assignment: A `Tensor` of type `int32`. An int32 tensor with shape 

511 [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the 

512 replica ids in the ith subgroup. 

513 name: A name for the operation (optional). 

514 

515 Returns: 

516 A `Tensor`. Has the same type as `input`. 

517 """ 

518 _ctx = _context._context or _context.context() 

519 tld = _ctx._thread_local_data 

520 if tld.is_eager: 

521 try: 

522 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

523 _ctx, "CrossReplicaSum", name, input, group_assignment) 

524 return _result 

525 except _core._NotOkStatusException as e: 

526 _ops.raise_from_not_ok_status(e, name) 

527 except _core._FallbackException: 

528 pass 

529 try: 

530 return cross_replica_sum_eager_fallback( 

531 input, group_assignment, name=name, ctx=_ctx) 

532 except _core._SymbolicException: 

533 pass # Add nodes to the TensorFlow graph. 

534 # Add nodes to the TensorFlow graph. 

535 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

536 "CrossReplicaSum", input=input, group_assignment=group_assignment, 

537 name=name) 

538 _result = _outputs[:] 

539 if _execute.must_record_gradient(): 

540 _attrs = ("T", _op._get_attr_type("T")) 

541 _inputs_flat = _op.inputs 

542 _execute.record_gradient( 

543 "CrossReplicaSum", _inputs_flat, _attrs, _result) 

544 _result, = _result 

545 return _result 

546 

547CrossReplicaSum = tf_export("raw_ops.CrossReplicaSum")(_ops.to_raw_op(cross_replica_sum)) 

548 

549 

550def cross_replica_sum_eager_fallback(input, group_assignment, name, ctx): 

551 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint32, ]) 

552 group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) 

553 _inputs_flat = [input, group_assignment] 

554 _attrs = ("T", _attr_T) 

555 _result = _execute.execute(b"CrossReplicaSum", 1, inputs=_inputs_flat, 

556 attrs=_attrs, ctx=ctx, name=name) 

557 if _execute.must_record_gradient(): 

558 _execute.record_gradient( 

559 "CrossReplicaSum", _inputs_flat, _attrs, _result) 

560 _result, = _result 

561 return _result 
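
# Example (illustrative sketch, not generated): the docstring scenario with
# eight replicas. Replicas {0, 2, 4, 6} and {1, 3, 5, 7} form two groups; every
# replica receives the sum over its own group. `replica_local_input` is a
# hypothetical per-replica tensor of one of the supported dtypes.
def _example_cross_replica_sum(replica_local_input):
  return cross_replica_sum(
      input=replica_local_input,
      group_assignment=[[0, 2, 4, 6], [1, 3, 5, 7]])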

562 

563 

564def dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits, embedding_indices, aggregation_weights, mode_override, device_ordinal, combiners=[], name=None): 

565 r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). 

566 

567 embedding_indices[i] and aggregation_weights[i] correspond 

568 to the ith feature. 

569 

570 The tensors at corresponding positions in the three input lists (sample_indices_or_row_splits, 

571 embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 

572 with dim_size() equal to the total number of lookups into the table described by 

573 the corresponding feature. 

574 

575 Args: 

576 sample_indices_or_row_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. 

577 A list of rank 2 Tensors specifying the training example to which the 

578 corresponding embedding_indices and aggregation_weights values belong. 

579 If the size of its first dimension is 0, we assume each embedding_indices 

580 belongs to a different sample. Both int32 and int64 are allowed and will 

581 be converted to int32 internally. 

582 

583 Or a list of rank 1 Tensors specifying the row splits for splitting 

584 embedding_indices and aggregation_weights into rows. It corresponds to 

585 ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When 

586 enqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged; 

587 the row splits is a 1-D dense tensor. When empty, we assume a dense tensor is 

588 passed to the op. Both int32 and int64 are allowed and will be converted to 

589 int32 internally. 

590 embedding_indices: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `int32`, `int64`. 

591 A list of rank 1 Tensors, indices into the embedding 

592 tables. Both int32 and int64 are allowed and will be converted to 

593 int32 internally. 

594 aggregation_weights: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `float32`, `float64`. 

595 A list of rank 1 Tensors containing per training 

596 example aggregation weights. Both float32 and float64 are allowed and will 

597 be converted to float32 internally. 

598 mode_override: A `Tensor` of type `string`. 

599 A string input that overrides the mode specified in the 

600 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

601 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

602 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

603 device_ordinal: A `Tensor` of type `int32`. 

604 The TPU device to use. Should be >= 0 and less than the number 

605 of TPU cores in the task on which the node is placed. 

606 combiners: An optional list of `strings`. Defaults to `[]`. 

607 A list of string scalars, one for each embedding table, that specify 

608 how to normalize the embedding activations after weighted summation. 

609 Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have 

610 the sum of the weights be 0 for 'mean' or the sum of the squared weights be 

611 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for 

612 all tables. 

613 name: A name for the operation (optional). 

614 

615 Returns: 

616 The created Operation. 

617 """ 

618 _ctx = _context._context or _context.context() 

619 tld = _ctx._thread_local_data 

620 if tld.is_eager: 

621 try: 

622 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

623 _ctx, "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", name, 

624 sample_indices_or_row_splits, embedding_indices, aggregation_weights, 

625 mode_override, device_ordinal, "combiners", combiners) 

626 return _result 

627 except _core._NotOkStatusException as e: 

628 _ops.raise_from_not_ok_status(e, name) 

629 except _core._FallbackException: 

630 pass 

631 try: 

632 return dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback( 

633 sample_indices_or_row_splits, embedding_indices, 

634 aggregation_weights, mode_override, device_ordinal, 

635 combiners=combiners, name=name, ctx=_ctx) 

636 except _core._SymbolicException: 

637 pass # Add nodes to the TensorFlow graph. 

638 # Add nodes to the TensorFlow graph. 

639 if not isinstance(sample_indices_or_row_splits, (list, tuple)): 

640 raise TypeError( 

641 "Expected list for 'sample_indices_or_row_splits' argument to " 

642 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) 

643 _attr_N = len(sample_indices_or_row_splits) 

644 if not isinstance(embedding_indices, (list, tuple)): 

645 raise TypeError( 

646 "Expected list for 'embedding_indices' argument to " 

647 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) 

648 if len(embedding_indices) != _attr_N: 

649 raise ValueError( 

650 "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

651 "must match length %d of argument 'sample_indices_or_row_splits'." % 

652 (len(embedding_indices), _attr_N)) 

653 if not isinstance(aggregation_weights, (list, tuple)): 

654 raise TypeError( 

655 "Expected list for 'aggregation_weights' argument to " 

656 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) 

657 if len(aggregation_weights) != _attr_N: 

658 raise ValueError( 

659 "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

660 "must match length %d of argument 'sample_indices_or_row_splits'." % 

661 (len(aggregation_weights), _attr_N)) 

662 if combiners is None: 

663 combiners = [] 

664 if not isinstance(combiners, (list, tuple)): 

665 raise TypeError( 

666 "Expected list for 'combiners' argument to " 

667 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) 

668 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

669 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

670 "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", sample_indices_or_row_splits=sample_indices_or_row_splits, 

671 embedding_indices=embedding_indices, 

672 aggregation_weights=aggregation_weights, 

673 mode_override=mode_override, 

674 device_ordinal=device_ordinal, 

675 combiners=combiners, 

676 name=name) 

677 return _op 

678DynamicEnqueueTPUEmbeddingArbitraryTensorBatch = tf_export("raw_ops.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch")(_ops.to_raw_op(dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch)) 

679 

680 

681def dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback(sample_indices_or_row_splits, embedding_indices, aggregation_weights, mode_override, device_ordinal, combiners, name, ctx): 

682 if not isinstance(sample_indices_or_row_splits, (list, tuple)): 

683 raise TypeError( 

684 "Expected list for 'sample_indices_or_row_splits' argument to " 

685 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) 

686 _attr_N = len(sample_indices_or_row_splits) 

687 if not isinstance(embedding_indices, (list, tuple)): 

688 raise TypeError( 

689 "Expected list for 'embedding_indices' argument to " 

690 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) 

691 if len(embedding_indices) != _attr_N: 

692 raise ValueError( 

693 "List argument 'embedding_indices' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

694 "must match length %d of argument 'sample_indices_or_row_splits'." % 

695 (len(embedding_indices), _attr_N)) 

696 if not isinstance(aggregation_weights, (list, tuple)): 

697 raise TypeError( 

698 "Expected list for 'aggregation_weights' argument to " 

699 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) 

700 if len(aggregation_weights) != _attr_N: 

701 raise ValueError( 

702 "List argument 'aggregation_weights' to 'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

703 "must match length %d of argument 'sample_indices_or_row_splits'." % 

704 (len(aggregation_weights), _attr_N)) 

705 if combiners is None: 

706 combiners = [] 

707 if not isinstance(combiners, (list, tuple)): 

708 raise TypeError( 

709 "Expected list for 'combiners' argument to " 

710 "'dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) 

711 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

712 _attr_T1, sample_indices_or_row_splits = _execute.args_to_matching_eager(list(sample_indices_or_row_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

713 _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

714 _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

715 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

716 device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) 

717 _inputs_flat = list(sample_indices_or_row_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override, device_ordinal] 

718 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, 

719 "combiners", combiners) 

720 _result = _execute.execute(b"DynamicEnqueueTPUEmbeddingArbitraryTensorBatch", 

721 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

722 name=name) 

723 _result = None 

724 return _result 
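
# Example (illustrative sketch, not generated): a single-feature enqueue. An
# empty rank-2 sample-indices tensor means every embedding index is treated as
# its own sample. Unlike the non-dynamic op below, `device_ordinal` is a
# runtime int32 tensor. `embedding_ids`, `weights` and `device_ordinal_tensor`
# are hypothetical rank-1 int32, rank-1 float32 and scalar int32 tensors.
def _example_dynamic_enqueue_arbitrary_tensor_batch(embedding_ids, weights,
                                                    device_ordinal_tensor):
  import tensorflow as tf  # sketch-only import
  return dynamic_enqueue_tpu_embedding_arbitrary_tensor_batch(
      sample_indices_or_row_splits=[tf.zeros([0, 2], dtype=tf.int32)],
      embedding_indices=[embedding_ids],
      aggregation_weights=[weights],
      mode_override=tf.constant("unspecified"),
      device_ordinal=device_ordinal_tensor,
      combiners=["sum"])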

725 

726 

727def enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits, embedding_indices, aggregation_weights, mode_override, device_ordinal=-1, combiners=[], name=None): 

728 r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). 

729 

730 embedding_indices[i] and aggregation_weights[i] correspond 

731 to the ith feature. 

732 

733 The tensors at corresponding positions in the three input lists (sample_indices_or_row_splits, 

734 embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 

735 with dim_size() equal to the total number of lookups into the table described by 

736 the corresponding feature. 

737 

738 Args: 

739 sample_indices_or_row_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. 

740 A list of rank 2 Tensors specifying the training example to which the 

741 corresponding embedding_indices and aggregation_weights values belong. 

742 If the size of its first dimension is 0, we assume each embedding_indices 

743 belongs to a different sample. Both int32 and int64 are allowed and will 

744 be converted to int32 internally. 

745 

746 Or a list of rank 1 Tensors specifying the row splits for splitting 

747 embedding_indices and aggregation_weights into rows. It corresponds to 

748 ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When 

749 enqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged; 

750 the row splits is a 1-D dense tensor. When empty, we assume a dense tensor is 

751 passed to the op. Both int32 and int64 are allowed and will be converted to 

752 int32 internally. 

753 embedding_indices: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `int32`, `int64`. 

754 A list of rank 1 Tensors, indices into the embedding 

755 tables. Both int32 and int64 are allowed and will be converted to 

756 int32 internally. 

757 aggregation_weights: A list with the same length as `sample_indices_or_row_splits` of `Tensor` objects with the same type in: `float32`, `float64`. 

758 A list of rank 1 Tensors containing per training 

759 example aggregation weights. Both float32 and float64 are allowed and will 

760 be converted to float32 internally. 

761 mode_override: A `Tensor` of type `string`. 

762 A string input that overrides the mode specified in the 

763 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

764 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

765 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

766 device_ordinal: An optional `int`. Defaults to `-1`. 

767 The TPU device to use. Should be >= 0 and less than the number 

768 of TPU cores in the task on which the node is placed. 

769 combiners: An optional list of `strings`. Defaults to `[]`. 

770 A list of string scalars, one for each embedding table, that specify 

771 how to normalize the embedding activations after weighted summation. 

772 Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have 

773 the sum of the weights be 0 for 'mean' or the sum of the squared weights be 

774 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for 

775 all tables. 

776 name: A name for the operation (optional). 

777 

778 Returns: 

779 The created Operation. 

780 """ 

781 _ctx = _context._context or _context.context() 

782 tld = _ctx._thread_local_data 

783 if tld.is_eager: 

784 try: 

785 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

786 _ctx, "EnqueueTPUEmbeddingArbitraryTensorBatch", name, 

787 sample_indices_or_row_splits, embedding_indices, aggregation_weights, 

788 mode_override, "device_ordinal", device_ordinal, "combiners", 

789 combiners) 

790 return _result 

791 except _core._NotOkStatusException as e: 

792 _ops.raise_from_not_ok_status(e, name) 

793 except _core._FallbackException: 

794 pass 

795 try: 

796 return enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback( 

797 sample_indices_or_row_splits, embedding_indices, 

798 aggregation_weights, mode_override, device_ordinal=device_ordinal, 

799 combiners=combiners, name=name, ctx=_ctx) 

800 except _core._SymbolicException: 

801 pass # Add nodes to the TensorFlow graph. 

802 # Add nodes to the TensorFlow graph. 

803 if not isinstance(sample_indices_or_row_splits, (list, tuple)): 

804 raise TypeError( 

805 "Expected list for 'sample_indices_or_row_splits' argument to " 

806 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) 

807 _attr_N = len(sample_indices_or_row_splits) 

808 if not isinstance(embedding_indices, (list, tuple)): 

809 raise TypeError( 

810 "Expected list for 'embedding_indices' argument to " 

811 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) 

812 if len(embedding_indices) != _attr_N: 

813 raise ValueError( 

814 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

815 "must match length %d of argument 'sample_indices_or_row_splits'." % 

816 (len(embedding_indices), _attr_N)) 

817 if not isinstance(aggregation_weights, (list, tuple)): 

818 raise TypeError( 

819 "Expected list for 'aggregation_weights' argument to " 

820 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) 

821 if len(aggregation_weights) != _attr_N: 

822 raise ValueError( 

823 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

824 "must match length %d of argument 'sample_indices_or_row_splits'." % 

825 (len(aggregation_weights), _attr_N)) 

826 if device_ordinal is None: 

827 device_ordinal = -1 

828 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

829 if combiners is None: 

830 combiners = [] 

831 if not isinstance(combiners, (list, tuple)): 

832 raise TypeError( 

833 "Expected list for 'combiners' argument to " 

834 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) 

835 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

836 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

837 "EnqueueTPUEmbeddingArbitraryTensorBatch", sample_indices_or_row_splits=sample_indices_or_row_splits, 

838 embedding_indices=embedding_indices, 

839 aggregation_weights=aggregation_weights, 

840 mode_override=mode_override, 

841 device_ordinal=device_ordinal, 

842 combiners=combiners, 

843 name=name) 

844 return _op 

845EnqueueTPUEmbeddingArbitraryTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingArbitraryTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_arbitrary_tensor_batch)) 

846 

847 

848def enqueue_tpu_embedding_arbitrary_tensor_batch_eager_fallback(sample_indices_or_row_splits, embedding_indices, aggregation_weights, mode_override, device_ordinal, combiners, name, ctx): 

849 if not isinstance(sample_indices_or_row_splits, (list, tuple)): 

850 raise TypeError( 

851 "Expected list for 'sample_indices_or_row_splits' argument to " 

852 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % sample_indices_or_row_splits) 

853 _attr_N = len(sample_indices_or_row_splits) 

854 if not isinstance(embedding_indices, (list, tuple)): 

855 raise TypeError( 

856 "Expected list for 'embedding_indices' argument to " 

857 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % embedding_indices) 

858 if len(embedding_indices) != _attr_N: 

859 raise ValueError( 

860 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

861 "must match length %d of argument 'sample_indices_or_row_splits'." % 

862 (len(embedding_indices), _attr_N)) 

863 if not isinstance(aggregation_weights, (list, tuple)): 

864 raise TypeError( 

865 "Expected list for 'aggregation_weights' argument to " 

866 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % aggregation_weights) 

867 if len(aggregation_weights) != _attr_N: 

868 raise ValueError( 

869 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_arbitrary_tensor_batch' Op with length %d " 

870 "must match length %d of argument 'sample_indices_or_row_splits'." % 

871 (len(aggregation_weights), _attr_N)) 

872 if device_ordinal is None: 

873 device_ordinal = -1 

874 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

875 if combiners is None: 

876 combiners = [] 

877 if not isinstance(combiners, (list, tuple)): 

878 raise TypeError( 

879 "Expected list for 'combiners' argument to " 

880 "'enqueue_tpu_embedding_arbitrary_tensor_batch' Op, not %r." % combiners) 

881 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

882 _attr_T1, sample_indices_or_row_splits = _execute.args_to_matching_eager(list(sample_indices_or_row_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

883 _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

884 _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

885 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

886 _inputs_flat = list(sample_indices_or_row_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override] 

887 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, 

888 "device_ordinal", device_ordinal, "combiners", combiners) 

889 _result = _execute.execute(b"EnqueueTPUEmbeddingArbitraryTensorBatch", 0, 

890 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

891 name=name) 

892 _result = None 

893 return _result 
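
# Example (illustrative sketch, not generated): the same single-feature
# enqueue, but with the static `device_ordinal` attribute. Row splits are used
# instead of sample indices: [0, 2, 3] assigns the first two lookups to sample
# 0 and the third to sample 1. `embedding_ids` and `weights` are hypothetical
# rank-1 tensors with three elements each.
def _example_enqueue_arbitrary_tensor_batch(embedding_ids, weights):
  import tensorflow as tf  # sketch-only import
  return enqueue_tpu_embedding_arbitrary_tensor_batch(
      sample_indices_or_row_splits=[tf.constant([0, 2, 3], dtype=tf.int32)],
      embedding_indices=[embedding_ids],
      aggregation_weights=[weights],
      mode_override=tf.constant("training"),
      device_ordinal=0,
      combiners=["mean"])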

894 

895 

896def enqueue_tpu_embedding_integer_batch(batch, mode_override, device_ordinal=-1, name=None): 

897 r"""An op that enqueues a list of input batch tensors to TPUEmbedding. 

898 

899 Args: 

900 batch: A list of at least 1 `Tensor` objects with type `int32`. 

901 A list of 1D tensors, one for each embedding table, containing the 

902 indices into the tables. 

903 mode_override: A `Tensor` of type `string`. 

904 A string input that overrides the mode specified in the 

905 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

906 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

907 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

908 device_ordinal: An optional `int`. Defaults to `-1`. 

909 The TPU device to use. Should be >= 0 and less than the number 

910 of TPU cores in the task on which the node is placed. 

911 name: A name for the operation (optional). 

912 

913 Returns: 

914 The created Operation. 

915 """ 

916 _ctx = _context._context or _context.context() 

917 tld = _ctx._thread_local_data 

918 if tld.is_eager: 

919 try: 

920 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

921 _ctx, "EnqueueTPUEmbeddingIntegerBatch", name, batch, mode_override, 

922 "device_ordinal", device_ordinal) 

923 return _result 

924 except _core._NotOkStatusException as e: 

925 _ops.raise_from_not_ok_status(e, name) 

926 except _core._FallbackException: 

927 pass 

928 try: 

929 return enqueue_tpu_embedding_integer_batch_eager_fallback( 

930 batch, mode_override, device_ordinal=device_ordinal, name=name, 

931 ctx=_ctx) 

932 except _core._SymbolicException: 

933 pass # Add nodes to the TensorFlow graph. 

934 # Add nodes to the TensorFlow graph. 

935 if not isinstance(batch, (list, tuple)): 

936 raise TypeError( 

937 "Expected list for 'batch' argument to " 

938 "'enqueue_tpu_embedding_integer_batch' Op, not %r." % batch) 

939 _attr_N = len(batch) 

940 if device_ordinal is None: 

941 device_ordinal = -1 

942 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

943 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

944 "EnqueueTPUEmbeddingIntegerBatch", batch=batch, 

945 mode_override=mode_override, 

946 device_ordinal=device_ordinal, 

947 name=name) 

948 return _op 

949EnqueueTPUEmbeddingIntegerBatch = tf_export("raw_ops.EnqueueTPUEmbeddingIntegerBatch")(_ops.to_raw_op(enqueue_tpu_embedding_integer_batch)) 

950 

951 

952def enqueue_tpu_embedding_integer_batch_eager_fallback(batch, mode_override, device_ordinal, name, ctx): 

953 if not isinstance(batch, (list, tuple)): 

954 raise TypeError( 

955 "Expected list for 'batch' argument to " 

956 "'enqueue_tpu_embedding_integer_batch' Op, not %r." % batch) 

957 _attr_N = len(batch) 

958 if device_ordinal is None: 

959 device_ordinal = -1 

960 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

961 batch = _ops.convert_n_to_tensor(batch, _dtypes.int32) 

962 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

963 _inputs_flat = list(batch) + [mode_override] 

964 _attrs = ("N", _attr_N, "device_ordinal", device_ordinal) 

965 _result = _execute.execute(b"EnqueueTPUEmbeddingIntegerBatch", 0, 

966 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

967 name=name) 

968 _result = None 

969 return _result 
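
# Example (illustrative sketch, not generated): enqueue for two embedding
# tables. Each entry of `batch` is a rank-1 int32 tensor of per-sample indices
# for one table; mode_override="unspecified" defers to the mode configured in
# the TPUEmbeddingConfiguration. `table0_ids` and `table1_ids` are hypothetical.
def _example_enqueue_integer_batch(table0_ids, table1_ids):
  import tensorflow as tf  # sketch-only import
  return enqueue_tpu_embedding_integer_batch(
      batch=[table0_ids, table1_ids],
      mode_override=tf.constant("unspecified"),
      device_ordinal=-1)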

970 

971 

972def enqueue_tpu_embedding_ragged_tensor_batch(sample_splits, embedding_indices, aggregation_weights, mode_override, table_ids, device_ordinal=-1, combiners=[], max_sequence_lengths=[], num_features=[], name=None): 

973 r"""Eases the porting of code that uses tf.nn.embedding_lookup(). 

974 

975 sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond 

976 to the ith feature. table_ids[i] indicates which embedding table to look up the ith 

977 feature. 

978 

979 The tensors at corresponding positions in two of the input lists, 

980 embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1 

981 with dim_size() equal to the total number of lookups into the table described by 

982 the corresponding feature. 

983 

984 Args: 

985 sample_splits: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. 

986 A list of rank 1 Tensors specifying the break points for splitting 

987 embedding_indices and aggregation_weights into rows. 

988 It corresponds to ids.row_splits in embedding_lookup(), when ids is a 

989 RaggedTensor. 

990 embedding_indices: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `int32`, `int64`. 

991 A list of rank 1 Tensors, indices into the embedding tables. 

992 It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor. 

993 aggregation_weights: A list with the same length as `sample_splits` of `Tensor` objects with the same type in: `float32`, `float64`. 

994 A list of rank 1 Tensors containing per training example 

995 aggregation weights. It corresponds to the values field of a RaggedTensor 

996 with the same row_splits as ids in embedding_lookup(), when ids is a 

997 RaggedTensor. 

998 mode_override: A `Tensor` of type `string`. 

999 A string input that overrides the mode specified in the 

1000 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

1001 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

1002 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

1003 table_ids: A list of `ints`. 

1004 A list of integers specifying the identifier of the embedding table 

1005 (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the 

1006 corresponding input. The ith input is looked up using table_ids[i]. The size 

1007 of the table_ids list must be equal to that of sample_splits, 

1008 embedding_indices and aggregation_weights. 

1009 device_ordinal: An optional `int`. Defaults to `-1`. 

1010 The TPU device to use. Should be >= 0 and less than the number 

1011 of TPU cores in the task on which the node is placed. 

1012 combiners: An optional list of `strings`. Defaults to `[]`. 

1013 A list of string scalars, one for each embedding table, that specify 

1014 how to normalize the embedding activations after weighted summation. 

1015 Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have 

1016 the sum of the weights be 0 for 'mean' or the sum of the squared weights be 

1017 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for 

1018 all tables. 

1019 max_sequence_lengths: An optional list of `ints`. Defaults to `[]`. 

1020 num_features: An optional list of `ints`. Defaults to `[]`. 

1021 name: A name for the operation (optional). 

1022 

1023 Returns: 

1024 The created Operation. 

1025 """ 

1026 _ctx = _context._context or _context.context() 

1027 tld = _ctx._thread_local_data 

1028 if tld.is_eager: 

1029 try: 

1030 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1031 _ctx, "EnqueueTPUEmbeddingRaggedTensorBatch", name, sample_splits, 

1032 embedding_indices, aggregation_weights, mode_override, 

1033 "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", 

1034 table_ids, "max_sequence_lengths", max_sequence_lengths, 

1035 "num_features", num_features) 

1036 return _result 

1037 except _core._NotOkStatusException as e: 

1038 _ops.raise_from_not_ok_status(e, name) 

1039 except _core._FallbackException: 

1040 pass 

1041 try: 

1042 return enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback( 

1043 sample_splits, embedding_indices, aggregation_weights, 

1044 mode_override, device_ordinal=device_ordinal, combiners=combiners, 

1045 table_ids=table_ids, max_sequence_lengths=max_sequence_lengths, 

1046 num_features=num_features, name=name, ctx=_ctx) 

1047 except _core._SymbolicException: 

1048 pass # Add nodes to the TensorFlow graph. 

1049 # Add nodes to the TensorFlow graph. 

1050 if not isinstance(sample_splits, (list, tuple)): 

1051 raise TypeError( 

1052 "Expected list for 'sample_splits' argument to " 

1053 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % sample_splits) 

1054 _attr_N = len(sample_splits) 

1055 if not isinstance(embedding_indices, (list, tuple)): 

1056 raise TypeError( 

1057 "Expected list for 'embedding_indices' argument to " 

1058 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % embedding_indices) 

1059 if len(embedding_indices) != _attr_N: 

1060 raise ValueError( 

1061 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " 

1062 "must match length %d of argument 'sample_splits'." % 

1063 (len(embedding_indices), _attr_N)) 

1064 if not isinstance(aggregation_weights, (list, tuple)): 

1065 raise TypeError( 

1066 "Expected list for 'aggregation_weights' argument to " 

1067 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) 

1068 if len(aggregation_weights) != _attr_N: 

1069 raise ValueError( 

1070 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " 

1071 "must match length %d of argument 'sample_splits'." % 

1072 (len(aggregation_weights), _attr_N)) 

1073 if not isinstance(table_ids, (list, tuple)): 

1074 raise TypeError( 

1075 "Expected list for 'table_ids' argument to " 

1076 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) 

1077 table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] 

1078 if device_ordinal is None: 

1079 device_ordinal = -1 

1080 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1081 if combiners is None: 

1082 combiners = [] 

1083 if not isinstance(combiners, (list, tuple)): 

1084 raise TypeError( 

1085 "Expected list for 'combiners' argument to " 

1086 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % combiners) 

1087 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1088 if max_sequence_lengths is None: 

1089 max_sequence_lengths = [] 

1090 if not isinstance(max_sequence_lengths, (list, tuple)): 

1091 raise TypeError( 

1092 "Expected list for 'max_sequence_lengths' argument to " 

1093 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % max_sequence_lengths) 

1094 max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] 

1095 if num_features is None: 

1096 num_features = [] 

1097 if not isinstance(num_features, (list, tuple)): 

1098 raise TypeError( 

1099 "Expected list for 'num_features' argument to " 

1100 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) 

1101 num_features = [_execute.make_int(_i, "num_features") for _i in num_features] 

1102 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1103 "EnqueueTPUEmbeddingRaggedTensorBatch", sample_splits=sample_splits, 

1104 embedding_indices=embedding_indices, 

1105 aggregation_weights=aggregation_weights, 

1106 mode_override=mode_override, 

1107 table_ids=table_ids, 

1108 device_ordinal=device_ordinal, 

1109 combiners=combiners, 

1110 max_sequence_lengths=max_sequence_lengths, 

1111 num_features=num_features, 

1112 name=name) 

1113 return _op 

1114EnqueueTPUEmbeddingRaggedTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingRaggedTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_ragged_tensor_batch)) 

1115 

1116 

1117def enqueue_tpu_embedding_ragged_tensor_batch_eager_fallback(sample_splits, embedding_indices, aggregation_weights, mode_override, table_ids, device_ordinal, combiners, max_sequence_lengths, num_features, name, ctx): 

1118 if not isinstance(sample_splits, (list, tuple)): 

1119 raise TypeError( 

1120 "Expected list for 'sample_splits' argument to " 

1121 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % sample_splits) 

1122 _attr_N = len(sample_splits) 

1123 if not isinstance(embedding_indices, (list, tuple)): 

1124 raise TypeError( 

1125 "Expected list for 'embedding_indices' argument to " 

1126 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % embedding_indices) 

1127 if len(embedding_indices) != _attr_N: 

1128 raise ValueError( 

1129 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " 

1130 "must match length %d of argument 'sample_splits'." % 

1131 (len(embedding_indices), _attr_N)) 

1132 if not isinstance(aggregation_weights, (list, tuple)): 

1133 raise TypeError( 

1134 "Expected list for 'aggregation_weights' argument to " 

1135 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % aggregation_weights) 

1136 if len(aggregation_weights) != _attr_N: 

1137 raise ValueError( 

1138 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_ragged_tensor_batch' Op with length %d " 

1139 "must match length %d of argument 'sample_splits'." % 

1140 (len(aggregation_weights), _attr_N)) 

1141 if not isinstance(table_ids, (list, tuple)): 

1142 raise TypeError( 

1143 "Expected list for 'table_ids' argument to " 

1144 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % table_ids) 

1145 table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] 

1146 if device_ordinal is None: 

1147 device_ordinal = -1 

1148 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1149 if combiners is None: 

1150 combiners = [] 

1151 if not isinstance(combiners, (list, tuple)): 

1152 raise TypeError( 

1153 "Expected list for 'combiners' argument to " 

1154 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % combiners) 

1155 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1156 if max_sequence_lengths is None: 

1157 max_sequence_lengths = [] 

1158 if not isinstance(max_sequence_lengths, (list, tuple)): 

1159 raise TypeError( 

1160 "Expected list for 'max_sequence_lengths' argument to " 

1161 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % max_sequence_lengths) 

1162 max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] 

1163 if num_features is None: 

1164 num_features = [] 

1165 if not isinstance(num_features, (list, tuple)): 

1166 raise TypeError( 

1167 "Expected list for 'num_features' argument to " 

1168 "'enqueue_tpu_embedding_ragged_tensor_batch' Op, not %r." % num_features) 

1169 num_features = [_execute.make_int(_i, "num_features") for _i in num_features] 

1170 _attr_T1, sample_splits = _execute.args_to_matching_eager(list(sample_splits), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1171 _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1172 _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

1173 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

1174 _inputs_flat = list(sample_splits) + list(embedding_indices) + list(aggregation_weights) + [mode_override] 

1175 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, 

1176 "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", 

1177 table_ids, "max_sequence_lengths", max_sequence_lengths, "num_features", 

1178 num_features) 

1179 _result = _execute.execute(b"EnqueueTPUEmbeddingRaggedTensorBatch", 0, 

1180 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1181 name=name) 

1182 _result = None 

1183 return _result 

1184 

1185 

1186def enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override, device_ordinal=-1, combiners=[], name=None): 

1187 r"""An op that enqueues TPUEmbedding input indices from a SparseTensor. 

1188 

1189 This Op eases the porting of code that uses embedding_lookup_sparse(), 

1190 although some Python preprocessing of the SparseTensor arguments to 

1191 embedding_lookup_sparse() is required to produce the arguments to this Op, 

1192 since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training 

1193 step. 

1194 

1195 The tensors at corresponding positions in the three input lists 

1196 must have the same shape, i.e. rank 1 with dim_size() equal to the total 

1197 number of lookups into the table described by the corresponding table_id. 

1198 

1199 Args: 

1200 sample_indices: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. 

1201 A list of rank 1 Tensors specifying the training example and 

1202 feature to which the corresponding embedding_indices and aggregation_weights 

1203 values belong. sample_indices[i] must equal b * nf + f, where nf is the 

1204 number of features from the corresponding table, f is in [0, nf), and 

1205 b is in [0, batch size). 

1206 embedding_indices: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `int32`, `int64`. 

1207 A list of rank 1 Tensors, indices into the embedding tables. 

1208 aggregation_weights: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `float32`, `float64`. 

1209 A list of rank 1 Tensors containing per sample -- i.e. per 

1210 (training example, feature) -- aggregation weights. 

1211 mode_override: A `Tensor` of type `string`. 

1212 A string input that overrides the mode specified in the 

1213 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

1214 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

1215 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

1216 device_ordinal: An optional `int`. Defaults to `-1`. 

1217 The TPU device to use. Should be >= 0 and less than the number 

1218 of TPU cores in the task on which the node is placed. 

1219 combiners: An optional list of `strings`. Defaults to `[]`. 

1220 A list of string scalars, one for each embedding table, specifying 

1221 how to normalize the embedding activations after weighted summation. 

1222 Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have 

1223 the sum of the weights be 0 for 'mean' or the sum of the squared weights be 

1224 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for 

1225 all tables. 

1226 name: A name for the operation (optional). 

1227 

1228 Returns: 

1229 The created Operation. 

1230 """ 

1231 _ctx = _context._context or _context.context() 

1232 tld = _ctx._thread_local_data 

1233 if tld.is_eager: 

1234 try: 

1235 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1236 _ctx, "EnqueueTPUEmbeddingSparseBatch", name, sample_indices, 

1237 embedding_indices, aggregation_weights, mode_override, 

1238 "device_ordinal", device_ordinal, "combiners", combiners) 

1239 return _result 

1240 except _core._NotOkStatusException as e: 

1241 _ops.raise_from_not_ok_status(e, name) 

1242 except _core._FallbackException: 

1243 pass 

1244 try: 

1245 return enqueue_tpu_embedding_sparse_batch_eager_fallback( 

1246 sample_indices, embedding_indices, aggregation_weights, 

1247 mode_override, device_ordinal=device_ordinal, combiners=combiners, 

1248 name=name, ctx=_ctx) 

1249 except _core._SymbolicException: 

1250 pass # Add nodes to the TensorFlow graph. 

1251 # Add nodes to the TensorFlow graph. 

1252 if not isinstance(sample_indices, (list, tuple)): 

1253 raise TypeError( 

1254 "Expected list for 'sample_indices' argument to " 

1255 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % sample_indices) 

1256 _attr_N = len(sample_indices) 

1257 if not isinstance(embedding_indices, (list, tuple)): 

1258 raise TypeError( 

1259 "Expected list for 'embedding_indices' argument to " 

1260 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices) 

1261 if len(embedding_indices) != _attr_N: 

1262 raise ValueError( 

1263 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " 

1264 "must match length %d of argument 'sample_indices'." % 

1265 (len(embedding_indices), _attr_N)) 

1266 if not isinstance(aggregation_weights, (list, tuple)): 

1267 raise TypeError( 

1268 "Expected list for 'aggregation_weights' argument to " 

1269 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % aggregation_weights) 

1270 if len(aggregation_weights) != _attr_N: 

1271 raise ValueError( 

1272 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " 

1273 "must match length %d of argument 'sample_indices'." % 

1274 (len(aggregation_weights), _attr_N)) 

1275 if device_ordinal is None: 

1276 device_ordinal = -1 

1277 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1278 if combiners is None: 

1279 combiners = [] 

1280 if not isinstance(combiners, (list, tuple)): 

1281 raise TypeError( 

1282 "Expected list for 'combiners' argument to " 

1283 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % combiners) 

1284 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1285 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1286 "EnqueueTPUEmbeddingSparseBatch", sample_indices=sample_indices, 

1287 embedding_indices=embedding_indices, 

1288 aggregation_weights=aggregation_weights, 

1289 mode_override=mode_override, 

1290 device_ordinal=device_ordinal, 

1291 combiners=combiners, name=name) 

1292 return _op 

1293EnqueueTPUEmbeddingSparseBatch = tf_export("raw_ops.EnqueueTPUEmbeddingSparseBatch")(_ops.to_raw_op(enqueue_tpu_embedding_sparse_batch)) 
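# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Mirrors the docstring above: one feature, three lookups spread
# over two samples; assumes TPU embedding is already configured and the
# tensor values and device ordinal are made up.
def _example_enqueue_sparse_batch():
  sample_indices = [_ops.convert_to_tensor([0, 0, 1], _dtypes.int32)]           # which sample each lookup belongs to
  embedding_indices = [_ops.convert_to_tensor([5, 9, 5], _dtypes.int32)]        # rows to look up
  aggregation_weights = [_ops.convert_to_tensor([1.0, 0.5, 1.0], _dtypes.float32)]
  return enqueue_tpu_embedding_sparse_batch(
      sample_indices, embedding_indices, aggregation_weights,
      mode_override="unspecified", device_ordinal=0, combiners=["sum"])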

1294 

1295 

1296def enqueue_tpu_embedding_sparse_batch_eager_fallback(sample_indices, embedding_indices, aggregation_weights, mode_override, device_ordinal, combiners, name, ctx): 

1297 if not isinstance(sample_indices, (list, tuple)): 

1298 raise TypeError( 

1299 "Expected list for 'sample_indices' argument to " 

1300 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % sample_indices) 

1301 _attr_N = len(sample_indices) 

1302 if not isinstance(embedding_indices, (list, tuple)): 

1303 raise TypeError( 

1304 "Expected list for 'embedding_indices' argument to " 

1305 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % embedding_indices) 

1306 if len(embedding_indices) != _attr_N: 

1307 raise ValueError( 

1308 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " 

1309 "must match length %d of argument 'sample_indices'." % 

1310 (len(embedding_indices), _attr_N)) 

1311 if not isinstance(aggregation_weights, (list, tuple)): 

1312 raise TypeError( 

1313 "Expected list for 'aggregation_weights' argument to " 

1314 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % aggregation_weights) 

1315 if len(aggregation_weights) != _attr_N: 

1316 raise ValueError( 

1317 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_batch' Op with length %d " 

1318 "must match length %d of argument 'sample_indices'." % 

1319 (len(aggregation_weights), _attr_N)) 

1320 if device_ordinal is None: 

1321 device_ordinal = -1 

1322 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1323 if combiners is None: 

1324 combiners = [] 

1325 if not isinstance(combiners, (list, tuple)): 

1326 raise TypeError( 

1327 "Expected list for 'combiners' argument to " 

1328 "'enqueue_tpu_embedding_sparse_batch' Op, not %r." % combiners) 

1329 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1330 _attr_T1, sample_indices = _execute.args_to_matching_eager(list(sample_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1331 _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1332 _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

1333 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

1334 _inputs_flat = list(sample_indices) + list(embedding_indices) + list(aggregation_weights) + [mode_override] 

1335 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, 

1336 "device_ordinal", device_ordinal, "combiners", combiners) 

1337 _result = _execute.execute(b"EnqueueTPUEmbeddingSparseBatch", 0, 

1338 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1339 name=name) 

1340 _result = None 

1341 return _result 

1342 

1343 

1344def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, mode_override, table_ids, device_ordinal=-1, combiners=[], max_sequence_lengths=[], num_features=[], name=None): 

1345 r"""Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). 

1346 

1347 sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond 

1348 to the ith feature. table_ids[i] indicates which embedding table to use when 

1349 looking up the ith feature. 

1350 

1351 The tensors at corresponding positions in the three input lists (sample_indices, 

1352 embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 

1353 with dim_size() equal to the total number of lookups into the table described by 

1354 the corresponding feature. 

1355 

1356 Args: 

1357 sample_indices: A list of at least 1 `Tensor` objects with the same type in: `int32`, `int64`. 

1358 A list of rank 1 Tensors specifying the training example to 

1359 which the corresponding embedding_indices and aggregation_weights values 

1360 belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). 

1361 embedding_indices: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `int32`, `int64`. 

1362 A list of rank 1 Tensors, indices into the embedding tables. 

1363 It corresponds to sp_ids.values in embedding_lookup_sparse(). 

1364 aggregation_weights: A list with the same length as `sample_indices` of `Tensor` objects with the same type in: `float32`, `float64`. 

1365 A list of rank 1 Tensors containing per training example 

1366 aggregation weights. It corresponds to sp_weights.values in 

1367 embedding_lookup_sparse(). 

1368 mode_override: A `Tensor` of type `string`. 

1369 A string input that overrides the mode specified in the 

1370 TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 

1371 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set 

1372 in TPUEmbeddingConfiguration is used, otherwise mode_override is used. 

1373 table_ids: A list of `ints`. 

1374 A list of integers specifying the identifier of the embedding table 

1375 (offset of TableDescriptor in the TPUEmbeddingConfiguration) to look up the 

1376 corresponding input. The ith input is looked up using table_ids[i]. The size 

1377 of the table_ids list must be equal to that of sample_indices, 

1378 embedding_indices and aggregation_weights. 

1379 device_ordinal: An optional `int`. Defaults to `-1`. 

1380 The TPU device to use. Should be >= 0 and less than the number 

1381 of TPU cores in the task on which the node is placed. 

1382 combiners: An optional list of `strings`. Defaults to `[]`. 

1383 A list of string scalars, one for each embedding table, specifying 

1384 how to normalize the embedding activations after weighted summation. 

1385 Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have 

1386 the sum of the weights be 0 for 'mean' or the sum of the squared weights be 

1387 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for 

1388 all tables. 

1389 max_sequence_lengths: An optional list of `ints`. Defaults to `[]`. 

1390 num_features: An optional list of `ints`. Defaults to `[]`. 

1391 name: A name for the operation (optional). 

1392 

1393 Returns: 

1394 The created Operation. 

1395 """ 

1396 _ctx = _context._context or _context.context() 

1397 tld = _ctx._thread_local_data 

1398 if tld.is_eager: 

1399 try: 

1400 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1401 _ctx, "EnqueueTPUEmbeddingSparseTensorBatch", name, sample_indices, 

1402 embedding_indices, aggregation_weights, mode_override, 

1403 "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", 

1404 table_ids, "max_sequence_lengths", max_sequence_lengths, 

1405 "num_features", num_features) 

1406 return _result 

1407 except _core._NotOkStatusException as e: 

1408 _ops.raise_from_not_ok_status(e, name) 

1409 except _core._FallbackException: 

1410 pass 

1411 try: 

1412 return enqueue_tpu_embedding_sparse_tensor_batch_eager_fallback( 

1413 sample_indices, embedding_indices, aggregation_weights, 

1414 mode_override, device_ordinal=device_ordinal, combiners=combiners, 

1415 table_ids=table_ids, max_sequence_lengths=max_sequence_lengths, 

1416 num_features=num_features, name=name, ctx=_ctx) 

1417 except _core._SymbolicException: 

1418 pass # Add nodes to the TensorFlow graph. 

1419 # Add nodes to the TensorFlow graph. 

1420 if not isinstance(sample_indices, (list, tuple)): 

1421 raise TypeError( 

1422 "Expected list for 'sample_indices' argument to " 

1423 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % sample_indices) 

1424 _attr_N = len(sample_indices) 

1425 if not isinstance(embedding_indices, (list, tuple)): 

1426 raise TypeError( 

1427 "Expected list for 'embedding_indices' argument to " 

1428 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % embedding_indices) 

1429 if len(embedding_indices) != _attr_N: 

1430 raise ValueError( 

1431 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " 

1432 "must match length %d of argument 'sample_indices'." % 

1433 (len(embedding_indices), _attr_N)) 

1434 if not isinstance(aggregation_weights, (list, tuple)): 

1435 raise TypeError( 

1436 "Expected list for 'aggregation_weights' argument to " 

1437 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % aggregation_weights) 

1438 if len(aggregation_weights) != _attr_N: 

1439 raise ValueError( 

1440 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " 

1441 "must match length %d of argument 'sample_indices'." % 

1442 (len(aggregation_weights), _attr_N)) 

1443 if not isinstance(table_ids, (list, tuple)): 

1444 raise TypeError( 

1445 "Expected list for 'table_ids' argument to " 

1446 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % table_ids) 

1447 table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] 

1448 if device_ordinal is None: 

1449 device_ordinal = -1 

1450 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1451 if combiners is None: 

1452 combiners = [] 

1453 if not isinstance(combiners, (list, tuple)): 

1454 raise TypeError( 

1455 "Expected list for 'combiners' argument to " 

1456 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % combiners) 

1457 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1458 if max_sequence_lengths is None: 

1459 max_sequence_lengths = [] 

1460 if not isinstance(max_sequence_lengths, (list, tuple)): 

1461 raise TypeError( 

1462 "Expected list for 'max_sequence_lengths' argument to " 

1463 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % max_sequence_lengths) 

1464 max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] 

1465 if num_features is None: 

1466 num_features = [] 

1467 if not isinstance(num_features, (list, tuple)): 

1468 raise TypeError( 

1469 "Expected list for 'num_features' argument to " 

1470 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % num_features) 

1471 num_features = [_execute.make_int(_i, "num_features") for _i in num_features] 

1472 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1473 "EnqueueTPUEmbeddingSparseTensorBatch", sample_indices=sample_indices, 

1474 embedding_indices=embedding_indices, 

1475 aggregation_weights=aggregation_weights, 

1476 mode_override=mode_override, 

1477 table_ids=table_ids, 

1478 device_ordinal=device_ordinal, 

1479 combiners=combiners, 

1480 max_sequence_lengths=max_sequence_lengths, 

1481 num_features=num_features, 

1482 name=name) 

1483 return _op 

1484EnqueueTPUEmbeddingSparseTensorBatch = tf_export("raw_ops.EnqueueTPUEmbeddingSparseTensorBatch")(_ops.to_raw_op(enqueue_tpu_embedding_sparse_tensor_batch)) 
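# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Shows how the sp_ids / sp_weights pieces from
# embedding_lookup_sparse() map onto this op's arguments for a single feature
# looked up in table 0; all values are made up.
def _example_enqueue_sparse_tensor_batch():
  sample_indices = [_ops.convert_to_tensor([0, 1, 1], _dtypes.int32)]            # sp_ids.indices[:, 0]
  embedding_indices = [_ops.convert_to_tensor([3, 8, 2], _dtypes.int32)]         # sp_ids.values
  aggregation_weights = [_ops.convert_to_tensor([1.0, 1.0, 1.0], _dtypes.float32)]  # sp_weights.values
  return enqueue_tpu_embedding_sparse_tensor_batch(
      sample_indices, embedding_indices, aggregation_weights,
      mode_override="unspecified", table_ids=[0], device_ordinal=0,
      combiners=["mean"])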

1485 

1486 

1487def enqueue_tpu_embedding_sparse_tensor_batch_eager_fallback(sample_indices, embedding_indices, aggregation_weights, mode_override, table_ids, device_ordinal, combiners, max_sequence_lengths, num_features, name, ctx): 

1488 if not isinstance(sample_indices, (list, tuple)): 

1489 raise TypeError( 

1490 "Expected list for 'sample_indices' argument to " 

1491 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % sample_indices) 

1492 _attr_N = len(sample_indices) 

1493 if not isinstance(embedding_indices, (list, tuple)): 

1494 raise TypeError( 

1495 "Expected list for 'embedding_indices' argument to " 

1496 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % embedding_indices) 

1497 if len(embedding_indices) != _attr_N: 

1498 raise ValueError( 

1499 "List argument 'embedding_indices' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " 

1500 "must match length %d of argument 'sample_indices'." % 

1501 (len(embedding_indices), _attr_N)) 

1502 if not isinstance(aggregation_weights, (list, tuple)): 

1503 raise TypeError( 

1504 "Expected list for 'aggregation_weights' argument to " 

1505 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % aggregation_weights) 

1506 if len(aggregation_weights) != _attr_N: 

1507 raise ValueError( 

1508 "List argument 'aggregation_weights' to 'enqueue_tpu_embedding_sparse_tensor_batch' Op with length %d " 

1509 "must match length %d of argument 'sample_indices'." % 

1510 (len(aggregation_weights), _attr_N)) 

1511 if not isinstance(table_ids, (list, tuple)): 

1512 raise TypeError( 

1513 "Expected list for 'table_ids' argument to " 

1514 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % table_ids) 

1515 table_ids = [_execute.make_int(_i, "table_ids") for _i in table_ids] 

1516 if device_ordinal is None: 

1517 device_ordinal = -1 

1518 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1519 if combiners is None: 

1520 combiners = [] 

1521 if not isinstance(combiners, (list, tuple)): 

1522 raise TypeError( 

1523 "Expected list for 'combiners' argument to " 

1524 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % combiners) 

1525 combiners = [_execute.make_str(_s, "combiners") for _s in combiners] 

1526 if max_sequence_lengths is None: 

1527 max_sequence_lengths = [] 

1528 if not isinstance(max_sequence_lengths, (list, tuple)): 

1529 raise TypeError( 

1530 "Expected list for 'max_sequence_lengths' argument to " 

1531 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % max_sequence_lengths) 

1532 max_sequence_lengths = [_execute.make_int(_i, "max_sequence_lengths") for _i in max_sequence_lengths] 

1533 if num_features is None: 

1534 num_features = [] 

1535 if not isinstance(num_features, (list, tuple)): 

1536 raise TypeError( 

1537 "Expected list for 'num_features' argument to " 

1538 "'enqueue_tpu_embedding_sparse_tensor_batch' Op, not %r." % num_features) 

1539 num_features = [_execute.make_int(_i, "num_features") for _i in num_features] 

1540 _attr_T1, sample_indices = _execute.args_to_matching_eager(list(sample_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1541 _attr_T2, embedding_indices = _execute.args_to_matching_eager(list(embedding_indices), ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

1542 _attr_T3, aggregation_weights = _execute.args_to_matching_eager(list(aggregation_weights), ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

1543 mode_override = _ops.convert_to_tensor(mode_override, _dtypes.string) 

1544 _inputs_flat = list(sample_indices) + list(embedding_indices) + list(aggregation_weights) + [mode_override] 

1545 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "T3", _attr_T3, "N", _attr_N, 

1546 "device_ordinal", device_ordinal, "combiners", combiners, "table_ids", 

1547 table_ids, "max_sequence_lengths", max_sequence_lengths, "num_features", 

1548 num_features) 

1549 _result = _execute.execute(b"EnqueueTPUEmbeddingSparseTensorBatch", 0, 

1550 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1551 name=name) 

1552 _result = None 

1553 return _result 

1554 

1555 

1556def infeed_dequeue(dtype, shape, name=None): 

1557 r"""A placeholder op for a value that will be fed into the computation. 

1558 

1559 Args: 

1560 dtype: A `tf.DType`. The type of elements in the tensor. 

1561 shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. 

1562 name: A name for the operation (optional). 

1563 

1564 Returns: 

1565 A `Tensor` of type `dtype`. 

1566 """ 

1567 _ctx = _context._context or _context.context() 

1568 tld = _ctx._thread_local_data 

1569 if tld.is_eager: 

1570 try: 

1571 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1572 _ctx, "InfeedDequeue", name, "dtype", dtype, "shape", shape) 

1573 return _result 

1574 except _core._NotOkStatusException as e: 

1575 _ops.raise_from_not_ok_status(e, name) 

1576 except _core._FallbackException: 

1577 pass 

1578 try: 

1579 return infeed_dequeue_eager_fallback( 

1580 dtype=dtype, shape=shape, name=name, ctx=_ctx) 

1581 except _core._SymbolicException: 

1582 pass # Add nodes to the TensorFlow graph. 

1583 # Add nodes to the TensorFlow graph. 

1584 dtype = _execute.make_type(dtype, "dtype") 

1585 shape = _execute.make_shape(shape, "shape") 

1586 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1587 "InfeedDequeue", dtype=dtype, shape=shape, name=name) 

1588 _result = _outputs[:] 

1589 if _execute.must_record_gradient(): 

1590 _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", 

1591 _op.get_attr("shape")) 

1592 _inputs_flat = _op.inputs 

1593 _execute.record_gradient( 

1594 "InfeedDequeue", _inputs_flat, _attrs, _result) 

1595 _result, = _result 

1596 return _result 

1597 

1598InfeedDequeue = tf_export("raw_ops.InfeedDequeue")(_ops.to_raw_op(infeed_dequeue)) 
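# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Inside a TPU computation, dequeue one float32 tensor of shape
# [2, 3]; a matching host-side infeed_enqueue is assumed to supply the value.
def _example_infeed_dequeue():
  return infeed_dequeue(dtype=_dtypes.float32, shape=[2, 3])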

1599 

1600 

1601def infeed_dequeue_eager_fallback(dtype, shape, name, ctx): 

1602 dtype = _execute.make_type(dtype, "dtype") 

1603 shape = _execute.make_shape(shape, "shape") 

1604 _inputs_flat = [] 

1605 _attrs = ("dtype", dtype, "shape", shape) 

1606 _result = _execute.execute(b"InfeedDequeue", 1, inputs=_inputs_flat, 

1607 attrs=_attrs, ctx=ctx, name=name) 

1608 if _execute.must_record_gradient(): 

1609 _execute.record_gradient( 

1610 "InfeedDequeue", _inputs_flat, _attrs, _result) 

1611 _result, = _result 

1612 return _result 

1613 

1614 

1615def infeed_dequeue_tuple(dtypes, shapes, name=None): 

1616 r"""Fetches multiple values from infeed as an XLA tuple. 

1617 

1618 Args: 

1619 dtypes: A list of `tf.DTypes` that has length `>= 1`. 

1620 The element types of each element in `outputs`. 

1621 shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). 

1622 The shapes of each tensor in `outputs`. 

1623 name: A name for the operation (optional). 

1624 

1625 Returns: 

1626 A list of `Tensor` objects of type `dtypes`. 

1627 """ 

1628 _ctx = _context._context or _context.context() 

1629 tld = _ctx._thread_local_data 

1630 if tld.is_eager: 

1631 try: 

1632 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1633 _ctx, "InfeedDequeueTuple", name, "dtypes", dtypes, "shapes", shapes) 

1634 return _result 

1635 except _core._NotOkStatusException as e: 

1636 _ops.raise_from_not_ok_status(e, name) 

1637 except _core._FallbackException: 

1638 pass 

1639 try: 

1640 return infeed_dequeue_tuple_eager_fallback( 

1641 dtypes=dtypes, shapes=shapes, name=name, ctx=_ctx) 

1642 except _core._SymbolicException: 

1643 pass # Add nodes to the TensorFlow graph. 

1644 # Add nodes to the TensorFlow graph. 

1645 if not isinstance(dtypes, (list, tuple)): 

1646 raise TypeError( 

1647 "Expected list for 'dtypes' argument to " 

1648 "'infeed_dequeue_tuple' Op, not %r." % dtypes) 

1649 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1650 if not isinstance(shapes, (list, tuple)): 

1651 raise TypeError( 

1652 "Expected list for 'shapes' argument to " 

1653 "'infeed_dequeue_tuple' Op, not %r." % shapes) 

1654 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1655 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1656 "InfeedDequeueTuple", dtypes=dtypes, shapes=shapes, name=name) 

1657 _result = _outputs[:] 

1658 if not _result: 

1659 return _op 

1660 if _execute.must_record_gradient(): 

1661 _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", 

1662 _op.get_attr("shapes")) 

1663 _inputs_flat = _op.inputs 

1664 _execute.record_gradient( 

1665 "InfeedDequeueTuple", _inputs_flat, _attrs, _result) 

1666 return _result 

1667 

1668InfeedDequeueTuple = tf_export("raw_ops.InfeedDequeueTuple")(_ops.to_raw_op(infeed_dequeue_tuple)) 
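# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Dequeues a two-element XLA tuple (an int32 vector and a float32
# matrix); the dtypes and shapes must match what the host-side
# infeed_enqueue_tuple sends.
def _example_infeed_dequeue_tuple():
  return infeed_dequeue_tuple(
      dtypes=[_dtypes.int32, _dtypes.float32],
      shapes=[[4], [2, 3]])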

1669 

1670 

1671def infeed_dequeue_tuple_eager_fallback(dtypes, shapes, name, ctx): 

1672 if not isinstance(dtypes, (list, tuple)): 

1673 raise TypeError( 

1674 "Expected list for 'dtypes' argument to " 

1675 "'infeed_dequeue_tuple' Op, not %r." % dtypes) 

1676 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

1677 if not isinstance(shapes, (list, tuple)): 

1678 raise TypeError( 

1679 "Expected list for 'shapes' argument to " 

1680 "'infeed_dequeue_tuple' Op, not %r." % shapes) 

1681 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1682 _inputs_flat = [] 

1683 _attrs = ("dtypes", dtypes, "shapes", shapes) 

1684 _result = _execute.execute(b"InfeedDequeueTuple", len(dtypes), 

1685 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1686 name=name) 

1687 if _execute.must_record_gradient(): 

1688 _execute.record_gradient( 

1689 "InfeedDequeueTuple", _inputs_flat, _attrs, _result) 

1690 return _result 

1691 

1692 

1693def infeed_enqueue(input, shape=[], layout=[], device_ordinal=-1, name=None): 

1694 r"""An op which feeds a single Tensor value into the computation. 

1695 

1696 Args: 

1697 input: A `Tensor`. 

1698 A tensor that will be provided using the infeed mechanism. 

1699 shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`. 

1700 The shape of the tensor. 

1701 layout: An optional list of `ints`. Defaults to `[]`. 

1702 A vector holding the requested layout in minor-to-major sequence. 

1703 If a layout attribute is passed, but its values are all -1, the layout will 

1704 be computed by the infeed operation. 

1705 device_ordinal: An optional `int`. Defaults to `-1`. 

1706 The TPU device to use. This should be -1 when the Op 

1707 is running on a TPU device, and >= 0 when the Op is running on the CPU 

1708 device. 

1709 name: A name for the operation (optional). 

1710 

1711 Returns: 

1712 The created Operation. 

1713 """ 

1714 _ctx = _context._context or _context.context() 

1715 tld = _ctx._thread_local_data 

1716 if tld.is_eager: 

1717 try: 

1718 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1719 _ctx, "InfeedEnqueue", name, input, "shape", shape, "layout", layout, 

1720 "device_ordinal", device_ordinal) 

1721 return _result 

1722 except _core._NotOkStatusException as e: 

1723 _ops.raise_from_not_ok_status(e, name) 

1724 except _core._FallbackException: 

1725 pass 

1726 try: 

1727 return infeed_enqueue_eager_fallback( 

1728 input, shape=shape, layout=layout, device_ordinal=device_ordinal, 

1729 name=name, ctx=_ctx) 

1730 except _core._SymbolicException: 

1731 pass # Add nodes to the TensorFlow graph. 

1732 # Add nodes to the TensorFlow graph. 

1733 if shape is None: 

1734 shape = [] 

1735 shape = _execute.make_shape(shape, "shape") 

1736 if layout is None: 

1737 layout = [] 

1738 if not isinstance(layout, (list, tuple)): 

1739 raise TypeError( 

1740 "Expected list for 'layout' argument to " 

1741 "'infeed_enqueue' Op, not %r." % layout) 

1742 layout = [_execute.make_int(_i, "layout") for _i in layout] 

1743 if device_ordinal is None: 

1744 device_ordinal = -1 

1745 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1746 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1747 "InfeedEnqueue", input=input, shape=shape, layout=layout, 

1748 device_ordinal=device_ordinal, name=name) 

1749 return _op 

1750InfeedEnqueue = tf_export("raw_ops.InfeedEnqueue")(_ops.to_raw_op(infeed_enqueue)) 
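# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Host-side enqueue of a single tensor; the value, shape, and
# device ordinal are made up and must match the consuming infeed_dequeue.
def _example_infeed_enqueue():
  value = _ops.convert_to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], _dtypes.float32)
  return infeed_enqueue(value, shape=[2, 3], device_ordinal=0)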

1751 

1752 

1753def infeed_enqueue_eager_fallback(input, shape, layout, device_ordinal, name, ctx): 

1754 if shape is None: 

1755 shape = [] 

1756 shape = _execute.make_shape(shape, "shape") 

1757 if layout is None: 

1758 layout = [] 

1759 if not isinstance(layout, (list, tuple)): 

1760 raise TypeError( 

1761 "Expected list for 'layout' argument to " 

1762 "'infeed_enqueue' Op, not %r." % layout) 

1763 layout = [_execute.make_int(_i, "layout") for _i in layout] 

1764 if device_ordinal is None: 

1765 device_ordinal = -1 

1766 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1767 _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) 

1768 _inputs_flat = [input] 

1769 _attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout, 

1770 "device_ordinal", device_ordinal) 

1771 _result = _execute.execute(b"InfeedEnqueue", 0, inputs=_inputs_flat, 

1772 attrs=_attrs, ctx=ctx, name=name) 

1773 _result = None 

1774 return _result 

1775 

1776 

1777def infeed_enqueue_prelinearized_buffer(input, device_ordinal=-1, name=None): 

1778 r"""An op which enqueues prelinearized buffer into TPU infeed. 

1779 

1780 Args: 

1781 input: A `Tensor` of type `variant`. 

1782 A variant tensor representing linearized output. 

1783 device_ordinal: An optional `int`. Defaults to `-1`. 

1784 The TPU device to use. This should be -1 when the Op is running on a TPU device 

1785 and >= 0 when the Op is running on the CPU device. 

1786 name: A name for the operation (optional). 

1787 

1788 Returns: 

1789 The created Operation. 

1790 """ 

1791 _ctx = _context._context or _context.context() 

1792 tld = _ctx._thread_local_data 

1793 if tld.is_eager: 

1794 try: 

1795 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1796 _ctx, "InfeedEnqueuePrelinearizedBuffer", name, input, 

1797 "device_ordinal", device_ordinal) 

1798 return _result 

1799 except _core._NotOkStatusException as e: 

1800 _ops.raise_from_not_ok_status(e, name) 

1801 except _core._FallbackException: 

1802 pass 

1803 try: 

1804 return infeed_enqueue_prelinearized_buffer_eager_fallback( 

1805 input, device_ordinal=device_ordinal, name=name, ctx=_ctx) 

1806 except _core._SymbolicException: 

1807 pass # Add nodes to the TensorFlow graph. 

1808 # Add nodes to the TensorFlow graph. 

1809 if device_ordinal is None: 

1810 device_ordinal = -1 

1811 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1812 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1813 "InfeedEnqueuePrelinearizedBuffer", input=input, 

1814 device_ordinal=device_ordinal, 

1815 name=name) 

1816 return _op 

1817InfeedEnqueuePrelinearizedBuffer = tf_export("raw_ops.InfeedEnqueuePrelinearizedBuffer")(_ops.to_raw_op(infeed_enqueue_prelinearized_buffer)) 

1818 

1819 

1820def infeed_enqueue_prelinearized_buffer_eager_fallback(input, device_ordinal, name, ctx): 

1821 if device_ordinal is None: 

1822 device_ordinal = -1 

1823 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1824 input = _ops.convert_to_tensor(input, _dtypes.variant) 

1825 _inputs_flat = [input] 

1826 _attrs = ("device_ordinal", device_ordinal) 

1827 _result = _execute.execute(b"InfeedEnqueuePrelinearizedBuffer", 0, 

1828 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1829 name=name) 

1830 _result = None 

1831 return _result 

1832 

1833 

1834def infeed_enqueue_tuple(inputs, shapes, layouts=[], device_ordinal=-1, name=None): 

1835 r"""Feeds multiple Tensor values into the computation as an XLA tuple. 

1836 

1837 Args: 

1838 inputs: A list of `Tensor` objects. 

1839 A list of tensors that will be provided using the infeed mechanism. 

1840 shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). 

1841 The shapes of each tensor in `inputs`. 

1842 layouts: An optional list of `ints`. Defaults to `[]`. 

1843 A vector holding the requested layout in minor-to-major sequence for 

1844 all the tuple shapes, in the order the shapes appear in the "shapes" input. 

1845 The layout elements for a sub-shape can be set to -1, in which case the 

1846 corresponding layout will be computed by the infeed operation. 

1847 device_ordinal: An optional `int`. Defaults to `-1`. 

1848 The TPU device to use. This should be -1 when the Op 

1849 is running on a TPU device, and >= 0 when the Op is running on the CPU 

1850 device. 

1851 name: A name for the operation (optional). 

1852 

1853 Returns: 

1854 The created Operation. 

1855 """ 

1856 _ctx = _context._context or _context.context() 

1857 tld = _ctx._thread_local_data 

1858 if tld.is_eager: 

1859 try: 

1860 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1861 _ctx, "InfeedEnqueueTuple", name, inputs, "shapes", shapes, "layouts", 

1862 layouts, "device_ordinal", device_ordinal) 

1863 return _result 

1864 except _core._NotOkStatusException as e: 

1865 _ops.raise_from_not_ok_status(e, name) 

1866 except _core._FallbackException: 

1867 pass 

1868 try: 

1869 return infeed_enqueue_tuple_eager_fallback( 

1870 inputs, shapes=shapes, layouts=layouts, 

1871 device_ordinal=device_ordinal, name=name, ctx=_ctx) 

1872 except _core._SymbolicException: 

1873 pass # Add nodes to the TensorFlow graph. 

1874 # Add nodes to the TensorFlow graph. 

1875 if not isinstance(shapes, (list, tuple)): 

1876 raise TypeError( 

1877 "Expected list for 'shapes' argument to " 

1878 "'infeed_enqueue_tuple' Op, not %r." % shapes) 

1879 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1880 if layouts is None: 

1881 layouts = [] 

1882 if not isinstance(layouts, (list, tuple)): 

1883 raise TypeError( 

1884 "Expected list for 'layouts' argument to " 

1885 "'infeed_enqueue_tuple' Op, not %r." % layouts) 

1886 layouts = [_execute.make_int(_i, "layouts") for _i in layouts] 

1887 if device_ordinal is None: 

1888 device_ordinal = -1 

1889 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1890 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1891 "InfeedEnqueueTuple", inputs=inputs, shapes=shapes, layouts=layouts, 

1892 device_ordinal=device_ordinal, name=name) 

1893 return _op 

1894InfeedEnqueueTuple = tf_export("raw_ops.InfeedEnqueueTuple")(_ops.to_raw_op(infeed_enqueue_tuple)) 
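# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Host-side enqueue of an XLA tuple matching the dequeue example
# above; layouts is left at its default so the infeed operation chooses them.
def _example_infeed_enqueue_tuple():
  ids = _ops.convert_to_tensor([1, 2, 3, 4], _dtypes.int32)
  feats = _ops.convert_to_tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], _dtypes.float32)
  return infeed_enqueue_tuple([ids, feats], shapes=[[4], [2, 3]], device_ordinal=0)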

1895 

1896 

1897def infeed_enqueue_tuple_eager_fallback(inputs, shapes, layouts, device_ordinal, name, ctx): 

1898 if not isinstance(shapes, (list, tuple)): 

1899 raise TypeError( 

1900 "Expected list for 'shapes' argument to " 

1901 "'infeed_enqueue_tuple' Op, not %r." % shapes) 

1902 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

1903 if layouts is None: 

1904 layouts = [] 

1905 if not isinstance(layouts, (list, tuple)): 

1906 raise TypeError( 

1907 "Expected list for 'layouts' argument to " 

1908 "'infeed_enqueue_tuple' Op, not %r." % layouts) 

1909 layouts = [_execute.make_int(_i, "layouts") for _i in layouts] 

1910 if device_ordinal is None: 

1911 device_ordinal = -1 

1912 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

1913 _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) 

1914 _inputs_flat = list(inputs) 

1915 _attrs = ("dtypes", _attr_dtypes, "shapes", shapes, "layouts", layouts, 

1916 "device_ordinal", device_ordinal) 

1917 _result = _execute.execute(b"InfeedEnqueueTuple", 0, inputs=_inputs_flat, 

1918 attrs=_attrs, ctx=ctx, name=name) 

1919 _result = None 

1920 return _result 

1921 

1922 

1923def is_tpu_embedding_initialized(config="", name=None): 

1924 r"""Whether TPU Embedding is initialized in a distributed TPU system. 

1925 

1926 Args: 

1927 config: An optional `string`. Defaults to `""`. 

1928 name: A name for the operation (optional). 

1929 

1930 Returns: 

1931 A `Tensor` of type `bool`. 

1932 """ 

1933 _ctx = _context._context or _context.context() 

1934 tld = _ctx._thread_local_data 

1935 if tld.is_eager: 

1936 try: 

1937 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1938 _ctx, "IsTPUEmbeddingInitialized", name, "config", config) 

1939 return _result 

1940 except _core._NotOkStatusException as e: 

1941 _ops.raise_from_not_ok_status(e, name) 

1942 except _core._FallbackException: 

1943 pass 

1944 try: 

1945 return is_tpu_embedding_initialized_eager_fallback( 

1946 config=config, name=name, ctx=_ctx) 

1947 except _core._SymbolicException: 

1948 pass # Add nodes to the TensorFlow graph. 

1949 # Add nodes to the TensorFlow graph. 

1950 if config is None: 

1951 config = "" 

1952 config = _execute.make_str(config, "config") 

1953 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1954 "IsTPUEmbeddingInitialized", config=config, name=name) 

1955 _result = _outputs[:] 

1956 if _execute.must_record_gradient(): 

1957 _attrs = ("config", _op.get_attr("config")) 

1958 _inputs_flat = _op.inputs 

1959 _execute.record_gradient( 

1960 "IsTPUEmbeddingInitialized", _inputs_flat, _attrs, _result) 

1961 _result, = _result 

1962 return _result 

1963 

1964IsTPUEmbeddingInitialized = tf_export("raw_ops.IsTPUEmbeddingInitialized")(_ops.to_raw_op(is_tpu_embedding_initialized)) 
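# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Returns a scalar bool tensor; leaving config empty queries the
# system's currently installed TPUEmbeddingConfiguration.
def _example_is_tpu_embedding_initialized():
  return is_tpu_embedding_initialized(config="")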

1965 

1966 

1967def is_tpu_embedding_initialized_eager_fallback(config, name, ctx): 

1968 if config is None: 

1969 config = "" 

1970 config = _execute.make_str(config, "config") 

1971 _inputs_flat = [] 

1972 _attrs = ("config", config) 

1973 _result = _execute.execute(b"IsTPUEmbeddingInitialized", 1, 

1974 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1975 name=name) 

1976 if _execute.must_record_gradient(): 

1977 _execute.record_gradient( 

1978 "IsTPUEmbeddingInitialized", _inputs_flat, _attrs, _result) 

1979 _result, = _result 

1980 return _result 

1981 

1982 

1983def load_tpu_embedding_adam_parameters(parameters, momenta, velocities, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

1984 r"""Load ADAM embedding parameters. 

1985 

1986 An op that loads optimization parameters into HBM for embedding. Must be 

1987 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

1988 embedding table configuration. For example, this op is used to install 

1989 parameters that are loaded from a checkpoint before a training loop is 

1990 executed. 

1991 

1992 Args: 

1993 parameters: A `Tensor` of type `float32`. 

1994 Value of parameters used in the ADAM optimization algorithm. 

1995 momenta: A `Tensor` of type `float32`. 

1996 Value of momenta used in the ADAM optimization algorithm. 

1997 velocities: A `Tensor` of type `float32`. 

1998 Value of velocities used in the ADAM optimization algorithm. 

1999 num_shards: An `int`. 

2000 shard_id: An `int`. 

2001 table_id: An optional `int`. Defaults to `-1`. 

2002 table_name: An optional `string`. Defaults to `""`. 

2003 config: An optional `string`. Defaults to `""`. 

2004 name: A name for the operation (optional). 

2005 

2006 Returns: 

2007 The created Operation. 

2008 """ 

2009 _ctx = _context._context or _context.context() 

2010 tld = _ctx._thread_local_data 

2011 if tld.is_eager: 

2012 try: 

2013 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2014 _ctx, "LoadTPUEmbeddingADAMParameters", name, parameters, momenta, 

2015 velocities, "table_id", table_id, "table_name", table_name, 

2016 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2017 return _result 

2018 except _core._NotOkStatusException as e: 

2019 _ops.raise_from_not_ok_status(e, name) 

2020 except _core._FallbackException: 

2021 pass 

2022 try: 

2023 return load_tpu_embedding_adam_parameters_eager_fallback( 

2024 parameters, momenta, velocities, table_id=table_id, 

2025 table_name=table_name, num_shards=num_shards, shard_id=shard_id, 

2026 config=config, name=name, ctx=_ctx) 

2027 except _core._SymbolicException: 

2028 pass # Add nodes to the TensorFlow graph. 

2029 # Add nodes to the TensorFlow graph. 

2030 num_shards = _execute.make_int(num_shards, "num_shards") 

2031 shard_id = _execute.make_int(shard_id, "shard_id") 

2032 if table_id is None: 

2033 table_id = -1 

2034 table_id = _execute.make_int(table_id, "table_id") 

2035 if table_name is None: 

2036 table_name = "" 

2037 table_name = _execute.make_str(table_name, "table_name") 

2038 if config is None: 

2039 config = "" 

2040 config = _execute.make_str(config, "config") 

2041 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2042 "LoadTPUEmbeddingADAMParameters", parameters=parameters, 

2043 momenta=momenta, 

2044 velocities=velocities, 

2045 num_shards=num_shards, 

2046 shard_id=shard_id, 

2047 table_id=table_id, 

2048 table_name=table_name, 

2049 config=config, name=name) 

2050 return _op 

2051LoadTPUEmbeddingADAMParameters = tf_export("raw_ops.LoadTPUEmbeddingADAMParameters")(_ops.to_raw_op(load_tpu_embedding_adam_parameters)) 
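# Illustrative usage sketch (hand-written example, not produced by the op
# generator). Pushes checkpointed ADAM slot values for one table shard into
# HBM; the 8x4 shard shape and the table name are made-up example values, and
# a ConfigureTPUEmbeddingHost op is assumed to have run already.
def _example_load_adam_parameters():
  shard = [[0.0] * 4] * 8  # one shard of an 8-row, 4-dimensional table
  return load_tpu_embedding_adam_parameters(
      parameters=_ops.convert_to_tensor(shard, _dtypes.float32),
      momenta=_ops.convert_to_tensor(shard, _dtypes.float32),
      velocities=_ops.convert_to_tensor(shard, _dtypes.float32),
      num_shards=1, shard_id=0, table_name="example_table")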

2052 

2053 

2054def load_tpu_embedding_adam_parameters_eager_fallback(parameters, momenta, velocities, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2055 num_shards = _execute.make_int(num_shards, "num_shards") 

2056 shard_id = _execute.make_int(shard_id, "shard_id") 

2057 if table_id is None: 

2058 table_id = -1 

2059 table_id = _execute.make_int(table_id, "table_id") 

2060 if table_name is None: 

2061 table_name = "" 

2062 table_name = _execute.make_str(table_name, "table_name") 

2063 if config is None: 

2064 config = "" 

2065 config = _execute.make_str(config, "config") 

2066 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2067 momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) 

2068 velocities = _ops.convert_to_tensor(velocities, _dtypes.float32) 

2069 _inputs_flat = [parameters, momenta, velocities] 

2070 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2071 num_shards, "shard_id", shard_id, "config", config) 

2072 _result = _execute.execute(b"LoadTPUEmbeddingADAMParameters", 0, 

2073 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2074 name=name) 

2075 _result = None 

2076 return _result 

2077 

2078 

2079def load_tpu_embedding_adadelta_parameters(parameters, accumulators, updates, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2080 r"""Load Adadelta embedding parameters. 

2081 

2082 An op that loads optimization parameters into HBM for embedding. Must be 

2083 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2084 embedding table configuration. For example, this op is used to install 

2085 parameters that are loaded from a checkpoint before a training loop is 

2086 executed. 

2087 

2088 Args: 

2089 parameters: A `Tensor` of type `float32`. 

2090 Value of parameters used in the Adadelta optimization algorithm. 

2091 accumulators: A `Tensor` of type `float32`. 

2092 Value of accumulators used in the Adadelta optimization algorithm. 

2093 updates: A `Tensor` of type `float32`. 

2094 Value of updates used in the Adadelta optimization algorithm. 

2095 num_shards: An `int`. 

2096 shard_id: An `int`. 

2097 table_id: An optional `int`. Defaults to `-1`. 

2098 table_name: An optional `string`. Defaults to `""`. 

2099 config: An optional `string`. Defaults to `""`. 

2100 name: A name for the operation (optional). 

2101 

2102 Returns: 

2103 The created Operation. 

2104 """ 

2105 _ctx = _context._context or _context.context() 

2106 tld = _ctx._thread_local_data 

2107 if tld.is_eager: 

2108 try: 

2109 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2110 _ctx, "LoadTPUEmbeddingAdadeltaParameters", name, parameters, 

2111 accumulators, updates, "table_id", table_id, "table_name", table_name, 

2112 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2113 return _result 

2114 except _core._NotOkStatusException as e: 

2115 _ops.raise_from_not_ok_status(e, name) 

2116 except _core._FallbackException: 

2117 pass 

2118 try: 

2119 return load_tpu_embedding_adadelta_parameters_eager_fallback( 

2120 parameters, accumulators, updates, table_id=table_id, 

2121 table_name=table_name, num_shards=num_shards, shard_id=shard_id, 

2122 config=config, name=name, ctx=_ctx) 

2123 except _core._SymbolicException: 

2124 pass # Add nodes to the TensorFlow graph. 

2125 # Add nodes to the TensorFlow graph. 

2126 num_shards = _execute.make_int(num_shards, "num_shards") 

2127 shard_id = _execute.make_int(shard_id, "shard_id") 

2128 if table_id is None: 

2129 table_id = -1 

2130 table_id = _execute.make_int(table_id, "table_id") 

2131 if table_name is None: 

2132 table_name = "" 

2133 table_name = _execute.make_str(table_name, "table_name") 

2134 if config is None: 

2135 config = "" 

2136 config = _execute.make_str(config, "config") 

2137 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2138 "LoadTPUEmbeddingAdadeltaParameters", parameters=parameters, 

2139 accumulators=accumulators, 

2140 updates=updates, 

2141 num_shards=num_shards, 

2142 shard_id=shard_id, 

2143 table_id=table_id, 

2144 table_name=table_name, 

2145 config=config, name=name) 

2146 return _op 

2147LoadTPUEmbeddingAdadeltaParameters = tf_export("raw_ops.LoadTPUEmbeddingAdadeltaParameters")(_ops.to_raw_op(load_tpu_embedding_adadelta_parameters)) 

2148 

2149 

2150def load_tpu_embedding_adadelta_parameters_eager_fallback(parameters, accumulators, updates, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2151 num_shards = _execute.make_int(num_shards, "num_shards") 

2152 shard_id = _execute.make_int(shard_id, "shard_id") 

2153 if table_id is None: 

2154 table_id = -1 

2155 table_id = _execute.make_int(table_id, "table_id") 

2156 if table_name is None: 

2157 table_name = "" 

2158 table_name = _execute.make_str(table_name, "table_name") 

2159 if config is None: 

2160 config = "" 

2161 config = _execute.make_str(config, "config") 

2162 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2163 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2164 updates = _ops.convert_to_tensor(updates, _dtypes.float32) 

2165 _inputs_flat = [parameters, accumulators, updates] 

2166 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2167 num_shards, "shard_id", shard_id, "config", config) 

2168 _result = _execute.execute(b"LoadTPUEmbeddingAdadeltaParameters", 0, 

2169 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2170 name=name) 

2171 _result = None 

2172 return _result 

2173 

2174 

2175def load_tpu_embedding_adagrad_momentum_parameters(parameters, accumulators, momenta, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2176 r"""Load Adagrad Momentum embedding parameters. 

2177 

2178 An op that loads optimization parameters into HBM for embedding. Must be 

2179 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2180 embedding table configuration. For example, this op is used to install 

2181 parameters that are loaded from a checkpoint before a training loop is 

2182 executed. 

2183 

2184 Args: 

2185 parameters: A `Tensor` of type `float32`. 

2186 Value of parameters used in the Adagrad Momentum optimization algorithm. 

2187 accumulators: A `Tensor` of type `float32`. 

2188 Value of accumulators used in the Adagrad Momentum optimization algorithm. 

2189 momenta: A `Tensor` of type `float32`. 

2190 Value of momenta used in the Adagrad Momentum optimization algorithm. 

2191 num_shards: An `int`. 

2192 shard_id: An `int`. 

2193 table_id: An optional `int`. Defaults to `-1`. 

2194 table_name: An optional `string`. Defaults to `""`. 

2195 config: An optional `string`. Defaults to `""`. 

2196 name: A name for the operation (optional). 

2197 

2198 Returns: 

2199 The created Operation. 

2200 """ 

2201 _ctx = _context._context or _context.context() 

2202 tld = _ctx._thread_local_data 

2203 if tld.is_eager: 

2204 try: 

2205 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2206 _ctx, "LoadTPUEmbeddingAdagradMomentumParameters", name, parameters, 

2207 accumulators, momenta, "table_id", table_id, "table_name", table_name, 

2208 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2209 return _result 

2210 except _core._NotOkStatusException as e: 

2211 _ops.raise_from_not_ok_status(e, name) 

2212 except _core._FallbackException: 

2213 pass 

2214 try: 

2215 return load_tpu_embedding_adagrad_momentum_parameters_eager_fallback( 

2216 parameters, accumulators, momenta, table_id=table_id, 

2217 table_name=table_name, num_shards=num_shards, shard_id=shard_id, 

2218 config=config, name=name, ctx=_ctx) 

2219 except _core._SymbolicException: 

2220 pass # Add nodes to the TensorFlow graph. 

2221 # Add nodes to the TensorFlow graph. 

2222 num_shards = _execute.make_int(num_shards, "num_shards") 

2223 shard_id = _execute.make_int(shard_id, "shard_id") 

2224 if table_id is None: 

2225 table_id = -1 

2226 table_id = _execute.make_int(table_id, "table_id") 

2227 if table_name is None: 

2228 table_name = "" 

2229 table_name = _execute.make_str(table_name, "table_name") 

2230 if config is None: 

2231 config = "" 

2232 config = _execute.make_str(config, "config") 

2233 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2234 "LoadTPUEmbeddingAdagradMomentumParameters", parameters=parameters, 

2235 accumulators=accumulators, 

2236 momenta=momenta, 

2237 num_shards=num_shards, 

2238 shard_id=shard_id, 

2239 table_id=table_id, 

2240 table_name=table_name, 

2241 config=config, name=name) 

2242 return _op 

2243LoadTPUEmbeddingAdagradMomentumParameters = tf_export("raw_ops.LoadTPUEmbeddingAdagradMomentumParameters")(_ops.to_raw_op(load_tpu_embedding_adagrad_momentum_parameters)) 

2244 

2245 

2246def load_tpu_embedding_adagrad_momentum_parameters_eager_fallback(parameters, accumulators, momenta, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2247 num_shards = _execute.make_int(num_shards, "num_shards") 

2248 shard_id = _execute.make_int(shard_id, "shard_id") 

2249 if table_id is None: 

2250 table_id = -1 

2251 table_id = _execute.make_int(table_id, "table_id") 

2252 if table_name is None: 

2253 table_name = "" 

2254 table_name = _execute.make_str(table_name, "table_name") 

2255 if config is None: 

2256 config = "" 

2257 config = _execute.make_str(config, "config") 

2258 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2259 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2260 momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) 

2261 _inputs_flat = [parameters, accumulators, momenta] 

2262 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2263 num_shards, "shard_id", shard_id, "config", config) 

2264 _result = _execute.execute(b"LoadTPUEmbeddingAdagradMomentumParameters", 0, 

2265 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2266 name=name) 

2267 _result = None 

2268 return _result 

2269 

2270 

2271def load_tpu_embedding_adagrad_parameters(parameters, accumulators, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2272 r"""Load Adagrad embedding parameters. 

2273 

2274 An op that loads optimization parameters into HBM for embedding. Must be 

2275 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2276 embedding table configuration. For example, this op is used to install 

2277 parameters that are loaded from a checkpoint before a training loop is 

2278 executed. 

2279 

2280 Args: 

2281 parameters: A `Tensor` of type `float32`. 

2282 Value of parameters used in the Adagrad optimization algorithm. 

2283 accumulators: A `Tensor` of type `float32`. 

2284 Value of accumulators used in the Adagrad optimization algorithm. 

2285 num_shards: An `int`. 

2286 shard_id: An `int`. 

2287 table_id: An optional `int`. Defaults to `-1`. 

2288 table_name: An optional `string`. Defaults to `""`. 

2289 config: An optional `string`. Defaults to `""`. 

2290 name: A name for the operation (optional). 

2291 

2292 Returns: 

2293 The created Operation. 

2294 """ 

2295 _ctx = _context._context or _context.context() 

2296 tld = _ctx._thread_local_data 

2297 if tld.is_eager: 

2298 try: 

2299 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2300 _ctx, "LoadTPUEmbeddingAdagradParameters", name, parameters, 

2301 accumulators, "table_id", table_id, "table_name", table_name, 

2302 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2303 return _result 

2304 except _core._NotOkStatusException as e: 

2305 _ops.raise_from_not_ok_status(e, name) 

2306 except _core._FallbackException: 

2307 pass 

2308 try: 

2309 return load_tpu_embedding_adagrad_parameters_eager_fallback( 

2310 parameters, accumulators, table_id=table_id, table_name=table_name, 

2311 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2312 ctx=_ctx) 

2313 except _core._SymbolicException: 

2314 pass # Add nodes to the TensorFlow graph. 

2315 # Add nodes to the TensorFlow graph. 

2316 num_shards = _execute.make_int(num_shards, "num_shards") 

2317 shard_id = _execute.make_int(shard_id, "shard_id") 

2318 if table_id is None: 

2319 table_id = -1 

2320 table_id = _execute.make_int(table_id, "table_id") 

2321 if table_name is None: 

2322 table_name = "" 

2323 table_name = _execute.make_str(table_name, "table_name") 

2324 if config is None: 

2325 config = "" 

2326 config = _execute.make_str(config, "config") 

2327 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2328 "LoadTPUEmbeddingAdagradParameters", parameters=parameters, 

2329 accumulators=accumulators, 

2330 num_shards=num_shards, 

2331 shard_id=shard_id, 

2332 table_id=table_id, 

2333 table_name=table_name, 

2334 config=config, name=name) 

2335 return _op 

2336LoadTPUEmbeddingAdagradParameters = tf_export("raw_ops.LoadTPUEmbeddingAdagradParameters")(_ops.to_raw_op(load_tpu_embedding_adagrad_parameters)) 

2337 

2338 

2339def load_tpu_embedding_adagrad_parameters_eager_fallback(parameters, accumulators, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2340 num_shards = _execute.make_int(num_shards, "num_shards") 

2341 shard_id = _execute.make_int(shard_id, "shard_id") 

2342 if table_id is None: 

2343 table_id = -1 

2344 table_id = _execute.make_int(table_id, "table_id") 

2345 if table_name is None: 

2346 table_name = "" 

2347 table_name = _execute.make_str(table_name, "table_name") 

2348 if config is None: 

2349 config = "" 

2350 config = _execute.make_str(config, "config") 

2351 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2352 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2353 _inputs_flat = [parameters, accumulators] 

2354 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2355 num_shards, "shard_id", shard_id, "config", config) 

2356 _result = _execute.execute(b"LoadTPUEmbeddingAdagradParameters", 0, 

2357 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2358 name=name) 

2359 _result = None 

2360 return _result 
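# Illustrative sketch: restoring an Adagrad table shard from checkpointed
# values. All names and shard values are placeholders, and a configured TPU
# embedding system is assumed.
def _example_load_adagrad(checkpointed_params, checkpointed_accumulators):
  return load_tpu_embedding_adagrad_parameters(
      parameters=checkpointed_params, accumulators=checkpointed_accumulators,
      num_shards=1, shard_id=0, table_name="adagrad_table")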

2361 

2362 

2363def load_tpu_embedding_centered_rms_prop_parameters(parameters, ms, mom, mg, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2364 r"""Load centered RMSProp embedding parameters. 

2365 

2366 An op that loads optimization parameters into HBM for embedding. Must be 

2367 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2368 embedding table configuration. For example, this op is used to install 

2369 parameters that are loaded from a checkpoint before a training loop is 

2370 executed. 

2371 

2372 Args: 

2373 parameters: A `Tensor` of type `float32`. 

2374 Value of parameters used in the centered RMSProp optimization algorithm. 

2375 ms: A `Tensor` of type `float32`. 

2376 Value of ms used in the centered RMSProp optimization algorithm. 

2377 mom: A `Tensor` of type `float32`. 

2378 Value of mom used in the centered RMSProp optimization algorithm. 

2379 mg: A `Tensor` of type `float32`. 

2380 Value of mg used in the centered RMSProp optimization algorithm. 

2381 num_shards: An `int`. 

2382 shard_id: An `int`. 

2383 table_id: An optional `int`. Defaults to `-1`. 

2384 table_name: An optional `string`. Defaults to `""`. 

2385 config: An optional `string`. Defaults to `""`. 

2386 name: A name for the operation (optional). 

2387 

2388 Returns: 

2389 The created Operation. 

2390 """ 

2391 _ctx = _context._context or _context.context() 

2392 tld = _ctx._thread_local_data 

2393 if tld.is_eager: 

2394 try: 

2395 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2396 _ctx, "LoadTPUEmbeddingCenteredRMSPropParameters", name, parameters, 

2397 ms, mom, mg, "table_id", table_id, "table_name", table_name, 

2398 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2399 return _result 

2400 except _core._NotOkStatusException as e: 

2401 _ops.raise_from_not_ok_status(e, name) 

2402 except _core._FallbackException: 

2403 pass 

2404 try: 

2405 return load_tpu_embedding_centered_rms_prop_parameters_eager_fallback( 

2406 parameters, ms, mom, mg, table_id=table_id, table_name=table_name, 

2407 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2408 ctx=_ctx) 

2409 except _core._SymbolicException: 

2410 pass # Add nodes to the TensorFlow graph. 

2411 # Add nodes to the TensorFlow graph. 

2412 num_shards = _execute.make_int(num_shards, "num_shards") 

2413 shard_id = _execute.make_int(shard_id, "shard_id") 

2414 if table_id is None: 

2415 table_id = -1 

2416 table_id = _execute.make_int(table_id, "table_id") 

2417 if table_name is None: 

2418 table_name = "" 

2419 table_name = _execute.make_str(table_name, "table_name") 

2420 if config is None: 

2421 config = "" 

2422 config = _execute.make_str(config, "config") 

2423 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2424 "LoadTPUEmbeddingCenteredRMSPropParameters", parameters=parameters, 

2425 ms=ms, mom=mom, mg=mg, 

2426 num_shards=num_shards, 

2427 shard_id=shard_id, 

2428 table_id=table_id, 

2429 table_name=table_name, 

2430 config=config, name=name) 

2431 return _op 

2432LoadTPUEmbeddingCenteredRMSPropParameters = tf_export("raw_ops.LoadTPUEmbeddingCenteredRMSPropParameters")(_ops.to_raw_op(load_tpu_embedding_centered_rms_prop_parameters)) 

2433 

2434 

2435def load_tpu_embedding_centered_rms_prop_parameters_eager_fallback(parameters, ms, mom, mg, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2436 num_shards = _execute.make_int(num_shards, "num_shards") 

2437 shard_id = _execute.make_int(shard_id, "shard_id") 

2438 if table_id is None: 

2439 table_id = -1 

2440 table_id = _execute.make_int(table_id, "table_id") 

2441 if table_name is None: 

2442 table_name = "" 

2443 table_name = _execute.make_str(table_name, "table_name") 

2444 if config is None: 

2445 config = "" 

2446 config = _execute.make_str(config, "config") 

2447 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2448 ms = _ops.convert_to_tensor(ms, _dtypes.float32) 

2449 mom = _ops.convert_to_tensor(mom, _dtypes.float32) 

2450 mg = _ops.convert_to_tensor(mg, _dtypes.float32) 

2451 _inputs_flat = [parameters, ms, mom, mg] 

2452 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2453 num_shards, "shard_id", shard_id, "config", config) 

2454 _result = _execute.execute(b"LoadTPUEmbeddingCenteredRMSPropParameters", 0, 

2455 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2456 name=name) 

2457 _result = None 

2458 return _result 
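# Illustrative sketch (hypothetical helper): the centered-RMSProp loader takes
# four per-table tensors -- parameters plus the ms, mom and mg slots -- all of
# the same shape. Assumes a TPU worker with an installed embedding config.
def _example_load_centered_rms_prop(params, ms, mom, mg):
  return load_tpu_embedding_centered_rms_prop_parameters(
      parameters=params, ms=ms, mom=mom, mg=mg,
      num_shards=1, shard_id=0, table_name="centered_rmsprop_table")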

2459 

2460 

2461def load_tpu_embedding_ftrl_parameters(parameters, accumulators, linears, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2462 r"""Load FTRL embedding parameters. 

2463 

2464 An op that loads optimization parameters into HBM for embedding. Must be 

2465 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2466 embedding table configuration. For example, this op is used to install 

2467 parameters that are loaded from a checkpoint before a training loop is 

2468 executed. 

2469 

2470 Args: 

2471 parameters: A `Tensor` of type `float32`. 

2472 Value of parameters used in the FTRL optimization algorithm. 

2473 accumulators: A `Tensor` of type `float32`. 

2474 Value of accumulators used in the FTRL optimization algorithm. 

2475 linears: A `Tensor` of type `float32`. 

2476 Value of linears used in the FTRL optimization algorithm. 

2477 num_shards: An `int`. 

2478 shard_id: An `int`. 

2479 table_id: An optional `int`. Defaults to `-1`. 

2480 table_name: An optional `string`. Defaults to `""`. 

2481 config: An optional `string`. Defaults to `""`. 

2482 name: A name for the operation (optional). 

2483 

2484 Returns: 

2485 The created Operation. 

2486 """ 

2487 _ctx = _context._context or _context.context() 

2488 tld = _ctx._thread_local_data 

2489 if tld.is_eager: 

2490 try: 

2491 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2492 _ctx, "LoadTPUEmbeddingFTRLParameters", name, parameters, 

2493 accumulators, linears, "table_id", table_id, "table_name", table_name, 

2494 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2495 return _result 

2496 except _core._NotOkStatusException as e: 

2497 _ops.raise_from_not_ok_status(e, name) 

2498 except _core._FallbackException: 

2499 pass 

2500 try: 

2501 return load_tpu_embedding_ftrl_parameters_eager_fallback( 

2502 parameters, accumulators, linears, table_id=table_id, 

2503 table_name=table_name, num_shards=num_shards, shard_id=shard_id, 

2504 config=config, name=name, ctx=_ctx) 

2505 except _core._SymbolicException: 

2506 pass # Add nodes to the TensorFlow graph. 

2507 # Add nodes to the TensorFlow graph. 

2508 num_shards = _execute.make_int(num_shards, "num_shards") 

2509 shard_id = _execute.make_int(shard_id, "shard_id") 

2510 if table_id is None: 

2511 table_id = -1 

2512 table_id = _execute.make_int(table_id, "table_id") 

2513 if table_name is None: 

2514 table_name = "" 

2515 table_name = _execute.make_str(table_name, "table_name") 

2516 if config is None: 

2517 config = "" 

2518 config = _execute.make_str(config, "config") 

2519 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2520 "LoadTPUEmbeddingFTRLParameters", parameters=parameters, 

2521 accumulators=accumulators, 

2522 linears=linears, 

2523 num_shards=num_shards, 

2524 shard_id=shard_id, 

2525 table_id=table_id, 

2526 table_name=table_name, 

2527 config=config, name=name) 

2528 return _op 

2529LoadTPUEmbeddingFTRLParameters = tf_export("raw_ops.LoadTPUEmbeddingFTRLParameters")(_ops.to_raw_op(load_tpu_embedding_ftrl_parameters)) 

2530 

2531 

2532def load_tpu_embedding_ftrl_parameters_eager_fallback(parameters, accumulators, linears, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2533 num_shards = _execute.make_int(num_shards, "num_shards") 

2534 shard_id = _execute.make_int(shard_id, "shard_id") 

2535 if table_id is None: 

2536 table_id = -1 

2537 table_id = _execute.make_int(table_id, "table_id") 

2538 if table_name is None: 

2539 table_name = "" 

2540 table_name = _execute.make_str(table_name, "table_name") 

2541 if config is None: 

2542 config = "" 

2543 config = _execute.make_str(config, "config") 

2544 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2545 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2546 linears = _ops.convert_to_tensor(linears, _dtypes.float32) 

2547 _inputs_flat = [parameters, accumulators, linears] 

2548 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2549 num_shards, "shard_id", shard_id, "config", config) 

2550 _result = _execute.execute(b"LoadTPUEmbeddingFTRLParameters", 0, 

2551 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2552 name=name) 

2553 _result = None 

2554 return _result 
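# Illustrative sketch: FTRL keeps an accumulator and a linear term per entry,
# so three equally shaped tensors are loaded together. Helper and table name
# are hypothetical; a configured TPU embedding system is assumed.
def _example_load_ftrl(params, accumulators, linears):
  return load_tpu_embedding_ftrl_parameters(
      parameters=params, accumulators=accumulators, linears=linears,
      num_shards=1, shard_id=0, table_name="ftrl_table")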

2555 

2556 

2557def load_tpu_embedding_frequency_estimator_parameters(parameters, last_hit_step, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2558 r"""Load frequency estimator embedding parameters. 

2559 

2560 An op that loads optimization parameters into HBM for embedding. Must be 

2561 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2562 embedding table configuration. For example, this op is used to install 

2563 parameters that are loaded from a checkpoint before a training loop is 

2564 executed. 

2565 

2566 Args: 

2567 parameters: A `Tensor` of type `float32`. 

2568 Value of parameters used in the frequency estimator optimization algorithm. 

2569 last_hit_step: A `Tensor` of type `float32`. 

2570 Value of last_hit_step used in the frequency estimator optimization algorithm. 

2571 num_shards: An `int`. 

2572 shard_id: An `int`. 

2573 table_id: An optional `int`. Defaults to `-1`. 

2574 table_name: An optional `string`. Defaults to `""`. 

2575 config: An optional `string`. Defaults to `""`. 

2576 name: A name for the operation (optional). 

2577 

2578 Returns: 

2579 The created Operation. 

2580 """ 

2581 _ctx = _context._context or _context.context() 

2582 tld = _ctx._thread_local_data 

2583 if tld.is_eager: 

2584 try: 

2585 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2586 _ctx, "LoadTPUEmbeddingFrequencyEstimatorParameters", name, 

2587 parameters, last_hit_step, "table_id", table_id, "table_name", 

2588 table_name, "num_shards", num_shards, "shard_id", shard_id, "config", 

2589 config) 

2590 return _result 

2591 except _core._NotOkStatusException as e: 

2592 _ops.raise_from_not_ok_status(e, name) 

2593 except _core._FallbackException: 

2594 pass 

2595 try: 

2596 return load_tpu_embedding_frequency_estimator_parameters_eager_fallback( 

2597 parameters, last_hit_step, table_id=table_id, table_name=table_name, 

2598 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2599 ctx=_ctx) 

2600 except _core._SymbolicException: 

2601 pass # Add nodes to the TensorFlow graph. 

2602 # Add nodes to the TensorFlow graph. 

2603 num_shards = _execute.make_int(num_shards, "num_shards") 

2604 shard_id = _execute.make_int(shard_id, "shard_id") 

2605 if table_id is None: 

2606 table_id = -1 

2607 table_id = _execute.make_int(table_id, "table_id") 

2608 if table_name is None: 

2609 table_name = "" 

2610 table_name = _execute.make_str(table_name, "table_name") 

2611 if config is None: 

2612 config = "" 

2613 config = _execute.make_str(config, "config") 

2614 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2615 "LoadTPUEmbeddingFrequencyEstimatorParameters", parameters=parameters, 

2616 last_hit_step=last_hit_step, 

2617 num_shards=num_shards, 

2618 shard_id=shard_id, 

2619 table_id=table_id, 

2620 table_name=table_name, 

2621 config=config, 

2622 name=name) 

2623 return _op 

2624LoadTPUEmbeddingFrequencyEstimatorParameters = tf_export("raw_ops.LoadTPUEmbeddingFrequencyEstimatorParameters")(_ops.to_raw_op(load_tpu_embedding_frequency_estimator_parameters)) 

2625 

2626 

2627def load_tpu_embedding_frequency_estimator_parameters_eager_fallback(parameters, last_hit_step, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2628 num_shards = _execute.make_int(num_shards, "num_shards") 

2629 shard_id = _execute.make_int(shard_id, "shard_id") 

2630 if table_id is None: 

2631 table_id = -1 

2632 table_id = _execute.make_int(table_id, "table_id") 

2633 if table_name is None: 

2634 table_name = "" 

2635 table_name = _execute.make_str(table_name, "table_name") 

2636 if config is None: 

2637 config = "" 

2638 config = _execute.make_str(config, "config") 

2639 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2640 last_hit_step = _ops.convert_to_tensor(last_hit_step, _dtypes.float32) 

2641 _inputs_flat = [parameters, last_hit_step] 

2642 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2643 num_shards, "shard_id", shard_id, "config", config) 

2644 _result = _execute.execute(b"LoadTPUEmbeddingFrequencyEstimatorParameters", 

2645 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2646 name=name) 

2647 _result = None 

2648 return _result 
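# Illustrative sketch (hypothetical helper): the frequency-estimator optimizer
# tracks the last step at which each row was hit, so that tensor is loaded
# alongside the parameters. Placeholder shard/table values; TPU assumed.
def _example_load_frequency_estimator(params, last_hit_step):
  return load_tpu_embedding_frequency_estimator_parameters(
      parameters=params, last_hit_step=last_hit_step,
      num_shards=1, shard_id=0, table_name="frequency_estimator_table")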

2649 

2650 

2651def load_tpu_embedding_mdl_adagrad_light_parameters(parameters, accumulators, weights, benefits, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2652 r"""Load MDL Adagrad Light embedding parameters. 

2653 

2654 An op that loads optimization parameters into HBM for embedding. Must be 

2655 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2656 embedding table configuration. For example, this op is used to install 

2657 parameters that are loaded from a checkpoint before a training loop is 

2658 executed. 

2659 

2660 Args: 

2661 parameters: A `Tensor` of type `float32`. 

2662 Value of parameters used in the MDL Adagrad Light optimization algorithm. 

2663 accumulators: A `Tensor` of type `float32`. 

2664 Value of accumulators used in the MDL Adagrad Light optimization algorithm. 

2665 weights: A `Tensor` of type `float32`. 

2666 Value of weights used in the MDL Adagrad Light optimization algorithm. 

2667 benefits: A `Tensor` of type `float32`. 

2668 Value of benefits used in the MDL Adagrad Light optimization algorithm. 

2669 num_shards: An `int`. 

2670 shard_id: An `int`. 

2671 table_id: An optional `int`. Defaults to `-1`. 

2672 table_name: An optional `string`. Defaults to `""`. 

2673 config: An optional `string`. Defaults to `""`. 

2674 name: A name for the operation (optional). 

2675 

2676 Returns: 

2677 The created Operation. 

2678 """ 

2679 _ctx = _context._context or _context.context() 

2680 tld = _ctx._thread_local_data 

2681 if tld.is_eager: 

2682 try: 

2683 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2684 _ctx, "LoadTPUEmbeddingMDLAdagradLightParameters", name, parameters, 

2685 accumulators, weights, benefits, "table_id", table_id, "table_name", 

2686 table_name, "num_shards", num_shards, "shard_id", shard_id, "config", 

2687 config) 

2688 return _result 

2689 except _core._NotOkStatusException as e: 

2690 _ops.raise_from_not_ok_status(e, name) 

2691 except _core._FallbackException: 

2692 pass 

2693 try: 

2694 return load_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback( 

2695 parameters, accumulators, weights, benefits, table_id=table_id, 

2696 table_name=table_name, num_shards=num_shards, shard_id=shard_id, 

2697 config=config, name=name, ctx=_ctx) 

2698 except _core._SymbolicException: 

2699 pass # Add nodes to the TensorFlow graph. 

2700 # Add nodes to the TensorFlow graph. 

2701 num_shards = _execute.make_int(num_shards, "num_shards") 

2702 shard_id = _execute.make_int(shard_id, "shard_id") 

2703 if table_id is None: 

2704 table_id = -1 

2705 table_id = _execute.make_int(table_id, "table_id") 

2706 if table_name is None: 

2707 table_name = "" 

2708 table_name = _execute.make_str(table_name, "table_name") 

2709 if config is None: 

2710 config = "" 

2711 config = _execute.make_str(config, "config") 

2712 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2713 "LoadTPUEmbeddingMDLAdagradLightParameters", parameters=parameters, 

2714 accumulators=accumulators, 

2715 weights=weights, 

2716 benefits=benefits, 

2717 num_shards=num_shards, 

2718 shard_id=shard_id, 

2719 table_id=table_id, 

2720 table_name=table_name, 

2721 config=config, name=name) 

2722 return _op 

2723LoadTPUEmbeddingMDLAdagradLightParameters = tf_export("raw_ops.LoadTPUEmbeddingMDLAdagradLightParameters")(_ops.to_raw_op(load_tpu_embedding_mdl_adagrad_light_parameters)) 

2724 

2725 

2726def load_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback(parameters, accumulators, weights, benefits, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2727 num_shards = _execute.make_int(num_shards, "num_shards") 

2728 shard_id = _execute.make_int(shard_id, "shard_id") 

2729 if table_id is None: 

2730 table_id = -1 

2731 table_id = _execute.make_int(table_id, "table_id") 

2732 if table_name is None: 

2733 table_name = "" 

2734 table_name = _execute.make_str(table_name, "table_name") 

2735 if config is None: 

2736 config = "" 

2737 config = _execute.make_str(config, "config") 

2738 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2739 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2740 weights = _ops.convert_to_tensor(weights, _dtypes.float32) 

2741 benefits = _ops.convert_to_tensor(benefits, _dtypes.float32) 

2742 _inputs_flat = [parameters, accumulators, weights, benefits] 

2743 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2744 num_shards, "shard_id", shard_id, "config", config) 

2745 _result = _execute.execute(b"LoadTPUEmbeddingMDLAdagradLightParameters", 0, 

2746 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2747 name=name) 

2748 _result = None 

2749 return _result 
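# Illustrative sketch: MDL Adagrad Light carries weights and benefits slots in
# addition to the accumulators. Helper and table name are hypothetical; a
# configured TPU embedding system is assumed.
def _example_load_mdl_adagrad_light(params, accumulators, weights, benefits):
  return load_tpu_embedding_mdl_adagrad_light_parameters(
      parameters=params, accumulators=accumulators, weights=weights,
      benefits=benefits, num_shards=1, shard_id=0,
      table_name="mdl_adagrad_light_table")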

2750 

2751 

2752def load_tpu_embedding_momentum_parameters(parameters, momenta, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2753 r"""Load Momentum embedding parameters. 

2754 

2755 An op that loads optimization parameters into HBM for embedding. Must be 

2756 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2757 embedding table configuration. For example, this op is used to install 

2758 parameters that are loaded from a checkpoint before a training loop is 

2759 executed. 

2760 

2761 Args: 

2762 parameters: A `Tensor` of type `float32`. 

2763 Value of parameters used in the Momentum optimization algorithm. 

2764 momenta: A `Tensor` of type `float32`. 

2765 Value of momenta used in the Momentum optimization algorithm. 

2766 num_shards: An `int`. 

2767 shard_id: An `int`. 

2768 table_id: An optional `int`. Defaults to `-1`. 

2769 table_name: An optional `string`. Defaults to `""`. 

2770 config: An optional `string`. Defaults to `""`. 

2771 name: A name for the operation (optional). 

2772 

2773 Returns: 

2774 The created Operation. 

2775 """ 

2776 _ctx = _context._context or _context.context() 

2777 tld = _ctx._thread_local_data 

2778 if tld.is_eager: 

2779 try: 

2780 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2781 _ctx, "LoadTPUEmbeddingMomentumParameters", name, parameters, momenta, 

2782 "table_id", table_id, "table_name", table_name, "num_shards", 

2783 num_shards, "shard_id", shard_id, "config", config) 

2784 return _result 

2785 except _core._NotOkStatusException as e: 

2786 _ops.raise_from_not_ok_status(e, name) 

2787 except _core._FallbackException: 

2788 pass 

2789 try: 

2790 return load_tpu_embedding_momentum_parameters_eager_fallback( 

2791 parameters, momenta, table_id=table_id, table_name=table_name, 

2792 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2793 ctx=_ctx) 

2794 except _core._SymbolicException: 

2795 pass # Add nodes to the TensorFlow graph. 

2796 # Add nodes to the TensorFlow graph. 

2797 num_shards = _execute.make_int(num_shards, "num_shards") 

2798 shard_id = _execute.make_int(shard_id, "shard_id") 

2799 if table_id is None: 

2800 table_id = -1 

2801 table_id = _execute.make_int(table_id, "table_id") 

2802 if table_name is None: 

2803 table_name = "" 

2804 table_name = _execute.make_str(table_name, "table_name") 

2805 if config is None: 

2806 config = "" 

2807 config = _execute.make_str(config, "config") 

2808 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2809 "LoadTPUEmbeddingMomentumParameters", parameters=parameters, 

2810 momenta=momenta, 

2811 num_shards=num_shards, 

2812 shard_id=shard_id, 

2813 table_id=table_id, 

2814 table_name=table_name, 

2815 config=config, name=name) 

2816 return _op 

2817LoadTPUEmbeddingMomentumParameters = tf_export("raw_ops.LoadTPUEmbeddingMomentumParameters")(_ops.to_raw_op(load_tpu_embedding_momentum_parameters)) 

2818 

2819 

2820def load_tpu_embedding_momentum_parameters_eager_fallback(parameters, momenta, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2821 num_shards = _execute.make_int(num_shards, "num_shards") 

2822 shard_id = _execute.make_int(shard_id, "shard_id") 

2823 if table_id is None: 

2824 table_id = -1 

2825 table_id = _execute.make_int(table_id, "table_id") 

2826 if table_name is None: 

2827 table_name = "" 

2828 table_name = _execute.make_str(table_name, "table_name") 

2829 if config is None: 

2830 config = "" 

2831 config = _execute.make_str(config, "config") 

2832 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2833 momenta = _ops.convert_to_tensor(momenta, _dtypes.float32) 

2834 _inputs_flat = [parameters, momenta] 

2835 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2836 num_shards, "shard_id", shard_id, "config", config) 

2837 _result = _execute.execute(b"LoadTPUEmbeddingMomentumParameters", 0, 

2838 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2839 name=name) 

2840 _result = None 

2841 return _result 
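# Illustrative sketch (hypothetical helper): plain Momentum needs only the
# parameters and one momenta slot. Placeholder shard/table values; a TPU
# worker with an installed embedding configuration is assumed.
def _example_load_momentum(params, momenta):
  return load_tpu_embedding_momentum_parameters(
      parameters=params, momenta=momenta,
      num_shards=1, shard_id=0, table_name="momentum_table")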

2842 

2843 

2844def load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2845 r"""Load proximal Adagrad embedding parameters. 

2846 

2847 An op that loads optimization parameters into HBM for embedding. Must be 

2848 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

2849 embedding table configuration. For example, this op is used to install 

2850 parameters that are loaded from a checkpoint before a training loop is 

2851 executed. 

2852 

2853 Args: 

2854 parameters: A `Tensor` of type `float32`. 

2855 Value of parameters used in the proximal Adagrad optimization algorithm. 

2856 accumulators: A `Tensor` of type `float32`. 

2857 Value of accumulators used in the proximal Adagrad optimization algorithm. 

2858 num_shards: An `int`. 

2859 shard_id: An `int`. 

2860 table_id: An optional `int`. Defaults to `-1`. 

2861 table_name: An optional `string`. Defaults to `""`. 

2862 config: An optional `string`. Defaults to `""`. 

2863 name: A name for the operation (optional). 

2864 

2865 Returns: 

2866 The created Operation. 

2867 """ 

2868 _ctx = _context._context or _context.context() 

2869 tld = _ctx._thread_local_data 

2870 if tld.is_eager: 

2871 try: 

2872 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2873 _ctx, "LoadTPUEmbeddingProximalAdagradParameters", name, parameters, 

2874 accumulators, "table_id", table_id, "table_name", table_name, 

2875 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

2876 return _result 

2877 except _core._NotOkStatusException as e: 

2878 _ops.raise_from_not_ok_status(e, name) 

2879 except _core._FallbackException: 

2880 pass 

2881 try: 

2882 return load_tpu_embedding_proximal_adagrad_parameters_eager_fallback( 

2883 parameters, accumulators, table_id=table_id, table_name=table_name, 

2884 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2885 ctx=_ctx) 

2886 except _core._SymbolicException: 

2887 pass # Add nodes to the TensorFlow graph. 

2888 # Add nodes to the TensorFlow graph. 

2889 num_shards = _execute.make_int(num_shards, "num_shards") 

2890 shard_id = _execute.make_int(shard_id, "shard_id") 

2891 if table_id is None: 

2892 table_id = -1 

2893 table_id = _execute.make_int(table_id, "table_id") 

2894 if table_name is None: 

2895 table_name = "" 

2896 table_name = _execute.make_str(table_name, "table_name") 

2897 if config is None: 

2898 config = "" 

2899 config = _execute.make_str(config, "config") 

2900 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2901 "LoadTPUEmbeddingProximalAdagradParameters", parameters=parameters, 

2902 accumulators=accumulators, 

2903 num_shards=num_shards, 

2904 shard_id=shard_id, 

2905 table_id=table_id, 

2906 table_name=table_name, 

2907 config=config, name=name) 

2908 return _op 

2909LoadTPUEmbeddingProximalAdagradParameters = tf_export("raw_ops.LoadTPUEmbeddingProximalAdagradParameters")(_ops.to_raw_op(load_tpu_embedding_proximal_adagrad_parameters)) 

2910 

2911 

2912def load_tpu_embedding_proximal_adagrad_parameters_eager_fallback(parameters, accumulators, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2913 num_shards = _execute.make_int(num_shards, "num_shards") 

2914 shard_id = _execute.make_int(shard_id, "shard_id") 

2915 if table_id is None: 

2916 table_id = -1 

2917 table_id = _execute.make_int(table_id, "table_id") 

2918 if table_name is None: 

2919 table_name = "" 

2920 table_name = _execute.make_str(table_name, "table_name") 

2921 if config is None: 

2922 config = "" 

2923 config = _execute.make_str(config, "config") 

2924 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

2925 accumulators = _ops.convert_to_tensor(accumulators, _dtypes.float32) 

2926 _inputs_flat = [parameters, accumulators] 

2927 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

2928 num_shards, "shard_id", shard_id, "config", config) 

2929 _result = _execute.execute(b"LoadTPUEmbeddingProximalAdagradParameters", 0, 

2930 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2931 name=name) 

2932 _result = None 

2933 return _result 
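# Illustrative sketch: proximal Adagrad uses the same two-tensor layout as
# plain Adagrad (parameters plus accumulators). Names and shard values are
# placeholders; a configured TPU embedding system is assumed.
def _example_load_proximal_adagrad(params, accumulators):
  return load_tpu_embedding_proximal_adagrad_parameters(
      parameters=params, accumulators=accumulators,
      num_shards=1, shard_id=0, table_name="proximal_adagrad_table")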

2934 

2935 

2936def load_tpu_embedding_proximal_yogi_parameters(parameters, v, m, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

2937 r"""TODO: add doc. 

2938 

2939 Args: 

2940 parameters: A `Tensor` of type `float32`. 

2941 v: A `Tensor` of type `float32`. 

2942 m: A `Tensor` of type `float32`. 

2943 num_shards: An `int`. 

2944 shard_id: An `int`. 

2945 table_id: An optional `int`. Defaults to `-1`. 

2946 table_name: An optional `string`. Defaults to `""`. 

2947 config: An optional `string`. Defaults to `""`. 

2948 name: A name for the operation (optional). 

2949 

2950 Returns: 

2951 The created Operation. 

2952 """ 

2953 _ctx = _context._context or _context.context() 

2954 tld = _ctx._thread_local_data 

2955 if tld.is_eager: 

2956 try: 

2957 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2958 _ctx, "LoadTPUEmbeddingProximalYogiParameters", name, parameters, v, 

2959 m, "table_id", table_id, "table_name", table_name, "num_shards", 

2960 num_shards, "shard_id", shard_id, "config", config) 

2961 return _result 

2962 except _core._NotOkStatusException as e: 

2963 _ops.raise_from_not_ok_status(e, name) 

2964 except _core._FallbackException: 

2965 pass 

2966 try: 

2967 return load_tpu_embedding_proximal_yogi_parameters_eager_fallback( 

2968 parameters, v, m, table_id=table_id, table_name=table_name, 

2969 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

2970 ctx=_ctx) 

2971 except _core._SymbolicException: 

2972 pass # Add nodes to the TensorFlow graph. 

2973 # Add nodes to the TensorFlow graph. 

2974 num_shards = _execute.make_int(num_shards, "num_shards") 

2975 shard_id = _execute.make_int(shard_id, "shard_id") 

2976 if table_id is None: 

2977 table_id = -1 

2978 table_id = _execute.make_int(table_id, "table_id") 

2979 if table_name is None: 

2980 table_name = "" 

2981 table_name = _execute.make_str(table_name, "table_name") 

2982 if config is None: 

2983 config = "" 

2984 config = _execute.make_str(config, "config") 

2985 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2986 "LoadTPUEmbeddingProximalYogiParameters", parameters=parameters, v=v, 

2987 m=m, num_shards=num_shards, 

2988 shard_id=shard_id, 

2989 table_id=table_id, 

2990 table_name=table_name, 

2991 config=config, name=name) 

2992 return _op 

2993LoadTPUEmbeddingProximalYogiParameters = tf_export("raw_ops.LoadTPUEmbeddingProximalYogiParameters")(_ops.to_raw_op(load_tpu_embedding_proximal_yogi_parameters)) 

2994 

2995 

2996def load_tpu_embedding_proximal_yogi_parameters_eager_fallback(parameters, v, m, num_shards, shard_id, table_id, table_name, config, name, ctx): 

2997 num_shards = _execute.make_int(num_shards, "num_shards") 

2998 shard_id = _execute.make_int(shard_id, "shard_id") 

2999 if table_id is None: 

3000 table_id = -1 

3001 table_id = _execute.make_int(table_id, "table_id") 

3002 if table_name is None: 

3003 table_name = "" 

3004 table_name = _execute.make_str(table_name, "table_name") 

3005 if config is None: 

3006 config = "" 

3007 config = _execute.make_str(config, "config") 

3008 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

3009 v = _ops.convert_to_tensor(v, _dtypes.float32) 

3010 m = _ops.convert_to_tensor(m, _dtypes.float32) 

3011 _inputs_flat = [parameters, v, m] 

3012 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

3013 num_shards, "shard_id", shard_id, "config", config) 

3014 _result = _execute.execute(b"LoadTPUEmbeddingProximalYogiParameters", 0, 

3015 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3016 name=name) 

3017 _result = None 

3018 return _result 
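# Illustrative sketch (hypothetical helper): the proximal Yogi loader takes
# the parameters together with its v and m slot tensors. Placeholder values;
# a configured TPU embedding system is assumed.
def _example_load_proximal_yogi(params, v, m):
  return load_tpu_embedding_proximal_yogi_parameters(
      parameters=params, v=v, m=m,
      num_shards=1, shard_id=0, table_name="proximal_yogi_table")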

3019 

3020 

3021def load_tpu_embedding_rms_prop_parameters(parameters, ms, mom, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

3022 r"""Load RMSProp embedding parameters. 

3023 

3024 An op that loads optimization parameters into HBM for embedding. Must be 

3025 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

3026 embedding table configuration. For example, this op is used to install 

3027 parameters that are loaded from a checkpoint before a training loop is 

3028 executed. 

3029 

3030 Args: 

3031 parameters: A `Tensor` of type `float32`. 

3032 Value of parameters used in the RMSProp optimization algorithm. 

3033 ms: A `Tensor` of type `float32`. 

3034 Value of ms used in the RMSProp optimization algorithm. 

3035 mom: A `Tensor` of type `float32`. 

3036 Value of mom used in the RMSProp optimization algorithm. 

3037 num_shards: An `int`. 

3038 shard_id: An `int`. 

3039 table_id: An optional `int`. Defaults to `-1`. 

3040 table_name: An optional `string`. Defaults to `""`. 

3041 config: An optional `string`. Defaults to `""`. 

3042 name: A name for the operation (optional). 

3043 

3044 Returns: 

3045 The created Operation. 

3046 """ 

3047 _ctx = _context._context or _context.context() 

3048 tld = _ctx._thread_local_data 

3049 if tld.is_eager: 

3050 try: 

3051 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3052 _ctx, "LoadTPUEmbeddingRMSPropParameters", name, parameters, ms, mom, 

3053 "table_id", table_id, "table_name", table_name, "num_shards", 

3054 num_shards, "shard_id", shard_id, "config", config) 

3055 return _result 

3056 except _core._NotOkStatusException as e: 

3057 _ops.raise_from_not_ok_status(e, name) 

3058 except _core._FallbackException: 

3059 pass 

3060 try: 

3061 return load_tpu_embedding_rms_prop_parameters_eager_fallback( 

3062 parameters, ms, mom, table_id=table_id, table_name=table_name, 

3063 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

3064 ctx=_ctx) 

3065 except _core._SymbolicException: 

3066 pass # Add nodes to the TensorFlow graph. 

3067 # Add nodes to the TensorFlow graph. 

3068 num_shards = _execute.make_int(num_shards, "num_shards") 

3069 shard_id = _execute.make_int(shard_id, "shard_id") 

3070 if table_id is None: 

3071 table_id = -1 

3072 table_id = _execute.make_int(table_id, "table_id") 

3073 if table_name is None: 

3074 table_name = "" 

3075 table_name = _execute.make_str(table_name, "table_name") 

3076 if config is None: 

3077 config = "" 

3078 config = _execute.make_str(config, "config") 

3079 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3080 "LoadTPUEmbeddingRMSPropParameters", parameters=parameters, ms=ms, 

3081 mom=mom, num_shards=num_shards, 

3082 shard_id=shard_id, 

3083 table_id=table_id, 

3084 table_name=table_name, 

3085 config=config, name=name) 

3086 return _op 

3087LoadTPUEmbeddingRMSPropParameters = tf_export("raw_ops.LoadTPUEmbeddingRMSPropParameters")(_ops.to_raw_op(load_tpu_embedding_rms_prop_parameters)) 

3088 

3089 

3090def load_tpu_embedding_rms_prop_parameters_eager_fallback(parameters, ms, mom, num_shards, shard_id, table_id, table_name, config, name, ctx): 

3091 num_shards = _execute.make_int(num_shards, "num_shards") 

3092 shard_id = _execute.make_int(shard_id, "shard_id") 

3093 if table_id is None: 

3094 table_id = -1 

3095 table_id = _execute.make_int(table_id, "table_id") 

3096 if table_name is None: 

3097 table_name = "" 

3098 table_name = _execute.make_str(table_name, "table_name") 

3099 if config is None: 

3100 config = "" 

3101 config = _execute.make_str(config, "config") 

3102 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

3103 ms = _ops.convert_to_tensor(ms, _dtypes.float32) 

3104 mom = _ops.convert_to_tensor(mom, _dtypes.float32) 

3105 _inputs_flat = [parameters, ms, mom] 

3106 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

3107 num_shards, "shard_id", shard_id, "config", config) 

3108 _result = _execute.execute(b"LoadTPUEmbeddingRMSPropParameters", 0, 

3109 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3110 name=name) 

3111 _result = None 

3112 return _result 
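# Illustrative sketch: the (uncentered) RMSProp loader takes parameters plus
# the ms and mom slots. Helper and table name are hypothetical; a TPU worker
# with an installed embedding configuration is assumed.
def _example_load_rms_prop(params, ms, mom):
  return load_tpu_embedding_rms_prop_parameters(
      parameters=params, ms=ms, mom=mom,
      num_shards=1, shard_id=0, table_name="rmsprop_table")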

3113 

3114 

3115def load_tpu_embedding_stochastic_gradient_descent_parameters(parameters, num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

3116 r"""Load SGD embedding parameters. 

3117 

3118 An op that loads optimization parameters into HBM for embedding. Must be 

3119 preceded by a ConfigureTPUEmbeddingHost op that sets up the correct 

3120 embedding table configuration. For example, this op is used to install 

3121 parameters that are loaded from a checkpoint before a training loop is 

3122 executed. 

3123 

3124 Args: 

3125 parameters: A `Tensor` of type `float32`. 

3126 Value of parameters used in the stochastic gradient descent optimization algorithm. 

3127 num_shards: An `int`. 

3128 shard_id: An `int`. 

3129 table_id: An optional `int`. Defaults to `-1`. 

3130 table_name: An optional `string`. Defaults to `""`. 

3131 config: An optional `string`. Defaults to `""`. 

3132 name: A name for the operation (optional). 

3133 

3134 Returns: 

3135 The created Operation. 

3136 """ 

3137 _ctx = _context._context or _context.context() 

3138 tld = _ctx._thread_local_data 

3139 if tld.is_eager: 

3140 try: 

3141 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3142 _ctx, "LoadTPUEmbeddingStochasticGradientDescentParameters", name, 

3143 parameters, "table_id", table_id, "table_name", table_name, 

3144 "num_shards", num_shards, "shard_id", shard_id, "config", config) 

3145 return _result 

3146 except _core._NotOkStatusException as e: 

3147 _ops.raise_from_not_ok_status(e, name) 

3148 except _core._FallbackException: 

3149 pass 

3150 try: 

3151 return load_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback( 

3152 parameters, table_id=table_id, table_name=table_name, 

3153 num_shards=num_shards, shard_id=shard_id, config=config, name=name, 

3154 ctx=_ctx) 

3155 except _core._SymbolicException: 

3156 pass # Add nodes to the TensorFlow graph. 

3157 # Add nodes to the TensorFlow graph. 

3158 num_shards = _execute.make_int(num_shards, "num_shards") 

3159 shard_id = _execute.make_int(shard_id, "shard_id") 

3160 if table_id is None: 

3161 table_id = -1 

3162 table_id = _execute.make_int(table_id, "table_id") 

3163 if table_name is None: 

3164 table_name = "" 

3165 table_name = _execute.make_str(table_name, "table_name") 

3166 if config is None: 

3167 config = "" 

3168 config = _execute.make_str(config, "config") 

3169 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3170 "LoadTPUEmbeddingStochasticGradientDescentParameters", parameters=parameters, 

3171 num_shards=num_shards, 

3172 shard_id=shard_id, 

3173 table_id=table_id, 

3174 table_name=table_name, 

3175 config=config, 

3176 name=name) 

3177 return _op 

3178LoadTPUEmbeddingStochasticGradientDescentParameters = tf_export("raw_ops.LoadTPUEmbeddingStochasticGradientDescentParameters")(_ops.to_raw_op(load_tpu_embedding_stochastic_gradient_descent_parameters)) 

3179 

3180 

3181def load_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback(parameters, num_shards, shard_id, table_id, table_name, config, name, ctx): 

3182 num_shards = _execute.make_int(num_shards, "num_shards") 

3183 shard_id = _execute.make_int(shard_id, "shard_id") 

3184 if table_id is None: 

3185 table_id = -1 

3186 table_id = _execute.make_int(table_id, "table_id") 

3187 if table_name is None: 

3188 table_name = "" 

3189 table_name = _execute.make_str(table_name, "table_name") 

3190 if config is None: 

3191 config = "" 

3192 config = _execute.make_str(config, "config") 

3193 parameters = _ops.convert_to_tensor(parameters, _dtypes.float32) 

3194 _inputs_flat = [parameters] 

3195 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

3196 num_shards, "shard_id", shard_id, "config", config) 

3197 _result = _execute.execute(b"LoadTPUEmbeddingStochasticGradientDescentParameters", 

3198 0, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3199 name=name) 

3200 _result = None 

3201 return _result 
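# Illustrative sketch (hypothetical helper): plain SGD keeps no slot
# variables, so only the parameter table itself is loaded. Placeholder
# shard/table values; a configured TPU embedding system is assumed.
def _example_load_sgd(params):
  return load_tpu_embedding_stochastic_gradient_descent_parameters(
      parameters=params, num_shards=1, shard_id=0, table_name="sgd_table")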

3202 

3203 

3204def outfeed_dequeue(dtype, shape, device_ordinal=-1, name=None): 

3205 r"""Retrieves a single tensor from the computation outfeed. 

3206 

3207 This operation will block indefinitely until data is available. 

3208 

3209 Args: 

3210 dtype: A `tf.DType`. The type of elements in the tensor. 

3211 shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. 

3212 device_ordinal: An optional `int`. Defaults to `-1`. 

3213 The TPU device to use. This should be -1 when the Op 

3214 is running on a TPU device, and >= 0 when the Op is running on the CPU 

3215 device. 

3216 name: A name for the operation (optional). 

3217 

3218 Returns: 

3219 A `Tensor` of type `dtype`. 

3220 """ 

3221 _ctx = _context._context or _context.context() 

3222 tld = _ctx._thread_local_data 

3223 if tld.is_eager: 

3224 try: 

3225 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3226 _ctx, "OutfeedDequeue", name, "dtype", dtype, "shape", shape, 

3227 "device_ordinal", device_ordinal) 

3228 return _result 

3229 except _core._NotOkStatusException as e: 

3230 _ops.raise_from_not_ok_status(e, name) 

3231 except _core._FallbackException: 

3232 pass 

3233 try: 

3234 return outfeed_dequeue_eager_fallback( 

3235 dtype=dtype, shape=shape, device_ordinal=device_ordinal, name=name, 

3236 ctx=_ctx) 

3237 except _core._SymbolicException: 

3238 pass # Add nodes to the TensorFlow graph. 

3239 # Add nodes to the TensorFlow graph. 

3240 dtype = _execute.make_type(dtype, "dtype") 

3241 shape = _execute.make_shape(shape, "shape") 

3242 if device_ordinal is None: 

3243 device_ordinal = -1 

3244 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

3245 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3246 "OutfeedDequeue", dtype=dtype, shape=shape, 

3247 device_ordinal=device_ordinal, name=name) 

3248 _result = _outputs[:] 

3249 if _execute.must_record_gradient(): 

3250 _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", 

3251 _op.get_attr("shape"), "device_ordinal", 

3252 _op._get_attr_int("device_ordinal")) 

3253 _inputs_flat = _op.inputs 

3254 _execute.record_gradient( 

3255 "OutfeedDequeue", _inputs_flat, _attrs, _result) 

3256 _result, = _result 

3257 return _result 

3258 

3259OutfeedDequeue = tf_export("raw_ops.OutfeedDequeue")(_ops.to_raw_op(outfeed_dequeue)) 

3260 

3261 

3262def outfeed_dequeue_eager_fallback(dtype, shape, device_ordinal, name, ctx): 

3263 dtype = _execute.make_type(dtype, "dtype") 

3264 shape = _execute.make_shape(shape, "shape") 

3265 if device_ordinal is None: 

3266 device_ordinal = -1 

3267 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

3268 _inputs_flat = [] 

3269 _attrs = ("dtype", dtype, "shape", shape, "device_ordinal", device_ordinal) 

3270 _result = _execute.execute(b"OutfeedDequeue", 1, inputs=_inputs_flat, 

3271 attrs=_attrs, ctx=ctx, name=name) 

3272 if _execute.must_record_gradient(): 

3273 _execute.record_gradient( 

3274 "OutfeedDequeue", _inputs_flat, _attrs, _result) 

3275 _result, = _result 

3276 return _result 
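# Illustrative sketch: dequeuing one float32 result of a known static shape
# from the outfeed on the host (device_ordinal >= 0 because the call runs on
# the CPU device). The shape is a placeholder; the call blocks until a TPU
# computation has enqueued data.
def _example_outfeed_dequeue():
  return outfeed_dequeue(dtype=_dtypes.float32, shape=[128], device_ordinal=0)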

3277 

3278 

3279def outfeed_dequeue_tuple(dtypes, shapes, device_ordinal=-1, name=None): 

3280 r"""Retrieve multiple values from the computation outfeed. 

3281 

3282 This operation will block indefinitely until data is available. Output `i` 

3283 corresponds to XLA tuple element `i`. 

3284 

3285 Args: 

3286 dtypes: A list of `tf.DTypes` that has length `>= 1`. 

3287 The element types of each element in `outputs`. 

3288 shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). 

3289 The shapes of each tensor in `outputs`. 

3290 device_ordinal: An optional `int`. Defaults to `-1`. 

3291 The TPU device to use. This should be -1 when the Op 

3292 is running on a TPU device, and >= 0 when the Op is running on the CPU 

3293 device. 

3294 name: A name for the operation (optional). 

3295 

3296 Returns: 

3297 A list of `Tensor` objects of type `dtypes`. 

3298 """ 

3299 _ctx = _context._context or _context.context() 

3300 tld = _ctx._thread_local_data 

3301 if tld.is_eager: 

3302 try: 

3303 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3304 _ctx, "OutfeedDequeueTuple", name, "dtypes", dtypes, "shapes", shapes, 

3305 "device_ordinal", device_ordinal) 

3306 return _result 

3307 except _core._NotOkStatusException as e: 

3308 _ops.raise_from_not_ok_status(e, name) 

3309 except _core._FallbackException: 

3310 pass 

3311 try: 

3312 return outfeed_dequeue_tuple_eager_fallback( 

3313 dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal, 

3314 name=name, ctx=_ctx) 

3315 except _core._SymbolicException: 

3316 pass # Add nodes to the TensorFlow graph. 

3317 # Add nodes to the TensorFlow graph. 

3318 if not isinstance(dtypes, (list, tuple)): 

3319 raise TypeError( 

3320 "Expected list for 'dtypes' argument to " 

3321 "'outfeed_dequeue_tuple' Op, not %r." % dtypes) 

3322 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

3323 if not isinstance(shapes, (list, tuple)): 

3324 raise TypeError( 

3325 "Expected list for 'shapes' argument to " 

3326 "'outfeed_dequeue_tuple' Op, not %r." % shapes) 

3327 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3328 if device_ordinal is None: 

3329 device_ordinal = -1 

3330 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

3331 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3332 "OutfeedDequeueTuple", dtypes=dtypes, shapes=shapes, 

3333 device_ordinal=device_ordinal, name=name) 

3334 _result = _outputs[:] 

3335 if not _result: 

3336 return _op 

3337 if _execute.must_record_gradient(): 

3338 _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", 

3339 _op.get_attr("shapes"), "device_ordinal", 

3340 _op._get_attr_int("device_ordinal")) 

3341 _inputs_flat = _op.inputs 

3342 _execute.record_gradient( 

3343 "OutfeedDequeueTuple", _inputs_flat, _attrs, _result) 

3344 return _result 

3345 

3346OutfeedDequeueTuple = tf_export("raw_ops.OutfeedDequeueTuple")(_ops.to_raw_op(outfeed_dequeue_tuple)) 

3347 

3348 

3349def outfeed_dequeue_tuple_eager_fallback(dtypes, shapes, device_ordinal, name, ctx): 

3350 if not isinstance(dtypes, (list, tuple)): 

3351 raise TypeError( 

3352 "Expected list for 'dtypes' argument to " 

3353 "'outfeed_dequeue_tuple' Op, not %r." % dtypes) 

3354 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

3355 if not isinstance(shapes, (list, tuple)): 

3356 raise TypeError( 

3357 "Expected list for 'shapes' argument to " 

3358 "'outfeed_dequeue_tuple' Op, not %r." % shapes) 

3359 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3360 if device_ordinal is None: 

3361 device_ordinal = -1 

3362 device_ordinal = _execute.make_int(device_ordinal, "device_ordinal") 

3363 _inputs_flat = [] 

3364 _attrs = ("dtypes", dtypes, "shapes", shapes, "device_ordinal", 

3365 device_ordinal) 

3366 _result = _execute.execute(b"OutfeedDequeueTuple", len(dtypes), 

3367 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3368 name=name) 

3369 if _execute.must_record_gradient(): 

3370 _execute.record_gradient( 

3371 "OutfeedDequeueTuple", _inputs_flat, _attrs, _result) 

3372 return _result 
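# Illustrative sketch (hypothetical helper): dequeuing an XLA tuple whose
# element i has dtypes[i] / shapes[i]. The dtypes and shapes below are
# placeholders and must match what the TPU program enqueues.
def _example_outfeed_dequeue_tuple():
  return outfeed_dequeue_tuple(
      dtypes=[_dtypes.float32, _dtypes.int32],
      shapes=[[128], []], device_ordinal=0)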

3373 

3374 

3375def outfeed_dequeue_tuple_v2(device_ordinal, dtypes, shapes, name=None): 

3376 r"""Retrieve multiple values from the computation outfeed. Device ordinal is a 

3377 tensor allowing dynamic outfeed. 

3378 

3379 This operation will block indefinitely until data is available. Output `i` 

3380 corresponds to XLA tuple element `i`. 

3381 

3382 Args: 

3383 device_ordinal: A `Tensor` of type `int32`. 

3384 An int scalar tensor, representing the TPU device to use. This should be -1 when 

3385 the Op is running on a TPU device, and >= 0 when the Op is running on the CPU 

3386 device. 

3387 dtypes: A list of `tf.DTypes` that has length `>= 1`. 

3388 The element types of each element in `outputs`. 

3389 shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). 

3390 The shapes of each tensor in `outputs`. 

3391 name: A name for the operation (optional). 

3392 

3393 Returns: 

3394 A list of `Tensor` objects of type `dtypes`. 

3395 """ 

3396 _ctx = _context._context or _context.context() 

3397 tld = _ctx._thread_local_data 

3398 if tld.is_eager: 

3399 try: 

3400 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3401 _ctx, "OutfeedDequeueTupleV2", name, device_ordinal, "dtypes", dtypes, 

3402 "shapes", shapes) 

3403 return _result 

3404 except _core._NotOkStatusException as e: 

3405 _ops.raise_from_not_ok_status(e, name) 

3406 except _core._FallbackException: 

3407 pass 

3408 try: 

3409 return outfeed_dequeue_tuple_v2_eager_fallback( 

3410 device_ordinal, dtypes=dtypes, shapes=shapes, name=name, ctx=_ctx) 

3411 except _core._SymbolicException: 

3412 pass # Add nodes to the TensorFlow graph. 

3413 # Add nodes to the TensorFlow graph. 

3414 if not isinstance(dtypes, (list, tuple)): 

3415 raise TypeError( 

3416 "Expected list for 'dtypes' argument to " 

3417 "'outfeed_dequeue_tuple_v2' Op, not %r." % dtypes) 

3418 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

3419 if not isinstance(shapes, (list, tuple)): 

3420 raise TypeError( 

3421 "Expected list for 'shapes' argument to " 

3422 "'outfeed_dequeue_tuple_v2' Op, not %r." % shapes) 

3423 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3424 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3425 "OutfeedDequeueTupleV2", device_ordinal=device_ordinal, dtypes=dtypes, 

3426 shapes=shapes, name=name) 

3427 _result = _outputs[:] 

3428 if not _result: 

3429 return _op 

3430 if _execute.must_record_gradient(): 

3431 _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", 

3432 _op.get_attr("shapes")) 

3433 _inputs_flat = _op.inputs 

3434 _execute.record_gradient( 

3435 "OutfeedDequeueTupleV2", _inputs_flat, _attrs, _result) 

3436 return _result 

3437 

3438OutfeedDequeueTupleV2 = tf_export("raw_ops.OutfeedDequeueTupleV2")(_ops.to_raw_op(outfeed_dequeue_tuple_v2)) 

3439 

3440 

3441def outfeed_dequeue_tuple_v2_eager_fallback(device_ordinal, dtypes, shapes, name, ctx): 

3442 if not isinstance(dtypes, (list, tuple)): 

3443 raise TypeError( 

3444 "Expected list for 'dtypes' argument to " 

3445 "'outfeed_dequeue_tuple_v2' Op, not %r." % dtypes) 

3446 dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes] 

3447 if not isinstance(shapes, (list, tuple)): 

3448 raise TypeError( 

3449 "Expected list for 'shapes' argument to " 

3450 "'outfeed_dequeue_tuple_v2' Op, not %r." % shapes) 

3451 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3452 device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) 

3453 _inputs_flat = [device_ordinal] 

3454 _attrs = ("dtypes", dtypes, "shapes", shapes) 

3455 _result = _execute.execute(b"OutfeedDequeueTupleV2", len(dtypes), 

3456 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3457 name=name) 

3458 if _execute.must_record_gradient(): 

3459 _execute.record_gradient( 

3460 "OutfeedDequeueTupleV2", _inputs_flat, _attrs, _result) 

3461 return _result 
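# Illustrative sketch: the V2 variant takes the device ordinal as a tensor,
# so the target device can be chosen at run time. Placeholder dtypes and
# shapes; they must match the enqueued tuple.
def _example_outfeed_dequeue_tuple_v2(device_ordinal_tensor):
  return outfeed_dequeue_tuple_v2(
      device_ordinal=device_ordinal_tensor,
      dtypes=[_dtypes.float32], shapes=[[128]])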

3462 

3463 

3464def outfeed_dequeue_v2(device_ordinal, dtype, shape, name=None): 

3465 r"""Retrieves a single tensor from the computation outfeed. Device ordinal is a 

3466 tensor allowing dynamic outfeed. 

3467 

3468 This operation will block indefinitely until data is available. 

3469 

3470 Args: 

3471 device_ordinal: A `Tensor` of type `int32`. 

3472 An int scalar tensor, representing the TPU device to use. This should be -1 when 

3473 the Op is running on a TPU device, and >= 0 when the Op is running on the CPU 

3474 device. 

3475 dtype: A `tf.DType`. The type of elements in the tensor. 

3476 shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. 

3477 name: A name for the operation (optional). 

3478 

3479 Returns: 

3480 A `Tensor` of type `dtype`. 

3481 """ 

3482 _ctx = _context._context or _context.context() 

3483 tld = _ctx._thread_local_data 

3484 if tld.is_eager: 

3485 try: 

3486 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3487 _ctx, "OutfeedDequeueV2", name, device_ordinal, "dtype", dtype, 

3488 "shape", shape) 

3489 return _result 

3490 except _core._NotOkStatusException as e: 

3491 _ops.raise_from_not_ok_status(e, name) 

3492 except _core._FallbackException: 

3493 pass 

3494 try: 

3495 return outfeed_dequeue_v2_eager_fallback( 

3496 device_ordinal, dtype=dtype, shape=shape, name=name, ctx=_ctx) 

3497 except _core._SymbolicException: 

3498 pass # Add nodes to the TensorFlow graph. 

3499 # Add nodes to the TensorFlow graph. 

3500 dtype = _execute.make_type(dtype, "dtype") 

3501 shape = _execute.make_shape(shape, "shape") 

3502 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3503 "OutfeedDequeueV2", device_ordinal=device_ordinal, dtype=dtype, 

3504 shape=shape, name=name) 

3505 _result = _outputs[:] 

3506 if _execute.must_record_gradient(): 

3507 _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", 

3508 _op.get_attr("shape")) 

3509 _inputs_flat = _op.inputs 

3510 _execute.record_gradient( 

3511 "OutfeedDequeueV2", _inputs_flat, _attrs, _result) 

3512 _result, = _result 

3513 return _result 

3514 

3515OutfeedDequeueV2 = tf_export("raw_ops.OutfeedDequeueV2")(_ops.to_raw_op(outfeed_dequeue_v2)) 

3516 

3517 

3518def outfeed_dequeue_v2_eager_fallback(device_ordinal, dtype, shape, name, ctx): 

3519 dtype = _execute.make_type(dtype, "dtype") 

3520 shape = _execute.make_shape(shape, "shape") 

3521 device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) 

3522 _inputs_flat = [device_ordinal] 

3523 _attrs = ("dtype", dtype, "shape", shape) 

3524 _result = _execute.execute(b"OutfeedDequeueV2", 1, inputs=_inputs_flat, 

3525 attrs=_attrs, ctx=ctx, name=name) 

3526 if _execute.must_record_gradient(): 

3527 _execute.record_gradient( 

3528 "OutfeedDequeueV2", _inputs_flat, _attrs, _result) 

3529 _result, = _result 

3530 return _result 
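# Illustrative sketch (hypothetical helper): same as outfeed_dequeue, but the
# device ordinal is a runtime tensor rather than an attribute. The dtype and
# shape are placeholders.
def _example_outfeed_dequeue_v2(device_ordinal_tensor):
  return outfeed_dequeue_v2(device_ordinal=device_ordinal_tensor,
                            dtype=_dtypes.float32, shape=[128])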

3531 

3532 

3533def outfeed_enqueue(input, name=None): 

3534 r"""Enqueue a Tensor on the computation outfeed. 

3535 

3536 Args: 

3537 input: A `Tensor`. A tensor that will be inserted into the outfeed queue. 

3538 name: A name for the operation (optional). 

3539 

3540 Returns: 

3541 The created Operation. 

3542 """ 

3543 _ctx = _context._context or _context.context() 

3544 tld = _ctx._thread_local_data 

3545 if tld.is_eager: 

3546 try: 

3547 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3548 _ctx, "OutfeedEnqueue", name, input) 

3549 return _result 

3550 except _core._NotOkStatusException as e: 

3551 _ops.raise_from_not_ok_status(e, name) 

3552 except _core._FallbackException: 

3553 pass 

3554 try: 

3555 return outfeed_enqueue_eager_fallback( 

3556 input, name=name, ctx=_ctx) 

3557 except _core._SymbolicException: 

3558 pass # Add nodes to the TensorFlow graph. 

3559 # Add nodes to the TensorFlow graph. 

3560 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3561 "OutfeedEnqueue", input=input, name=name) 

3562 return _op 

3563OutfeedEnqueue = tf_export("raw_ops.OutfeedEnqueue")(_ops.to_raw_op(outfeed_enqueue)) 

3564 

3565 

3566def outfeed_enqueue_eager_fallback(input, name, ctx): 

3567 _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) 

3568 _inputs_flat = [input] 

3569 _attrs = ("dtype", _attr_dtype) 

3570 _result = _execute.execute(b"OutfeedEnqueue", 0, inputs=_inputs_flat, 

3571 attrs=_attrs, ctx=ctx, name=name) 

3572 _result = None 

3573 return _result 
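
# Editor's note: illustrative sketch, not part of the machine-generated file.
# Inside a TPU computation, OutfeedEnqueue pushes a single tensor onto the
# outfeed so the host can later read it back with OutfeedDequeue(V2). The
# helper name below is hypothetical.
def _example_outfeed_enqueue(values):
  import tensorflow as tf
  # `values` is any tensor produced by the TPU computation; the op has no
  # outputs, only the side effect of enqueueing.
  tf.raw_ops.OutfeedEnqueue(input=values)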

3574 

3575 

3576def outfeed_enqueue_tuple(inputs, name=None): 

3577 r"""Enqueue multiple Tensor values on the computation outfeed. 

3578 

3579 Args: 

3580 inputs: A list of `Tensor` objects. 

3581 A list of tensors that will be inserted into the outfeed queue as an 

3582 XLA tuple. 

3583 name: A name for the operation (optional). 

3584 

3585 Returns: 

3586 The created Operation. 

3587 """ 

3588 _ctx = _context._context or _context.context() 

3589 tld = _ctx._thread_local_data 

3590 if tld.is_eager: 

3591 try: 

3592 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3593 _ctx, "OutfeedEnqueueTuple", name, inputs) 

3594 return _result 

3595 except _core._NotOkStatusException as e: 

3596 _ops.raise_from_not_ok_status(e, name) 

3597 except _core._FallbackException: 

3598 pass 

3599 try: 

3600 return outfeed_enqueue_tuple_eager_fallback( 

3601 inputs, name=name, ctx=_ctx) 

3602 except _core._SymbolicException: 

3603 pass # Add nodes to the TensorFlow graph. 

3604 # Add nodes to the TensorFlow graph. 

3605 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3606 "OutfeedEnqueueTuple", inputs=inputs, name=name) 

3607 return _op 

3608OutfeedEnqueueTuple = tf_export("raw_ops.OutfeedEnqueueTuple")(_ops.to_raw_op(outfeed_enqueue_tuple)) 

3609 

3610 

3611def outfeed_enqueue_tuple_eager_fallback(inputs, name, ctx): 

3612 _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) 

3613 _inputs_flat = list(inputs) 

3614 _attrs = ("dtypes", _attr_dtypes) 

3615 _result = _execute.execute(b"OutfeedEnqueueTuple", 0, inputs=_inputs_flat, 

3616 attrs=_attrs, ctx=ctx, name=name) 

3617 _result = None 

3618 return _result 
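
# Editor's note: illustrative sketch, not part of the machine-generated file.
# OutfeedEnqueueTuple enqueues several tensors at once as a single XLA tuple;
# the host side dequeues them with matching dtypes and shapes. The helper name
# below is hypothetical.
def _example_outfeed_enqueue_tuple(logits, loss):
  import tensorflow as tf
  tf.raw_ops.OutfeedEnqueueTuple(inputs=[logits, loss])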

3619 

3620 

3621def prelinearize(input, shape=[], layout=[], name=None): 

3622 r"""An op which linearizes one Tensor value to an opaque variant tensor. 

3623 

3624 Args: 

3625 input: A `Tensor`. A tensor that will be linearized. 

3626 shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`. 

3627 The shape of the tensor. 

3628 layout: An optional list of `ints`. Defaults to `[]`. 

3629 A vector holding the requested layout in minor-to-major sequence. If a layout 

3630 attribute is passed but its values are all -1 the layout will be computed by 

3631 the infeed operation. 

3632 name: A name for the operation (optional). 

3633 

3634 Returns: 

3635 A `Tensor` of type `variant`. 

3636 """ 

3637 _ctx = _context._context or _context.context() 

3638 tld = _ctx._thread_local_data 

3639 if tld.is_eager: 

3640 try: 

3641 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3642 _ctx, "Prelinearize", name, input, "shape", shape, "layout", layout) 

3643 return _result 

3644 except _core._NotOkStatusException as e: 

3645 _ops.raise_from_not_ok_status(e, name) 

3646 except _core._FallbackException: 

3647 pass 

3648 try: 

3649 return prelinearize_eager_fallback( 

3650 input, shape=shape, layout=layout, name=name, ctx=_ctx) 

3651 except _core._SymbolicException: 

3652 pass # Add nodes to the TensorFlow graph. 

3653 # Add nodes to the TensorFlow graph. 

3654 if shape is None: 

3655 shape = [] 

3656 shape = _execute.make_shape(shape, "shape") 

3657 if layout is None: 

3658 layout = [] 

3659 if not isinstance(layout, (list, tuple)): 

3660 raise TypeError( 

3661 "Expected list for 'layout' argument to " 

3662 "'prelinearize' Op, not %r." % layout) 

3663 layout = [_execute.make_int(_i, "layout") for _i in layout] 

3664 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3665 "Prelinearize", input=input, shape=shape, layout=layout, name=name) 

3666 _result = _outputs[:] 

3667 if _execute.must_record_gradient(): 

3668 _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", 

3669 _op.get_attr("shape"), "layout", _op.get_attr("layout")) 

3670 _inputs_flat = _op.inputs 

3671 _execute.record_gradient( 

3672 "Prelinearize", _inputs_flat, _attrs, _result) 

3673 _result, = _result 

3674 return _result 

3675 

3676Prelinearize = tf_export("raw_ops.Prelinearize")(_ops.to_raw_op(prelinearize)) 

3677 

3678 

3679def prelinearize_eager_fallback(input, shape, layout, name, ctx): 

3680 if shape is None: 

3681 shape = [] 

3682 shape = _execute.make_shape(shape, "shape") 

3683 if layout is None: 

3684 layout = [] 

3685 if not isinstance(layout, (list, tuple)): 

3686 raise TypeError( 

3687 "Expected list for 'layout' argument to " 

3688 "'prelinearize' Op, not %r." % layout) 

3689 layout = [_execute.make_int(_i, "layout") for _i in layout] 

3690 _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx, []) 

3691 _inputs_flat = [input] 

3692 _attrs = ("dtype", _attr_dtype, "shape", shape, "layout", layout) 

3693 _result = _execute.execute(b"Prelinearize", 1, inputs=_inputs_flat, 

3694 attrs=_attrs, ctx=ctx, name=name) 

3695 if _execute.must_record_gradient(): 

3696 _execute.record_gradient( 

3697 "Prelinearize", _inputs_flat, _attrs, _result) 

3698 _result, = _result 

3699 return _result 
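
# Editor's note: illustrative sketch, not part of the machine-generated file.
# Prelinearize turns one host tensor into an opaque variant destined for the
# TPU infeed. A `layout` of all -1 (or the empty default) lets the infeed
# choose the layout; otherwise it is an explicit minor-to-major permutation.
# The helper name below is hypothetical.
def _example_prelinearize():
  import tensorflow as tf
  x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  # Explicit minor-to-major layout; pass layout=[] or all -1 entries to let
  # the infeed operation compute the layout instead.
  return tf.raw_ops.Prelinearize(input=x, shape=[2, 2], layout=[1, 0])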

3700 

3701 

3702def prelinearize_tuple(inputs, shapes, layouts=[], name=None): 

3703 r"""An op which linearizes multiple Tensor values to an opaque variant tensor. 

3704 

3705 Args: 

3706 inputs: A list of `Tensor` objects. 

3707 A list of tensors that will be provided using the infeed mechanism. 

3708 shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). 

3709 The shapes of each tensor in `inputs`. 

3710 layouts: An optional list of `ints`. Defaults to `[]`. 

3711 A vector holding the requested layout in minor-to-major sequence for all the 

3712 tuple shapes in the order the shapes appear in the "shapes" input. The layout 

3713 elements for a sub-shape can be set to -1 in which case the corresponding layout 

3714 will be computed by the infeed operation. 

3715 name: A name for the operation (optional). 

3716 

3717 Returns: 

3718 A `Tensor` of type `variant`. 

3719 """ 

3720 _ctx = _context._context or _context.context() 

3721 tld = _ctx._thread_local_data 

3722 if tld.is_eager: 

3723 try: 

3724 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3725 _ctx, "PrelinearizeTuple", name, inputs, "shapes", shapes, "layouts", 

3726 layouts) 

3727 return _result 

3728 except _core._NotOkStatusException as e: 

3729 _ops.raise_from_not_ok_status(e, name) 

3730 except _core._FallbackException: 

3731 pass 

3732 try: 

3733 return prelinearize_tuple_eager_fallback( 

3734 inputs, shapes=shapes, layouts=layouts, name=name, ctx=_ctx) 

3735 except _core._SymbolicException: 

3736 pass # Add nodes to the TensorFlow graph. 

3737 # Add nodes to the TensorFlow graph. 

3738 if not isinstance(shapes, (list, tuple)): 

3739 raise TypeError( 

3740 "Expected list for 'shapes' argument to " 

3741 "'prelinearize_tuple' Op, not %r." % shapes) 

3742 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3743 if layouts is None: 

3744 layouts = [] 

3745 if not isinstance(layouts, (list, tuple)): 

3746 raise TypeError( 

3747 "Expected list for 'layouts' argument to " 

3748 "'prelinearize_tuple' Op, not %r." % layouts) 

3749 layouts = [_execute.make_int(_i, "layouts") for _i in layouts] 

3750 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3751 "PrelinearizeTuple", inputs=inputs, shapes=shapes, layouts=layouts, 

3752 name=name) 

3753 _result = _outputs[:] 

3754 if _execute.must_record_gradient(): 

3755 _attrs = ("dtypes", _op.get_attr("dtypes"), "shapes", 

3756 _op.get_attr("shapes"), "layouts", _op.get_attr("layouts")) 

3757 _inputs_flat = _op.inputs 

3758 _execute.record_gradient( 

3759 "PrelinearizeTuple", _inputs_flat, _attrs, _result) 

3760 _result, = _result 

3761 return _result 

3762 

3763PrelinearizeTuple = tf_export("raw_ops.PrelinearizeTuple")(_ops.to_raw_op(prelinearize_tuple)) 

3764 

3765 

3766def prelinearize_tuple_eager_fallback(inputs, shapes, layouts, name, ctx): 

3767 if not isinstance(shapes, (list, tuple)): 

3768 raise TypeError( 

3769 "Expected list for 'shapes' argument to " 

3770 "'prelinearize_tuple' Op, not %r." % shapes) 

3771 shapes = [_execute.make_shape(_s, "shapes") for _s in shapes] 

3772 if layouts is None: 

3773 layouts = [] 

3774 if not isinstance(layouts, (list, tuple)): 

3775 raise TypeError( 

3776 "Expected list for 'layouts' argument to " 

3777 "'prelinearize_tuple' Op, not %r." % layouts) 

3778 layouts = [_execute.make_int(_i, "layouts") for _i in layouts] 

3779 _attr_dtypes, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) 

3780 _inputs_flat = list(inputs) 

3781 _attrs = ("dtypes", _attr_dtypes, "shapes", shapes, "layouts", layouts) 

3782 _result = _execute.execute(b"PrelinearizeTuple", 1, inputs=_inputs_flat, 

3783 attrs=_attrs, ctx=ctx, name=name) 

3784 if _execute.must_record_gradient(): 

3785 _execute.record_gradient( 

3786 "PrelinearizeTuple", _inputs_flat, _attrs, _result) 

3787 _result, = _result 

3788 return _result 
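
# Editor's note: illustrative sketch, not part of the machine-generated file.
# PrelinearizeTuple linearizes several host tensors into one variant. `shapes`
# gives the shape of each input, and `layouts` (when non-empty) concatenates
# the per-shape minor-to-major vectors, with -1 entries left for the infeed to
# choose. The helper name below is hypothetical.
def _example_prelinearize_tuple():
  import tensorflow as tf
  features = tf.ones([4, 3])
  labels = tf.zeros([4], dtype=tf.int32)
  return tf.raw_ops.PrelinearizeTuple(
      inputs=[features, labels],
      shapes=[[4, 3], [4]],
      layouts=[-1, -1, -1])  # two entries for [4, 3] plus one for [4]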

3789 

3790 

3791def read_variable_xla_split_nd(resource, T, N, num_splits, paddings=[], name=None): 

3792 r"""Splits resource variable input tensor across all dimensions. 

3793 

3794 An op which splits the resource variable input tensor based on the given 

3795 num_splits attribute, optionally pads the slices, and returns them. Slices 

3796 are returned in row-major order. 

3797 

3798 This op may be generated via the TPU bridge. 

3799 

3800 For example, with `input` tensor: 

3801 ``` 

3802 [[0, 1, 2], 

3803 [3, 4, 5], 

3804 [6, 7, 8]] 

3805 ``` 

3806 `num_splits`: 

3807 ``` 

3808 [2, 2] 

3809 ``` 

3810 and `paddings`: 

3811 ``` 

3812 [1, 1] 

3813 ``` 

3814 the expected `outputs` is: 

3815 ``` 

3816 [[0, 1], 

3817 [3, 4]] 

3818 [[2, 0], 

3819 [5, 0]] 

3820 [[6, 7], 

3821 [0, 0]] 

3822 [[8, 0], 

3823 [0, 0]] 

3824 ``` 

3825 

3826 Args: 

3827 resource: A `Tensor` of type `resource`. 

3828 Resource variable of input tensor to split across all dimensions. 

3834 T: A `tf.DType`. 

3835 N: An `int` that is `>= 1`. 

3836 num_splits: A list of `ints`. 

3837 Number of ways to split per dimension. Shape dimensions must be evenly 

3838 divisible. 

3839 paddings: An optional list of `ints`. Defaults to `[]`. 

3840 Optional list of right paddings per dimension of input tensor to apply before 

3841 splitting. This can be used to make a dimension evenly divisible. 

3842 name: A name for the operation (optional). 

3843 

3844 Returns: 

3845 A list of `N` `Tensor` objects with type `T`: the output slices, in row-major order. 

3846 """ 

3847 _ctx = _context._context or _context.context() 

3848 tld = _ctx._thread_local_data 

3849 if tld.is_eager: 

3850 try: 

3851 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3852 _ctx, "ReadVariableXlaSplitND", name, resource, "T", T, "N", N, 

3853 "num_splits", num_splits, "paddings", paddings) 

3854 return _result 

3855 except _core._NotOkStatusException as e: 

3856 _ops.raise_from_not_ok_status(e, name) 

3857 except _core._FallbackException: 

3858 pass 

3859 try: 

3860 return read_variable_xla_split_nd_eager_fallback( 

3861 resource, T=T, N=N, num_splits=num_splits, paddings=paddings, 

3862 name=name, ctx=_ctx) 

3863 except _core._SymbolicException: 

3864 pass # Add nodes to the TensorFlow graph. 

3865 # Add nodes to the TensorFlow graph. 

3866 T = _execute.make_type(T, "T") 

3867 N = _execute.make_int(N, "N") 

3868 if not isinstance(num_splits, (list, tuple)): 

3869 raise TypeError( 

3870 "Expected list for 'num_splits' argument to " 

3871 "'read_variable_xla_split_nd' Op, not %r." % num_splits) 

3872 num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] 

3873 if paddings is None: 

3874 paddings = [] 

3875 if not isinstance(paddings, (list, tuple)): 

3876 raise TypeError( 

3877 "Expected list for 'paddings' argument to " 

3878 "'read_variable_xla_split_nd' Op, not %r." % paddings) 

3879 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

3880 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3881 "ReadVariableXlaSplitND", resource=resource, T=T, N=N, 

3882 num_splits=num_splits, paddings=paddings, 

3883 name=name) 

3884 _result = _outputs[:] 

3885 if not _result: 

3886 return _op 

3887 if _execute.must_record_gradient(): 

3888 _attrs = ("T", _op._get_attr_type("T"), "N", _op._get_attr_int("N"), 

3889 "num_splits", _op.get_attr("num_splits"), "paddings", 

3890 _op.get_attr("paddings")) 

3891 _inputs_flat = _op.inputs 

3892 _execute.record_gradient( 

3893 "ReadVariableXlaSplitND", _inputs_flat, _attrs, _result) 

3894 return _result 

3895 

3896ReadVariableXlaSplitND = tf_export("raw_ops.ReadVariableXlaSplitND")(_ops.to_raw_op(read_variable_xla_split_nd)) 

3897 

3898 

3899def read_variable_xla_split_nd_eager_fallback(resource, T, N, num_splits, paddings, name, ctx): 

3900 T = _execute.make_type(T, "T") 

3901 N = _execute.make_int(N, "N") 

3902 if not isinstance(num_splits, (list, tuple)): 

3903 raise TypeError( 

3904 "Expected list for 'num_splits' argument to " 

3905 "'read_variable_xla_split_nd' Op, not %r." % num_splits) 

3906 num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] 

3907 if paddings is None: 

3908 paddings = [] 

3909 if not isinstance(paddings, (list, tuple)): 

3910 raise TypeError( 

3911 "Expected list for 'paddings' argument to " 

3912 "'read_variable_xla_split_nd' Op, not %r." % paddings) 

3913 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

3914 resource = _ops.convert_to_tensor(resource, _dtypes.resource) 

3915 _inputs_flat = [resource] 

3916 _attrs = ("T", T, "N", N, "num_splits", num_splits, "paddings", paddings) 

3917 _result = _execute.execute(b"ReadVariableXlaSplitND", N, 

3918 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3919 name=name) 

3920 if _execute.must_record_gradient(): 

3921 _execute.record_gradient( 

3922 "ReadVariableXlaSplitND", _inputs_flat, _attrs, _result) 

3923 return _result 
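
# Editor's note: illustrative sketch, not part of the machine-generated file.
# It mirrors the docstring example above: a 3x3 variable is right-padded to
# 4x4 and split into a 2x2 grid, yielding four 2x2 slices in row-major order.
# It assumes a runtime where this TPU-bridge op is supported; the helper name
# below is hypothetical.
def _example_read_variable_xla_split_nd():
  import tensorflow as tf
  v = tf.Variable([[0., 1., 2.],
                   [3., 4., 5.],
                   [6., 7., 8.]])
  return tf.raw_ops.ReadVariableXlaSplitND(
      resource=v.handle,   # resource handle of the variable
      T=tf.float32,        # element dtype of the variable
      N=4,                 # number of output slices (2 * 2)
      num_splits=[2, 2],   # split each dimension in two
      paddings=[1, 1])     # pad 3 -> 4 so each dimension divides evenly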

3924 

3925 

3926def recv_tpu_embedding_activations(num_outputs, config, name=None): 

3927 r"""An op that receives embedding activations on the TPU. 

3928 

3929 The TPU system performs the embedding lookups and aggregations specified by 

3930 the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The 

3931 results of these aggregations are visible to the TensorFlow graph as the 

3932 outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing 

3933 one Tensor of activations per table specified in the model. There can be at 

3934 most one RecvTPUEmbeddingActivations op in the TPU graph. 

3935 

3936 Args: 

3937 num_outputs: An `int` that is `>= 1`. 

3938 The number of output activation tensors, equal to the number of 

3939 embedding tables in the model. 

3940 config: A `string`. Serialized TPUEmbeddingConfiguration proto. 

3941 name: A name for the operation (optional). 

3942 

3943 Returns: 

3944 A list of `num_outputs` `Tensor` objects with type `float32`. 

3945 """ 

3946 _ctx = _context._context or _context.context() 

3947 tld = _ctx._thread_local_data 

3948 if tld.is_eager: 

3949 try: 

3950 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3951 _ctx, "RecvTPUEmbeddingActivations", name, "num_outputs", num_outputs, 

3952 "config", config) 

3953 return _result 

3954 except _core._NotOkStatusException as e: 

3955 _ops.raise_from_not_ok_status(e, name) 

3956 except _core._FallbackException: 

3957 pass 

3958 try: 

3959 return recv_tpu_embedding_activations_eager_fallback( 

3960 num_outputs=num_outputs, config=config, name=name, ctx=_ctx) 

3961 except _core._SymbolicException: 

3962 pass # Add nodes to the TensorFlow graph. 

3963 # Add nodes to the TensorFlow graph. 

3964 num_outputs = _execute.make_int(num_outputs, "num_outputs") 

3965 config = _execute.make_str(config, "config") 

3966 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3967 "RecvTPUEmbeddingActivations", num_outputs=num_outputs, config=config, 

3968 name=name) 

3969 _result = _outputs[:] 

3970 if not _result: 

3971 return _op 

3972 if _execute.must_record_gradient(): 

3973 _attrs = ("num_outputs", _op._get_attr_int("num_outputs"), "config", 

3974 _op.get_attr("config")) 

3975 _inputs_flat = _op.inputs 

3976 _execute.record_gradient( 

3977 "RecvTPUEmbeddingActivations", _inputs_flat, _attrs, _result) 

3978 return _result 

3979 

3980RecvTPUEmbeddingActivations = tf_export("raw_ops.RecvTPUEmbeddingActivations")(_ops.to_raw_op(recv_tpu_embedding_activations)) 

3981 

3982 

3983def recv_tpu_embedding_activations_eager_fallback(num_outputs, config, name, ctx): 

3984 num_outputs = _execute.make_int(num_outputs, "num_outputs") 

3985 config = _execute.make_str(config, "config") 

3986 _inputs_flat = [] 

3987 _attrs = ("num_outputs", num_outputs, "config", config) 

3988 _result = _execute.execute(b"RecvTPUEmbeddingActivations", num_outputs, 

3989 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3990 name=name) 

3991 if _execute.must_record_gradient(): 

3992 _execute.record_gradient( 

3993 "RecvTPUEmbeddingActivations", _inputs_flat, _attrs, _result) 

3994 return _result 
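
# Editor's note: illustrative sketch, not part of the machine-generated file.
# RecvTPUEmbeddingActivations only makes sense inside a TPU embedding training
# step, after the corresponding enqueue ops have run; it yields one float32
# activation tensor per embedding table. The serialized
# TPUEmbeddingConfiguration proto is application-specific, so both arguments
# here are placeholders, and the helper name is hypothetical.
def _example_recv_tpu_embedding_activations(serialized_config, num_tables):
  import tensorflow as tf
  return tf.raw_ops.RecvTPUEmbeddingActivations(
      num_outputs=num_tables, config=serialized_config)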

3995 

3996_RetrieveTPUEmbeddingADAMParametersOutput = collections.namedtuple( 

3997 "RetrieveTPUEmbeddingADAMParameters", 

3998 ["parameters", "momenta", "velocities"]) 

3999 

4000 

4001def retrieve_tpu_embedding_adam_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4002 r"""Retrieve ADAM embedding parameters. 

4003 

4004 An op that retrieves optimization parameters from embedding to host 

4005 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4006 the correct embedding table configuration. For example, this op is 

4007 used to retrieve updated parameters before saving a checkpoint. 

4008 

4009 Args: 

4010 num_shards: An `int`. 

4011 shard_id: An `int`. 

4012 table_id: An optional `int`. Defaults to `-1`. 

4013 table_name: An optional `string`. Defaults to `""`. 

4014 config: An optional `string`. Defaults to `""`. 

4015 name: A name for the operation (optional). 

4016 

4017 Returns: 

4018 A tuple of `Tensor` objects (parameters, momenta, velocities). 

4019 

4020 parameters: A `Tensor` of type `float32`. 

4021 momenta: A `Tensor` of type `float32`. 

4022 velocities: A `Tensor` of type `float32`. 

4023 """ 

4024 _ctx = _context._context or _context.context() 

4025 tld = _ctx._thread_local_data 

4026 if tld.is_eager: 

4027 try: 

4028 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4029 _ctx, "RetrieveTPUEmbeddingADAMParameters", name, "table_id", 

4030 table_id, "table_name", table_name, "num_shards", num_shards, 

4031 "shard_id", shard_id, "config", config) 

4032 _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) 

4033 return _result 

4034 except _core._NotOkStatusException as e: 

4035 _ops.raise_from_not_ok_status(e, name) 

4036 except _core._FallbackException: 

4037 pass 

4038 try: 

4039 return retrieve_tpu_embedding_adam_parameters_eager_fallback( 

4040 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4041 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4042 except _core._SymbolicException: 

4043 pass # Add nodes to the TensorFlow graph. 

4044 # Add nodes to the TensorFlow graph. 

4045 num_shards = _execute.make_int(num_shards, "num_shards") 

4046 shard_id = _execute.make_int(shard_id, "shard_id") 

4047 if table_id is None: 

4048 table_id = -1 

4049 table_id = _execute.make_int(table_id, "table_id") 

4050 if table_name is None: 

4051 table_name = "" 

4052 table_name = _execute.make_str(table_name, "table_name") 

4053 if config is None: 

4054 config = "" 

4055 config = _execute.make_str(config, "config") 

4056 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4057 "RetrieveTPUEmbeddingADAMParameters", num_shards=num_shards, 

4058 shard_id=shard_id, 

4059 table_id=table_id, 

4060 table_name=table_name, 

4061 config=config, name=name) 

4062 _result = _outputs[:] 

4063 if _execute.must_record_gradient(): 

4064 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4065 _op.get_attr("table_name"), "num_shards", 

4066 _op._get_attr_int("num_shards"), "shard_id", 

4067 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4068 _inputs_flat = _op.inputs 

4069 _execute.record_gradient( 

4070 "RetrieveTPUEmbeddingADAMParameters", _inputs_flat, _attrs, _result) 

4071 _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) 

4072 return _result 

4073 

4074RetrieveTPUEmbeddingADAMParameters = tf_export("raw_ops.RetrieveTPUEmbeddingADAMParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adam_parameters)) 

4075 

4076 

4077def retrieve_tpu_embedding_adam_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4078 num_shards = _execute.make_int(num_shards, "num_shards") 

4079 shard_id = _execute.make_int(shard_id, "shard_id") 

4080 if table_id is None: 

4081 table_id = -1 

4082 table_id = _execute.make_int(table_id, "table_id") 

4083 if table_name is None: 

4084 table_name = "" 

4085 table_name = _execute.make_str(table_name, "table_name") 

4086 if config is None: 

4087 config = "" 

4088 config = _execute.make_str(config, "config") 

4089 _inputs_flat = [] 

4090 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4091 num_shards, "shard_id", shard_id, "config", config) 

4092 _result = _execute.execute(b"RetrieveTPUEmbeddingADAMParameters", 3, 

4093 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4094 name=name) 

4095 if _execute.must_record_gradient(): 

4096 _execute.record_gradient( 

4097 "RetrieveTPUEmbeddingADAMParameters", _inputs_flat, _attrs, _result) 

4098 _result = _RetrieveTPUEmbeddingADAMParametersOutput._make(_result) 

4099 return _result 
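
# Editor's note: illustrative sketch, not part of the machine-generated file.
# The Retrieve*Parameters ops that follow all share one pattern: after a
# ConfigureTPUEmbeddingHost op has set up the tables, each op copies a table's
# optimizer state from the TPU embedding back to host memory, typically just
# before writing a checkpoint. The ADAM variant returns (parameters, momenta,
# velocities); the sibling ops below differ only in which slot variables they
# return. The helper name and argument values are hypothetical.
def _example_retrieve_adam_table(table_name):
  import tensorflow as tf
  params, momenta, velocities = tf.raw_ops.RetrieveTPUEmbeddingADAMParameters(
      num_shards=1,           # total number of shards being retrieved
      shard_id=0,             # which shard this call retrieves
      table_name=table_name)  # table as named in the configuration proto
  return params, momenta, velocities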

4100 

4101_RetrieveTPUEmbeddingAdadeltaParametersOutput = collections.namedtuple( 

4102 "RetrieveTPUEmbeddingAdadeltaParameters", 

4103 ["parameters", "accumulators", "updates"]) 

4104 

4105 

4106def retrieve_tpu_embedding_adadelta_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4107 r"""Retrieve Adadelta embedding parameters. 

4108 

4109 An op that retrieves optimization parameters from embedding to host 

4110 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4111 the correct embedding table configuration. For example, this op is 

4112 used to retrieve updated parameters before saving a checkpoint. 

4113 

4114 Args: 

4115 num_shards: An `int`. 

4116 shard_id: An `int`. 

4117 table_id: An optional `int`. Defaults to `-1`. 

4118 table_name: An optional `string`. Defaults to `""`. 

4119 config: An optional `string`. Defaults to `""`. 

4120 name: A name for the operation (optional). 

4121 

4122 Returns: 

4123 A tuple of `Tensor` objects (parameters, accumulators, updates). 

4124 

4125 parameters: A `Tensor` of type `float32`. 

4126 accumulators: A `Tensor` of type `float32`. 

4127 updates: A `Tensor` of type `float32`. 

4128 """ 

4129 _ctx = _context._context or _context.context() 

4130 tld = _ctx._thread_local_data 

4131 if tld.is_eager: 

4132 try: 

4133 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4134 _ctx, "RetrieveTPUEmbeddingAdadeltaParameters", name, "table_id", 

4135 table_id, "table_name", table_name, "num_shards", num_shards, 

4136 "shard_id", shard_id, "config", config) 

4137 _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) 

4138 return _result 

4139 except _core._NotOkStatusException as e: 

4140 _ops.raise_from_not_ok_status(e, name) 

4141 except _core._FallbackException: 

4142 pass 

4143 try: 

4144 return retrieve_tpu_embedding_adadelta_parameters_eager_fallback( 

4145 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4146 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4147 except _core._SymbolicException: 

4148 pass # Add nodes to the TensorFlow graph. 

4149 # Add nodes to the TensorFlow graph. 

4150 num_shards = _execute.make_int(num_shards, "num_shards") 

4151 shard_id = _execute.make_int(shard_id, "shard_id") 

4152 if table_id is None: 

4153 table_id = -1 

4154 table_id = _execute.make_int(table_id, "table_id") 

4155 if table_name is None: 

4156 table_name = "" 

4157 table_name = _execute.make_str(table_name, "table_name") 

4158 if config is None: 

4159 config = "" 

4160 config = _execute.make_str(config, "config") 

4161 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4162 "RetrieveTPUEmbeddingAdadeltaParameters", num_shards=num_shards, 

4163 shard_id=shard_id, 

4164 table_id=table_id, 

4165 table_name=table_name, 

4166 config=config, name=name) 

4167 _result = _outputs[:] 

4168 if _execute.must_record_gradient(): 

4169 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4170 _op.get_attr("table_name"), "num_shards", 

4171 _op._get_attr_int("num_shards"), "shard_id", 

4172 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4173 _inputs_flat = _op.inputs 

4174 _execute.record_gradient( 

4175 "RetrieveTPUEmbeddingAdadeltaParameters", _inputs_flat, _attrs, _result) 

4176 _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) 

4177 return _result 

4178 

4179RetrieveTPUEmbeddingAdadeltaParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdadeltaParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adadelta_parameters)) 

4180 

4181 

4182def retrieve_tpu_embedding_adadelta_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4183 num_shards = _execute.make_int(num_shards, "num_shards") 

4184 shard_id = _execute.make_int(shard_id, "shard_id") 

4185 if table_id is None: 

4186 table_id = -1 

4187 table_id = _execute.make_int(table_id, "table_id") 

4188 if table_name is None: 

4189 table_name = "" 

4190 table_name = _execute.make_str(table_name, "table_name") 

4191 if config is None: 

4192 config = "" 

4193 config = _execute.make_str(config, "config") 

4194 _inputs_flat = [] 

4195 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4196 num_shards, "shard_id", shard_id, "config", config) 

4197 _result = _execute.execute(b"RetrieveTPUEmbeddingAdadeltaParameters", 3, 

4198 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4199 name=name) 

4200 if _execute.must_record_gradient(): 

4201 _execute.record_gradient( 

4202 "RetrieveTPUEmbeddingAdadeltaParameters", _inputs_flat, _attrs, _result) 

4203 _result = _RetrieveTPUEmbeddingAdadeltaParametersOutput._make(_result) 

4204 return _result 

4205 

4206_RetrieveTPUEmbeddingAdagradMomentumParametersOutput = collections.namedtuple( 

4207 "RetrieveTPUEmbeddingAdagradMomentumParameters", 

4208 ["parameters", "accumulators", "momenta"]) 

4209 

4210 

4211def retrieve_tpu_embedding_adagrad_momentum_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4212 r"""Retrieve Adagrad Momentum embedding parameters. 

4213 

4214 An op that retrieves optimization parameters from embedding to host 

4215 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4216 the correct embedding table configuration. For example, this op is 

4217 used to retrieve updated parameters before saving a checkpoint. 

4218 

4219 Args: 

4220 num_shards: An `int`. 

4221 shard_id: An `int`. 

4222 table_id: An optional `int`. Defaults to `-1`. 

4223 table_name: An optional `string`. Defaults to `""`. 

4224 config: An optional `string`. Defaults to `""`. 

4225 name: A name for the operation (optional). 

4226 

4227 Returns: 

4228 A tuple of `Tensor` objects (parameters, accumulators, momenta). 

4229 

4230 parameters: A `Tensor` of type `float32`. 

4231 accumulators: A `Tensor` of type `float32`. 

4232 momenta: A `Tensor` of type `float32`. 

4233 """ 

4234 _ctx = _context._context or _context.context() 

4235 tld = _ctx._thread_local_data 

4236 if tld.is_eager: 

4237 try: 

4238 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4239 _ctx, "RetrieveTPUEmbeddingAdagradMomentumParameters", name, 

4240 "table_id", table_id, "table_name", table_name, "num_shards", 

4241 num_shards, "shard_id", shard_id, "config", config) 

4242 _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) 

4243 return _result 

4244 except _core._NotOkStatusException as e: 

4245 _ops.raise_from_not_ok_status(e, name) 

4246 except _core._FallbackException: 

4247 pass 

4248 try: 

4249 return retrieve_tpu_embedding_adagrad_momentum_parameters_eager_fallback( 

4250 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4251 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4252 except _core._SymbolicException: 

4253 pass # Add nodes to the TensorFlow graph. 

4254 # Add nodes to the TensorFlow graph. 

4255 num_shards = _execute.make_int(num_shards, "num_shards") 

4256 shard_id = _execute.make_int(shard_id, "shard_id") 

4257 if table_id is None: 

4258 table_id = -1 

4259 table_id = _execute.make_int(table_id, "table_id") 

4260 if table_name is None: 

4261 table_name = "" 

4262 table_name = _execute.make_str(table_name, "table_name") 

4263 if config is None: 

4264 config = "" 

4265 config = _execute.make_str(config, "config") 

4266 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4267 "RetrieveTPUEmbeddingAdagradMomentumParameters", num_shards=num_shards, 

4268 shard_id=shard_id, 

4269 table_id=table_id, 

4270 table_name=table_name, 

4271 config=config, 

4272 name=name) 

4273 _result = _outputs[:] 

4274 if _execute.must_record_gradient(): 

4275 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4276 _op.get_attr("table_name"), "num_shards", 

4277 _op._get_attr_int("num_shards"), "shard_id", 

4278 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4279 _inputs_flat = _op.inputs 

4280 _execute.record_gradient( 

4281 "RetrieveTPUEmbeddingAdagradMomentumParameters", _inputs_flat, _attrs, _result) 

4282 _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) 

4283 return _result 

4284 

4285RetrieveTPUEmbeddingAdagradMomentumParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdagradMomentumParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adagrad_momentum_parameters)) 

4286 

4287 

4288def retrieve_tpu_embedding_adagrad_momentum_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4289 num_shards = _execute.make_int(num_shards, "num_shards") 

4290 shard_id = _execute.make_int(shard_id, "shard_id") 

4291 if table_id is None: 

4292 table_id = -1 

4293 table_id = _execute.make_int(table_id, "table_id") 

4294 if table_name is None: 

4295 table_name = "" 

4296 table_name = _execute.make_str(table_name, "table_name") 

4297 if config is None: 

4298 config = "" 

4299 config = _execute.make_str(config, "config") 

4300 _inputs_flat = [] 

4301 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4302 num_shards, "shard_id", shard_id, "config", config) 

4303 _result = _execute.execute(b"RetrieveTPUEmbeddingAdagradMomentumParameters", 

4304 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4305 name=name) 

4306 if _execute.must_record_gradient(): 

4307 _execute.record_gradient( 

4308 "RetrieveTPUEmbeddingAdagradMomentumParameters", _inputs_flat, _attrs, _result) 

4309 _result = _RetrieveTPUEmbeddingAdagradMomentumParametersOutput._make(_result) 

4310 return _result 

4311 

4312_RetrieveTPUEmbeddingAdagradParametersOutput = collections.namedtuple( 

4313 "RetrieveTPUEmbeddingAdagradParameters", 

4314 ["parameters", "accumulators"]) 

4315 

4316 

4317def retrieve_tpu_embedding_adagrad_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4318 r"""Retrieve Adagrad embedding parameters. 

4319 

4320 An op that retrieves optimization parameters from embedding to host 

4321 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4322 the correct embedding table configuration. For example, this op is 

4323 used to retrieve updated parameters before saving a checkpoint. 

4324 

4325 Args: 

4326 num_shards: An `int`. 

4327 shard_id: An `int`. 

4328 table_id: An optional `int`. Defaults to `-1`. 

4329 table_name: An optional `string`. Defaults to `""`. 

4330 config: An optional `string`. Defaults to `""`. 

4331 name: A name for the operation (optional). 

4332 

4333 Returns: 

4334 A tuple of `Tensor` objects (parameters, accumulators). 

4335 

4336 parameters: A `Tensor` of type `float32`. 

4337 accumulators: A `Tensor` of type `float32`. 

4338 """ 

4339 _ctx = _context._context or _context.context() 

4340 tld = _ctx._thread_local_data 

4341 if tld.is_eager: 

4342 try: 

4343 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4344 _ctx, "RetrieveTPUEmbeddingAdagradParameters", name, "table_id", 

4345 table_id, "table_name", table_name, "num_shards", num_shards, 

4346 "shard_id", shard_id, "config", config) 

4347 _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) 

4348 return _result 

4349 except _core._NotOkStatusException as e: 

4350 _ops.raise_from_not_ok_status(e, name) 

4351 except _core._FallbackException: 

4352 pass 

4353 try: 

4354 return retrieve_tpu_embedding_adagrad_parameters_eager_fallback( 

4355 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4356 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4357 except _core._SymbolicException: 

4358 pass # Add nodes to the TensorFlow graph. 

4359 # Add nodes to the TensorFlow graph. 

4360 num_shards = _execute.make_int(num_shards, "num_shards") 

4361 shard_id = _execute.make_int(shard_id, "shard_id") 

4362 if table_id is None: 

4363 table_id = -1 

4364 table_id = _execute.make_int(table_id, "table_id") 

4365 if table_name is None: 

4366 table_name = "" 

4367 table_name = _execute.make_str(table_name, "table_name") 

4368 if config is None: 

4369 config = "" 

4370 config = _execute.make_str(config, "config") 

4371 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4372 "RetrieveTPUEmbeddingAdagradParameters", num_shards=num_shards, 

4373 shard_id=shard_id, 

4374 table_id=table_id, 

4375 table_name=table_name, 

4376 config=config, name=name) 

4377 _result = _outputs[:] 

4378 if _execute.must_record_gradient(): 

4379 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4380 _op.get_attr("table_name"), "num_shards", 

4381 _op._get_attr_int("num_shards"), "shard_id", 

4382 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4383 _inputs_flat = _op.inputs 

4384 _execute.record_gradient( 

4385 "RetrieveTPUEmbeddingAdagradParameters", _inputs_flat, _attrs, _result) 

4386 _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) 

4387 return _result 

4388 

4389RetrieveTPUEmbeddingAdagradParameters = tf_export("raw_ops.RetrieveTPUEmbeddingAdagradParameters")(_ops.to_raw_op(retrieve_tpu_embedding_adagrad_parameters)) 

4390 

4391 

4392def retrieve_tpu_embedding_adagrad_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4393 num_shards = _execute.make_int(num_shards, "num_shards") 

4394 shard_id = _execute.make_int(shard_id, "shard_id") 

4395 if table_id is None: 

4396 table_id = -1 

4397 table_id = _execute.make_int(table_id, "table_id") 

4398 if table_name is None: 

4399 table_name = "" 

4400 table_name = _execute.make_str(table_name, "table_name") 

4401 if config is None: 

4402 config = "" 

4403 config = _execute.make_str(config, "config") 

4404 _inputs_flat = [] 

4405 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4406 num_shards, "shard_id", shard_id, "config", config) 

4407 _result = _execute.execute(b"RetrieveTPUEmbeddingAdagradParameters", 2, 

4408 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4409 name=name) 

4410 if _execute.must_record_gradient(): 

4411 _execute.record_gradient( 

4412 "RetrieveTPUEmbeddingAdagradParameters", _inputs_flat, _attrs, _result) 

4413 _result = _RetrieveTPUEmbeddingAdagradParametersOutput._make(_result) 

4414 return _result 

4415 

4416_RetrieveTPUEmbeddingCenteredRMSPropParametersOutput = collections.namedtuple( 

4417 "RetrieveTPUEmbeddingCenteredRMSPropParameters", 

4418 ["parameters", "ms", "mom", "mg"]) 

4419 

4420 

4421def retrieve_tpu_embedding_centered_rms_prop_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4422 r"""Retrieve centered RMSProp embedding parameters. 

4423 

4424 An op that retrieves optimization parameters from embedding to host 

4425 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4426 the correct embedding table configuration. For example, this op is 

4427 used to retrieve updated parameters before saving a checkpoint. 

4428 

4429 Args: 

4430 num_shards: An `int`. 

4431 shard_id: An `int`. 

4432 table_id: An optional `int`. Defaults to `-1`. 

4433 table_name: An optional `string`. Defaults to `""`. 

4434 config: An optional `string`. Defaults to `""`. 

4435 name: A name for the operation (optional). 

4436 

4437 Returns: 

4438 A tuple of `Tensor` objects (parameters, ms, mom, mg). 

4439 

4440 parameters: A `Tensor` of type `float32`. 

4441 ms: A `Tensor` of type `float32`. 

4442 mom: A `Tensor` of type `float32`. 

4443 mg: A `Tensor` of type `float32`. 

4444 """ 

4445 _ctx = _context._context or _context.context() 

4446 tld = _ctx._thread_local_data 

4447 if tld.is_eager: 

4448 try: 

4449 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4450 _ctx, "RetrieveTPUEmbeddingCenteredRMSPropParameters", name, 

4451 "table_id", table_id, "table_name", table_name, "num_shards", 

4452 num_shards, "shard_id", shard_id, "config", config) 

4453 _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) 

4454 return _result 

4455 except _core._NotOkStatusException as e: 

4456 _ops.raise_from_not_ok_status(e, name) 

4457 except _core._FallbackException: 

4458 pass 

4459 try: 

4460 return retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback( 

4461 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4462 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4463 except _core._SymbolicException: 

4464 pass # Add nodes to the TensorFlow graph. 

4465 # Add nodes to the TensorFlow graph. 

4466 num_shards = _execute.make_int(num_shards, "num_shards") 

4467 shard_id = _execute.make_int(shard_id, "shard_id") 

4468 if table_id is None: 

4469 table_id = -1 

4470 table_id = _execute.make_int(table_id, "table_id") 

4471 if table_name is None: 

4472 table_name = "" 

4473 table_name = _execute.make_str(table_name, "table_name") 

4474 if config is None: 

4475 config = "" 

4476 config = _execute.make_str(config, "config") 

4477 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4478 "RetrieveTPUEmbeddingCenteredRMSPropParameters", num_shards=num_shards, 

4479 shard_id=shard_id, 

4480 table_id=table_id, 

4481 table_name=table_name, 

4482 config=config, 

4483 name=name) 

4484 _result = _outputs[:] 

4485 if _execute.must_record_gradient(): 

4486 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4487 _op.get_attr("table_name"), "num_shards", 

4488 _op._get_attr_int("num_shards"), "shard_id", 

4489 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4490 _inputs_flat = _op.inputs 

4491 _execute.record_gradient( 

4492 "RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result) 

4493 _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) 

4494 return _result 

4495 

4496RetrieveTPUEmbeddingCenteredRMSPropParameters = tf_export("raw_ops.RetrieveTPUEmbeddingCenteredRMSPropParameters")(_ops.to_raw_op(retrieve_tpu_embedding_centered_rms_prop_parameters)) 

4497 

4498 

4499def retrieve_tpu_embedding_centered_rms_prop_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4500 num_shards = _execute.make_int(num_shards, "num_shards") 

4501 shard_id = _execute.make_int(shard_id, "shard_id") 

4502 if table_id is None: 

4503 table_id = -1 

4504 table_id = _execute.make_int(table_id, "table_id") 

4505 if table_name is None: 

4506 table_name = "" 

4507 table_name = _execute.make_str(table_name, "table_name") 

4508 if config is None: 

4509 config = "" 

4510 config = _execute.make_str(config, "config") 

4511 _inputs_flat = [] 

4512 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4513 num_shards, "shard_id", shard_id, "config", config) 

4514 _result = _execute.execute(b"RetrieveTPUEmbeddingCenteredRMSPropParameters", 

4515 4, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4516 name=name) 

4517 if _execute.must_record_gradient(): 

4518 _execute.record_gradient( 

4519 "RetrieveTPUEmbeddingCenteredRMSPropParameters", _inputs_flat, _attrs, _result) 

4520 _result = _RetrieveTPUEmbeddingCenteredRMSPropParametersOutput._make(_result) 

4521 return _result 

4522 

4523_RetrieveTPUEmbeddingFTRLParametersOutput = collections.namedtuple( 

4524 "RetrieveTPUEmbeddingFTRLParameters", 

4525 ["parameters", "accumulators", "linears"]) 

4526 

4527 

4528def retrieve_tpu_embedding_ftrl_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4529 r"""Retrieve FTRL embedding parameters. 

4530 

4531 An op that retrieves optimization parameters from embedding to host 

4532 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4533 the correct embedding table configuration. For example, this op is 

4534 used to retrieve updated parameters before saving a checkpoint. 

4535 

4536 Args: 

4537 num_shards: An `int`. 

4538 shard_id: An `int`. 

4539 table_id: An optional `int`. Defaults to `-1`. 

4540 table_name: An optional `string`. Defaults to `""`. 

4541 config: An optional `string`. Defaults to `""`. 

4542 name: A name for the operation (optional). 

4543 

4544 Returns: 

4545 A tuple of `Tensor` objects (parameters, accumulators, linears). 

4546 

4547 parameters: A `Tensor` of type `float32`. 

4548 accumulators: A `Tensor` of type `float32`. 

4549 linears: A `Tensor` of type `float32`. 

4550 """ 

4551 _ctx = _context._context or _context.context() 

4552 tld = _ctx._thread_local_data 

4553 if tld.is_eager: 

4554 try: 

4555 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4556 _ctx, "RetrieveTPUEmbeddingFTRLParameters", name, "table_id", 

4557 table_id, "table_name", table_name, "num_shards", num_shards, 

4558 "shard_id", shard_id, "config", config) 

4559 _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) 

4560 return _result 

4561 except _core._NotOkStatusException as e: 

4562 _ops.raise_from_not_ok_status(e, name) 

4563 except _core._FallbackException: 

4564 pass 

4565 try: 

4566 return retrieve_tpu_embedding_ftrl_parameters_eager_fallback( 

4567 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4568 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4569 except _core._SymbolicException: 

4570 pass # Add nodes to the TensorFlow graph. 

4571 # Add nodes to the TensorFlow graph. 

4572 num_shards = _execute.make_int(num_shards, "num_shards") 

4573 shard_id = _execute.make_int(shard_id, "shard_id") 

4574 if table_id is None: 

4575 table_id = -1 

4576 table_id = _execute.make_int(table_id, "table_id") 

4577 if table_name is None: 

4578 table_name = "" 

4579 table_name = _execute.make_str(table_name, "table_name") 

4580 if config is None: 

4581 config = "" 

4582 config = _execute.make_str(config, "config") 

4583 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4584 "RetrieveTPUEmbeddingFTRLParameters", num_shards=num_shards, 

4585 shard_id=shard_id, 

4586 table_id=table_id, 

4587 table_name=table_name, 

4588 config=config, name=name) 

4589 _result = _outputs[:] 

4590 if _execute.must_record_gradient(): 

4591 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4592 _op.get_attr("table_name"), "num_shards", 

4593 _op._get_attr_int("num_shards"), "shard_id", 

4594 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4595 _inputs_flat = _op.inputs 

4596 _execute.record_gradient( 

4597 "RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result) 

4598 _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) 

4599 return _result 

4600 

4601RetrieveTPUEmbeddingFTRLParameters = tf_export("raw_ops.RetrieveTPUEmbeddingFTRLParameters")(_ops.to_raw_op(retrieve_tpu_embedding_ftrl_parameters)) 

4602 

4603 

4604def retrieve_tpu_embedding_ftrl_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4605 num_shards = _execute.make_int(num_shards, "num_shards") 

4606 shard_id = _execute.make_int(shard_id, "shard_id") 

4607 if table_id is None: 

4608 table_id = -1 

4609 table_id = _execute.make_int(table_id, "table_id") 

4610 if table_name is None: 

4611 table_name = "" 

4612 table_name = _execute.make_str(table_name, "table_name") 

4613 if config is None: 

4614 config = "" 

4615 config = _execute.make_str(config, "config") 

4616 _inputs_flat = [] 

4617 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4618 num_shards, "shard_id", shard_id, "config", config) 

4619 _result = _execute.execute(b"RetrieveTPUEmbeddingFTRLParameters", 3, 

4620 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4621 name=name) 

4622 if _execute.must_record_gradient(): 

4623 _execute.record_gradient( 

4624 "RetrieveTPUEmbeddingFTRLParameters", _inputs_flat, _attrs, _result) 

4625 _result = _RetrieveTPUEmbeddingFTRLParametersOutput._make(_result) 

4626 return _result 

4627 

4628_RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput = collections.namedtuple( 

4629 "RetrieveTPUEmbeddingFrequencyEstimatorParameters", 

4630 ["parameters", "last_hit_step"]) 

4631 

4632 

4633def retrieve_tpu_embedding_frequency_estimator_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4634 r"""Retrieve frequency estimator embedding parameters. 

4635 

4636 An op that retrieves optimization parameters from embedding to host 

4637 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4638 the correct embedding table configuration. For example, this op is 

4639 used to retrieve updated parameters before saving a checkpoint. 

4640 

4641 Args: 

4642 num_shards: An `int`. 

4643 shard_id: An `int`. 

4644 table_id: An optional `int`. Defaults to `-1`. 

4645 table_name: An optional `string`. Defaults to `""`. 

4646 config: An optional `string`. Defaults to `""`. 

4647 name: A name for the operation (optional). 

4648 

4649 Returns: 

4650 A tuple of `Tensor` objects (parameters, last_hit_step). 

4651 

4652 parameters: A `Tensor` of type `float32`. 

4653 last_hit_step: A `Tensor` of type `float32`. 

4654 """ 

4655 _ctx = _context._context or _context.context() 

4656 tld = _ctx._thread_local_data 

4657 if tld.is_eager: 

4658 try: 

4659 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4660 _ctx, "RetrieveTPUEmbeddingFrequencyEstimatorParameters", name, 

4661 "table_id", table_id, "table_name", table_name, "num_shards", 

4662 num_shards, "shard_id", shard_id, "config", config) 

4663 _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) 

4664 return _result 

4665 except _core._NotOkStatusException as e: 

4666 _ops.raise_from_not_ok_status(e, name) 

4667 except _core._FallbackException: 

4668 pass 

4669 try: 

4670 return retrieve_tpu_embedding_frequency_estimator_parameters_eager_fallback( 

4671 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4672 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4673 except _core._SymbolicException: 

4674 pass # Add nodes to the TensorFlow graph. 

4675 # Add nodes to the TensorFlow graph. 

4676 num_shards = _execute.make_int(num_shards, "num_shards") 

4677 shard_id = _execute.make_int(shard_id, "shard_id") 

4678 if table_id is None: 

4679 table_id = -1 

4680 table_id = _execute.make_int(table_id, "table_id") 

4681 if table_name is None: 

4682 table_name = "" 

4683 table_name = _execute.make_str(table_name, "table_name") 

4684 if config is None: 

4685 config = "" 

4686 config = _execute.make_str(config, "config") 

4687 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4688 "RetrieveTPUEmbeddingFrequencyEstimatorParameters", num_shards=num_shards, 

4689 shard_id=shard_id, 

4690 table_id=table_id, 

4691 table_name=table_name, 

4692 config=config, 

4693 name=name) 

4694 _result = _outputs[:] 

4695 if _execute.must_record_gradient(): 

4696 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4697 _op.get_attr("table_name"), "num_shards", 

4698 _op._get_attr_int("num_shards"), "shard_id", 

4699 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4700 _inputs_flat = _op.inputs 

4701 _execute.record_gradient( 

4702 "RetrieveTPUEmbeddingFrequencyEstimatorParameters", _inputs_flat, _attrs, _result) 

4703 _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) 

4704 return _result 

4705 

4706RetrieveTPUEmbeddingFrequencyEstimatorParameters = tf_export("raw_ops.RetrieveTPUEmbeddingFrequencyEstimatorParameters")(_ops.to_raw_op(retrieve_tpu_embedding_frequency_estimator_parameters)) 

4707 

4708 

4709def retrieve_tpu_embedding_frequency_estimator_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4710 num_shards = _execute.make_int(num_shards, "num_shards") 

4711 shard_id = _execute.make_int(shard_id, "shard_id") 

4712 if table_id is None: 

4713 table_id = -1 

4714 table_id = _execute.make_int(table_id, "table_id") 

4715 if table_name is None: 

4716 table_name = "" 

4717 table_name = _execute.make_str(table_name, "table_name") 

4718 if config is None: 

4719 config = "" 

4720 config = _execute.make_str(config, "config") 

4721 _inputs_flat = [] 

4722 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4723 num_shards, "shard_id", shard_id, "config", config) 

4724 _result = _execute.execute(b"RetrieveTPUEmbeddingFrequencyEstimatorParameters", 

4725 2, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4726 name=name) 

4727 if _execute.must_record_gradient(): 

4728 _execute.record_gradient( 

4729 "RetrieveTPUEmbeddingFrequencyEstimatorParameters", _inputs_flat, _attrs, _result) 

4730 _result = _RetrieveTPUEmbeddingFrequencyEstimatorParametersOutput._make(_result) 

4731 return _result 

4732 

4733_RetrieveTPUEmbeddingMDLAdagradLightParametersOutput = collections.namedtuple( 

4734 "RetrieveTPUEmbeddingMDLAdagradLightParameters", 

4735 ["parameters", "accumulators", "weights", "benefits"]) 

4736 

4737 

4738def retrieve_tpu_embedding_mdl_adagrad_light_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4739 r"""Retrieve MDL Adagrad Light embedding parameters. 

4740 

4741 An op that retrieves optimization parameters from embedding to host 

4742 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4743 the correct embedding table configuration. For example, this op is 

4744 used to retrieve updated parameters before saving a checkpoint. 

4745 

4746 Args: 

4747 num_shards: An `int`. 

4748 shard_id: An `int`. 

4749 table_id: An optional `int`. Defaults to `-1`. 

4750 table_name: An optional `string`. Defaults to `""`. 

4751 config: An optional `string`. Defaults to `""`. 

4752 name: A name for the operation (optional). 

4753 

4754 Returns: 

4755 A tuple of `Tensor` objects (parameters, accumulators, weights, benefits). 

4756 

4757 parameters: A `Tensor` of type `float32`. 

4758 accumulators: A `Tensor` of type `float32`. 

4759 weights: A `Tensor` of type `float32`. 

4760 benefits: A `Tensor` of type `float32`. 

4761 """ 

4762 _ctx = _context._context or _context.context() 

4763 tld = _ctx._thread_local_data 

4764 if tld.is_eager: 

4765 try: 

4766 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4767 _ctx, "RetrieveTPUEmbeddingMDLAdagradLightParameters", name, 

4768 "table_id", table_id, "table_name", table_name, "num_shards", 

4769 num_shards, "shard_id", shard_id, "config", config) 

4770 _result = _RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) 

4771 return _result 

4772 except _core._NotOkStatusException as e: 

4773 _ops.raise_from_not_ok_status(e, name) 

4774 except _core._FallbackException: 

4775 pass 

4776 try: 

4777 return retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback( 

4778 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4779 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4780 except _core._SymbolicException: 

4781 pass # Add nodes to the TensorFlow graph. 

4782 # Add nodes to the TensorFlow graph. 

4783 num_shards = _execute.make_int(num_shards, "num_shards") 

4784 shard_id = _execute.make_int(shard_id, "shard_id") 

4785 if table_id is None: 

4786 table_id = -1 

4787 table_id = _execute.make_int(table_id, "table_id") 

4788 if table_name is None: 

4789 table_name = "" 

4790 table_name = _execute.make_str(table_name, "table_name") 

4791 if config is None: 

4792 config = "" 

4793 config = _execute.make_str(config, "config") 

4794 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4795 "RetrieveTPUEmbeddingMDLAdagradLightParameters", num_shards=num_shards, 

4796 shard_id=shard_id, 

4797 table_id=table_id, 

4798 table_name=table_name, 

4799 config=config, 

4800 name=name) 

4801 _result = _outputs[:] 

4802 if _execute.must_record_gradient(): 

4803 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4804 _op.get_attr("table_name"), "num_shards", 

4805 _op._get_attr_int("num_shards"), "shard_id", 

4806 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4807 _inputs_flat = _op.inputs 

4808 _execute.record_gradient( 

4809 "RetrieveTPUEmbeddingMDLAdagradLightParameters", _inputs_flat, _attrs, _result) 

4810 _result = _RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) 

4811 return _result 

4812 

4813RetrieveTPUEmbeddingMDLAdagradLightParameters = tf_export("raw_ops.RetrieveTPUEmbeddingMDLAdagradLightParameters")(_ops.to_raw_op(retrieve_tpu_embedding_mdl_adagrad_light_parameters)) 

4814 

4815 

4816def retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4817 num_shards = _execute.make_int(num_shards, "num_shards") 

4818 shard_id = _execute.make_int(shard_id, "shard_id") 

4819 if table_id is None: 

4820 table_id = -1 

4821 table_id = _execute.make_int(table_id, "table_id") 

4822 if table_name is None: 

4823 table_name = "" 

4824 table_name = _execute.make_str(table_name, "table_name") 

4825 if config is None: 

4826 config = "" 

4827 config = _execute.make_str(config, "config") 

4828 _inputs_flat = [] 

4829 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4830 num_shards, "shard_id", shard_id, "config", config) 

4831 _result = _execute.execute(b"RetrieveTPUEmbeddingMDLAdagradLightParameters", 

4832 4, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4833 name=name) 

4834 if _execute.must_record_gradient(): 

4835 _execute.record_gradient( 

4836 "RetrieveTPUEmbeddingMDLAdagradLightParameters", _inputs_flat, _attrs, _result) 

4837 _result = _RetrieveTPUEmbeddingMDLAdagradLightParametersOutput._make(_result) 

4838 return _result 

4839 

4840_RetrieveTPUEmbeddingMomentumParametersOutput = collections.namedtuple( 

4841 "RetrieveTPUEmbeddingMomentumParameters", 

4842 ["parameters", "momenta"]) 

4843 

4844 

4845def retrieve_tpu_embedding_momentum_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4846 r"""Retrieve Momentum embedding parameters. 

4847 

4848 An op that retrieves optimization parameters from embedding to host 

4849 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4850 the correct embedding table configuration. For example, this op is 

4851 used to retrieve updated parameters before saving a checkpoint. 

4852 

4853 Args: 

4854 num_shards: An `int`. 

4855 shard_id: An `int`. 

4856 table_id: An optional `int`. Defaults to `-1`. 

4857 table_name: An optional `string`. Defaults to `""`. 

4858 config: An optional `string`. Defaults to `""`. 

4859 name: A name for the operation (optional). 

4860 

4861 Returns: 

4862 A tuple of `Tensor` objects (parameters, momenta). 

4863 

4864 parameters: A `Tensor` of type `float32`. 

4865 momenta: A `Tensor` of type `float32`. 

4866 """ 

4867 _ctx = _context._context or _context.context() 

4868 tld = _ctx._thread_local_data 

4869 if tld.is_eager: 

4870 try: 

4871 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4872 _ctx, "RetrieveTPUEmbeddingMomentumParameters", name, "table_id", 

4873 table_id, "table_name", table_name, "num_shards", num_shards, 

4874 "shard_id", shard_id, "config", config) 

4875 _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) 

4876 return _result 

4877 except _core._NotOkStatusException as e: 

4878 _ops.raise_from_not_ok_status(e, name) 

4879 except _core._FallbackException: 

4880 pass 

4881 try: 

4882 return retrieve_tpu_embedding_momentum_parameters_eager_fallback( 

4883 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4884 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4885 except _core._SymbolicException: 

4886 pass # Add nodes to the TensorFlow graph. 

4887 # Add nodes to the TensorFlow graph. 

4888 num_shards = _execute.make_int(num_shards, "num_shards") 

4889 shard_id = _execute.make_int(shard_id, "shard_id") 

4890 if table_id is None: 

4891 table_id = -1 

4892 table_id = _execute.make_int(table_id, "table_id") 

4893 if table_name is None: 

4894 table_name = "" 

4895 table_name = _execute.make_str(table_name, "table_name") 

4896 if config is None: 

4897 config = "" 

4898 config = _execute.make_str(config, "config") 

4899 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4900 "RetrieveTPUEmbeddingMomentumParameters", num_shards=num_shards, 

4901 shard_id=shard_id, 

4902 table_id=table_id, 

4903 table_name=table_name, 

4904 config=config, name=name) 

4905 _result = _outputs[:] 

4906 if _execute.must_record_gradient(): 

4907 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

4908 _op.get_attr("table_name"), "num_shards", 

4909 _op._get_attr_int("num_shards"), "shard_id", 

4910 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

4911 _inputs_flat = _op.inputs 

4912 _execute.record_gradient( 

4913 "RetrieveTPUEmbeddingMomentumParameters", _inputs_flat, _attrs, _result) 

4914 _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) 

4915 return _result 

4916 

4917RetrieveTPUEmbeddingMomentumParameters = tf_export("raw_ops.RetrieveTPUEmbeddingMomentumParameters")(_ops.to_raw_op(retrieve_tpu_embedding_momentum_parameters)) 
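# Illustrative usage sketch for the wrapper above: pull the Momentum slot
# variables back to host memory before writing a checkpoint. This assumes the
# embedding system has already been configured (ConfigureTPUEmbeddingHost);
# the single-shard layout and the table name "table_0" are hypothetical.
def _example_retrieve_momentum_parameters():
  parameters, momenta = retrieve_tpu_embedding_momentum_parameters(
      num_shards=1, shard_id=0, table_name="table_0")
  return parameters, momenta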

4918 

4919 

4920def retrieve_tpu_embedding_momentum_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

4921 num_shards = _execute.make_int(num_shards, "num_shards") 

4922 shard_id = _execute.make_int(shard_id, "shard_id") 

4923 if table_id is None: 

4924 table_id = -1 

4925 table_id = _execute.make_int(table_id, "table_id") 

4926 if table_name is None: 

4927 table_name = "" 

4928 table_name = _execute.make_str(table_name, "table_name") 

4929 if config is None: 

4930 config = "" 

4931 config = _execute.make_str(config, "config") 

4932 _inputs_flat = [] 

4933 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

4934 num_shards, "shard_id", shard_id, "config", config) 

4935 _result = _execute.execute(b"RetrieveTPUEmbeddingMomentumParameters", 2, 

4936 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

4937 name=name) 

4938 if _execute.must_record_gradient(): 

4939 _execute.record_gradient( 

4940 "RetrieveTPUEmbeddingMomentumParameters", _inputs_flat, _attrs, _result) 

4941 _result = _RetrieveTPUEmbeddingMomentumParametersOutput._make(_result) 

4942 return _result 

4943 

4944_RetrieveTPUEmbeddingProximalAdagradParametersOutput = collections.namedtuple( 

4945 "RetrieveTPUEmbeddingProximalAdagradParameters", 

4946 ["parameters", "accumulators"]) 

4947 

4948 

4949def retrieve_tpu_embedding_proximal_adagrad_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

4950 r"""Retrieve proximal Adagrad embedding parameters. 

4951 

4952 An op that retrieves optimization parameters from embedding to host 

4953 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

4954 the correct embedding table configuration. For example, this op is 

4955 used to retrieve updated parameters before saving a checkpoint. 

4956 

4957 Args: 

4958 num_shards: An `int`. 

4959 shard_id: An `int`. 

4960 table_id: An optional `int`. Defaults to `-1`. 

4961 table_name: An optional `string`. Defaults to `""`. 

4962 config: An optional `string`. Defaults to `""`. 

4963 name: A name for the operation (optional). 

4964 

4965 Returns: 

4966 A tuple of `Tensor` objects (parameters, accumulators). 

4967 

4968 parameters: A `Tensor` of type `float32`. 

4969 accumulators: A `Tensor` of type `float32`. 

4970 """ 

4971 _ctx = _context._context or _context.context() 

4972 tld = _ctx._thread_local_data 

4973 if tld.is_eager: 

4974 try: 

4975 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4976 _ctx, "RetrieveTPUEmbeddingProximalAdagradParameters", name, 

4977 "table_id", table_id, "table_name", table_name, "num_shards", 

4978 num_shards, "shard_id", shard_id, "config", config) 

4979 _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result) 

4980 return _result 

4981 except _core._NotOkStatusException as e: 

4982 _ops.raise_from_not_ok_status(e, name) 

4983 except _core._FallbackException: 

4984 pass 

4985 try: 

4986 return retrieve_tpu_embedding_proximal_adagrad_parameters_eager_fallback( 

4987 table_id=table_id, table_name=table_name, num_shards=num_shards, 

4988 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

4989 except _core._SymbolicException: 

4990 pass # Add nodes to the TensorFlow graph. 

4991 # Add nodes to the TensorFlow graph. 

4992 num_shards = _execute.make_int(num_shards, "num_shards") 

4993 shard_id = _execute.make_int(shard_id, "shard_id") 

4994 if table_id is None: 

4995 table_id = -1 

4996 table_id = _execute.make_int(table_id, "table_id") 

4997 if table_name is None: 

4998 table_name = "" 

4999 table_name = _execute.make_str(table_name, "table_name") 

5000 if config is None: 

5001 config = "" 

5002 config = _execute.make_str(config, "config") 

5003 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5004 "RetrieveTPUEmbeddingProximalAdagradParameters", num_shards=num_shards, 

5005 shard_id=shard_id, 

5006 table_id=table_id, 

5007 table_name=table_name, 

5008 config=config, 

5009 name=name) 

5010 _result = _outputs[:] 

5011 if _execute.must_record_gradient(): 

5012 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

5013 _op.get_attr("table_name"), "num_shards", 

5014 _op._get_attr_int("num_shards"), "shard_id", 

5015 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

5016 _inputs_flat = _op.inputs 

5017 _execute.record_gradient( 

5018 "RetrieveTPUEmbeddingProximalAdagradParameters", _inputs_flat, _attrs, _result) 

5019 _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result) 

5020 return _result 

5021 

5022RetrieveTPUEmbeddingProximalAdagradParameters = tf_export("raw_ops.RetrieveTPUEmbeddingProximalAdagradParameters")(_ops.to_raw_op(retrieve_tpu_embedding_proximal_adagrad_parameters)) 

5023 

5024 

5025def retrieve_tpu_embedding_proximal_adagrad_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

5026 num_shards = _execute.make_int(num_shards, "num_shards") 

5027 shard_id = _execute.make_int(shard_id, "shard_id") 

5028 if table_id is None: 

5029 table_id = -1 

5030 table_id = _execute.make_int(table_id, "table_id") 

5031 if table_name is None: 

5032 table_name = "" 

5033 table_name = _execute.make_str(table_name, "table_name") 

5034 if config is None: 

5035 config = "" 

5036 config = _execute.make_str(config, "config") 

5037 _inputs_flat = [] 

5038 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

5039 num_shards, "shard_id", shard_id, "config", config) 

5040 _result = _execute.execute(b"RetrieveTPUEmbeddingProximalAdagradParameters", 

5041 2, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5042 name=name) 

5043 if _execute.must_record_gradient(): 

5044 _execute.record_gradient( 

5045 "RetrieveTPUEmbeddingProximalAdagradParameters", _inputs_flat, _attrs, _result) 

5046 _result = _RetrieveTPUEmbeddingProximalAdagradParametersOutput._make(_result) 

5047 return _result 

5048 

5049_RetrieveTPUEmbeddingProximalYogiParametersOutput = collections.namedtuple( 

5050 "RetrieveTPUEmbeddingProximalYogiParameters", 

5051 ["parameters", "v", "m"]) 

5052 

5053 

5054def retrieve_tpu_embedding_proximal_yogi_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

5055 r"""TODO: add doc. 

5056 

5057 Args: 

5058 num_shards: An `int`. 

5059 shard_id: An `int`. 

5060 table_id: An optional `int`. Defaults to `-1`. 

5061 table_name: An optional `string`. Defaults to `""`. 

5062 config: An optional `string`. Defaults to `""`. 

5063 name: A name for the operation (optional). 

5064 

5065 Returns: 

5066 A tuple of `Tensor` objects (parameters, v, m). 

5067 

5068 parameters: A `Tensor` of type `float32`. 

5069 v: A `Tensor` of type `float32`. 

5070 m: A `Tensor` of type `float32`. 

5071 """ 

5072 _ctx = _context._context or _context.context() 

5073 tld = _ctx._thread_local_data 

5074 if tld.is_eager: 

5075 try: 

5076 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5077 _ctx, "RetrieveTPUEmbeddingProximalYogiParameters", name, "table_id", 

5078 table_id, "table_name", table_name, "num_shards", num_shards, 

5079 "shard_id", shard_id, "config", config) 

5080 _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result) 

5081 return _result 

5082 except _core._NotOkStatusException as e: 

5083 _ops.raise_from_not_ok_status(e, name) 

5084 except _core._FallbackException: 

5085 pass 

5086 try: 

5087 return retrieve_tpu_embedding_proximal_yogi_parameters_eager_fallback( 

5088 table_id=table_id, table_name=table_name, num_shards=num_shards, 

5089 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

5090 except _core._SymbolicException: 

5091 pass # Add nodes to the TensorFlow graph. 

5092 # Add nodes to the TensorFlow graph. 

5093 num_shards = _execute.make_int(num_shards, "num_shards") 

5094 shard_id = _execute.make_int(shard_id, "shard_id") 

5095 if table_id is None: 

5096 table_id = -1 

5097 table_id = _execute.make_int(table_id, "table_id") 

5098 if table_name is None: 

5099 table_name = "" 

5100 table_name = _execute.make_str(table_name, "table_name") 

5101 if config is None: 

5102 config = "" 

5103 config = _execute.make_str(config, "config") 

5104 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5105 "RetrieveTPUEmbeddingProximalYogiParameters", num_shards=num_shards, 

5106 shard_id=shard_id, 

5107 table_id=table_id, 

5108 table_name=table_name, 

5109 config=config, 

5110 name=name) 

5111 _result = _outputs[:] 

5112 if _execute.must_record_gradient(): 

5113 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

5114 _op.get_attr("table_name"), "num_shards", 

5115 _op._get_attr_int("num_shards"), "shard_id", 

5116 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

5117 _inputs_flat = _op.inputs 

5118 _execute.record_gradient( 

5119 "RetrieveTPUEmbeddingProximalYogiParameters", _inputs_flat, _attrs, _result) 

5120 _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result) 

5121 return _result 

5122 

5123RetrieveTPUEmbeddingProximalYogiParameters = tf_export("raw_ops.RetrieveTPUEmbeddingProximalYogiParameters")(_ops.to_raw_op(retrieve_tpu_embedding_proximal_yogi_parameters)) 
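# Sketch of the three-tensor result of the wrapper above, accessed through the
# namedtuple defined earlier in this file. The shard layout and table name are
# hypothetical, and the embedding system is assumed to be configured already.
def _example_retrieve_proximal_yogi_parameters():
  result = retrieve_tpu_embedding_proximal_yogi_parameters(
      num_shards=1, shard_id=0, table_name="table_0")
  return result.parameters, result.v, result.m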

5124 

5125 

5126def retrieve_tpu_embedding_proximal_yogi_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

5127 num_shards = _execute.make_int(num_shards, "num_shards") 

5128 shard_id = _execute.make_int(shard_id, "shard_id") 

5129 if table_id is None: 

5130 table_id = -1 

5131 table_id = _execute.make_int(table_id, "table_id") 

5132 if table_name is None: 

5133 table_name = "" 

5134 table_name = _execute.make_str(table_name, "table_name") 

5135 if config is None: 

5136 config = "" 

5137 config = _execute.make_str(config, "config") 

5138 _inputs_flat = [] 

5139 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

5140 num_shards, "shard_id", shard_id, "config", config) 

5141 _result = _execute.execute(b"RetrieveTPUEmbeddingProximalYogiParameters", 3, 

5142 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5143 name=name) 

5144 if _execute.must_record_gradient(): 

5145 _execute.record_gradient( 

5146 "RetrieveTPUEmbeddingProximalYogiParameters", _inputs_flat, _attrs, _result) 

5147 _result = _RetrieveTPUEmbeddingProximalYogiParametersOutput._make(_result) 

5148 return _result 

5149 

5150_RetrieveTPUEmbeddingRMSPropParametersOutput = collections.namedtuple( 

5151 "RetrieveTPUEmbeddingRMSPropParameters", 

5152 ["parameters", "ms", "mom"]) 

5153 

5154 

5155def retrieve_tpu_embedding_rms_prop_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

5156 r"""Retrieve RMSProp embedding parameters. 

5157 

5158 An op that retrieves optimization parameters from embedding to host 

5159 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

5160 the correct embedding table configuration. For example, this op is 

5161 used to retrieve updated parameters before saving a checkpoint. 

5162 

5163 Args: 

5164 num_shards: An `int`. 

5165 shard_id: An `int`. 

5166 table_id: An optional `int`. Defaults to `-1`. 

5167 table_name: An optional `string`. Defaults to `""`. 

5168 config: An optional `string`. Defaults to `""`. 

5169 name: A name for the operation (optional). 

5170 

5171 Returns: 

5172 A tuple of `Tensor` objects (parameters, ms, mom). 

5173 

5174 parameters: A `Tensor` of type `float32`. 

5175 ms: A `Tensor` of type `float32`. 

5176 mom: A `Tensor` of type `float32`. 

5177 """ 

5178 _ctx = _context._context or _context.context() 

5179 tld = _ctx._thread_local_data 

5180 if tld.is_eager: 

5181 try: 

5182 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5183 _ctx, "RetrieveTPUEmbeddingRMSPropParameters", name, "table_id", 

5184 table_id, "table_name", table_name, "num_shards", num_shards, 

5185 "shard_id", shard_id, "config", config) 

5186 _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result) 

5187 return _result 

5188 except _core._NotOkStatusException as e: 

5189 _ops.raise_from_not_ok_status(e, name) 

5190 except _core._FallbackException: 

5191 pass 

5192 try: 

5193 return retrieve_tpu_embedding_rms_prop_parameters_eager_fallback( 

5194 table_id=table_id, table_name=table_name, num_shards=num_shards, 

5195 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

5196 except _core._SymbolicException: 

5197 pass # Add nodes to the TensorFlow graph. 

5198 # Add nodes to the TensorFlow graph. 

5199 num_shards = _execute.make_int(num_shards, "num_shards") 

5200 shard_id = _execute.make_int(shard_id, "shard_id") 

5201 if table_id is None: 

5202 table_id = -1 

5203 table_id = _execute.make_int(table_id, "table_id") 

5204 if table_name is None: 

5205 table_name = "" 

5206 table_name = _execute.make_str(table_name, "table_name") 

5207 if config is None: 

5208 config = "" 

5209 config = _execute.make_str(config, "config") 

5210 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5211 "RetrieveTPUEmbeddingRMSPropParameters", num_shards=num_shards, 

5212 shard_id=shard_id, 

5213 table_id=table_id, 

5214 table_name=table_name, 

5215 config=config, name=name) 

5216 _result = _outputs[:] 

5217 if _execute.must_record_gradient(): 

5218 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

5219 _op.get_attr("table_name"), "num_shards", 

5220 _op._get_attr_int("num_shards"), "shard_id", 

5221 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

5222 _inputs_flat = _op.inputs 

5223 _execute.record_gradient( 

5224 "RetrieveTPUEmbeddingRMSPropParameters", _inputs_flat, _attrs, _result) 

5225 _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result) 

5226 return _result 

5227 

5228RetrieveTPUEmbeddingRMSPropParameters = tf_export("raw_ops.RetrieveTPUEmbeddingRMSPropParameters")(_ops.to_raw_op(retrieve_tpu_embedding_rms_prop_parameters)) 

5229 

5230 

5231def retrieve_tpu_embedding_rms_prop_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

5232 num_shards = _execute.make_int(num_shards, "num_shards") 

5233 shard_id = _execute.make_int(shard_id, "shard_id") 

5234 if table_id is None: 

5235 table_id = -1 

5236 table_id = _execute.make_int(table_id, "table_id") 

5237 if table_name is None: 

5238 table_name = "" 

5239 table_name = _execute.make_str(table_name, "table_name") 

5240 if config is None: 

5241 config = "" 

5242 config = _execute.make_str(config, "config") 

5243 _inputs_flat = [] 

5244 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

5245 num_shards, "shard_id", shard_id, "config", config) 

5246 _result = _execute.execute(b"RetrieveTPUEmbeddingRMSPropParameters", 3, 

5247 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5248 name=name) 

5249 if _execute.must_record_gradient(): 

5250 _execute.record_gradient( 

5251 "RetrieveTPUEmbeddingRMSPropParameters", _inputs_flat, _attrs, _result) 

5252 _result = _RetrieveTPUEmbeddingRMSPropParametersOutput._make(_result) 

5253 return _result 

5254 

5255 

5256def retrieve_tpu_embedding_stochastic_gradient_descent_parameters(num_shards, shard_id, table_id=-1, table_name="", config="", name=None): 

5257 r"""Retrieve SGD embedding parameters. 

5258 

5259 An op that retrieves optimization parameters from embedding to host 

5260 memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up 

5261 the correct embedding table configuration. For example, this op is 

5262 used to retrieve updated parameters before saving a checkpoint. 

5263 

5264 Args: 

5265 num_shards: An `int`. 

5266 shard_id: An `int`. 

5267 table_id: An optional `int`. Defaults to `-1`. 

5268 table_name: An optional `string`. Defaults to `""`. 

5269 config: An optional `string`. Defaults to `""`. 

5270 name: A name for the operation (optional). 

5271 

5272 Returns: 

5273 A `Tensor` of type `float32`. 

5274 """ 

5275 _ctx = _context._context or _context.context() 

5276 tld = _ctx._thread_local_data 

5277 if tld.is_eager: 

5278 try: 

5279 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5280 _ctx, "RetrieveTPUEmbeddingStochasticGradientDescentParameters", name, 

5281 "table_id", table_id, "table_name", table_name, "num_shards", 

5282 num_shards, "shard_id", shard_id, "config", config) 

5283 return _result 

5284 except _core._NotOkStatusException as e: 

5285 _ops.raise_from_not_ok_status(e, name) 

5286 except _core._FallbackException: 

5287 pass 

5288 try: 

5289 return retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback( 

5290 table_id=table_id, table_name=table_name, num_shards=num_shards, 

5291 shard_id=shard_id, config=config, name=name, ctx=_ctx) 

5292 except _core._SymbolicException: 

5293 pass # Add nodes to the TensorFlow graph. 

5294 # Add nodes to the TensorFlow graph. 

5295 num_shards = _execute.make_int(num_shards, "num_shards") 

5296 shard_id = _execute.make_int(shard_id, "shard_id") 

5297 if table_id is None: 

5298 table_id = -1 

5299 table_id = _execute.make_int(table_id, "table_id") 

5300 if table_name is None: 

5301 table_name = "" 

5302 table_name = _execute.make_str(table_name, "table_name") 

5303 if config is None: 

5304 config = "" 

5305 config = _execute.make_str(config, "config") 

5306 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5307 "RetrieveTPUEmbeddingStochasticGradientDescentParameters", num_shards=num_shards, 

5308 shard_id=shard_id, 

5309 table_id=table_id, 

5310 table_name=table_name, 

5311 config=config, 

5312 name=name) 

5313 _result = _outputs[:] 

5314 if _execute.must_record_gradient(): 

5315 _attrs = ("table_id", _op._get_attr_int("table_id"), "table_name", 

5316 _op.get_attr("table_name"), "num_shards", 

5317 _op._get_attr_int("num_shards"), "shard_id", 

5318 _op._get_attr_int("shard_id"), "config", _op.get_attr("config")) 

5319 _inputs_flat = _op.inputs 

5320 _execute.record_gradient( 

5321 "RetrieveTPUEmbeddingStochasticGradientDescentParameters", _inputs_flat, _attrs, _result) 

5322 _result, = _result 

5323 return _result 

5324 

5325RetrieveTPUEmbeddingStochasticGradientDescentParameters = tf_export("raw_ops.RetrieveTPUEmbeddingStochasticGradientDescentParameters")(_ops.to_raw_op(retrieve_tpu_embedding_stochastic_gradient_descent_parameters)) 

5326 

5327 

5328def retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager_fallback(num_shards, shard_id, table_id, table_name, config, name, ctx): 

5329 num_shards = _execute.make_int(num_shards, "num_shards") 

5330 shard_id = _execute.make_int(shard_id, "shard_id") 

5331 if table_id is None: 

5332 table_id = -1 

5333 table_id = _execute.make_int(table_id, "table_id") 

5334 if table_name is None: 

5335 table_name = "" 

5336 table_name = _execute.make_str(table_name, "table_name") 

5337 if config is None: 

5338 config = "" 

5339 config = _execute.make_str(config, "config") 

5340 _inputs_flat = [] 

5341 _attrs = ("table_id", table_id, "table_name", table_name, "num_shards", 

5342 num_shards, "shard_id", shard_id, "config", config) 

5343 _result = _execute.execute(b"RetrieveTPUEmbeddingStochasticGradientDescentParameters", 

5344 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5345 name=name) 

5346 if _execute.must_record_gradient(): 

5347 _execute.record_gradient( 

5348 "RetrieveTPUEmbeddingStochasticGradientDescentParameters", _inputs_flat, _attrs, _result) 

5349 _result, = _result 

5350 return _result 

5351 

5352 

5353def send_tpu_embedding_gradients(inputs, learning_rates, config, name=None): 

5354 r"""Performs gradient updates of embedding tables. 

5355 

5356 Args: 

5357 inputs: A list of at least 1 `Tensor` objects with type `float32`. 

5358 A TensorList of gradients with which to update embedding tables. 

5359 This argument has the same length and shapes as the return value of 

5360 RecvTPUEmbeddingActivations, but contains gradients of the model's loss 

5361 with respect to the embedding activations. The embedding tables are updated 

5362 from these gradients via the optimizer specified in the TPU embedding 

5363 configuration given to tpu.initialize_system. 

5364 learning_rates: A list of `Tensor` objects with type `float32`. 

5365 A TensorList of float32 scalars, one for each dynamic learning 

5366 rate tag: see the comments in 

5367 //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. 

5368 Multiple tables can share the same dynamic learning rate tag as specified 

5369 in the configuration. If the learning rates for all tables are constant, 

5370 this list should be empty. 

5371 config: A `string`. Serialized TPUEmbeddingConfiguration proto. 

5372 name: A name for the operation (optional). 

5373 

5374 Returns: 

5375 The created Operation. 

5376 """ 

5377 _ctx = _context._context or _context.context() 

5378 tld = _ctx._thread_local_data 

5379 if tld.is_eager: 

5380 try: 

5381 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5382 _ctx, "SendTPUEmbeddingGradients", name, inputs, learning_rates, 

5383 "config", config) 

5384 return _result 

5385 except _core._NotOkStatusException as e: 

5386 _ops.raise_from_not_ok_status(e, name) 

5387 except _core._FallbackException: 

5388 pass 

5389 try: 

5390 return send_tpu_embedding_gradients_eager_fallback( 

5391 inputs, learning_rates, config=config, name=name, ctx=_ctx) 

5392 except _core._SymbolicException: 

5393 pass # Add nodes to the TensorFlow graph. 

5394 # Add nodes to the TensorFlow graph. 

5395 if not isinstance(inputs, (list, tuple)): 

5396 raise TypeError( 

5397 "Expected list for 'inputs' argument to " 

5398 "'send_tpu_embedding_gradients' Op, not %r." % inputs) 

5399 _attr_N = len(inputs) 

5400 if not isinstance(learning_rates, (list, tuple)): 

5401 raise TypeError( 

5402 "Expected list for 'learning_rates' argument to " 

5403 "'send_tpu_embedding_gradients' Op, not %r." % learning_rates) 

5404 _attr_NN = len(learning_rates) 

5405 config = _execute.make_str(config, "config") 

5406 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5407 "SendTPUEmbeddingGradients", inputs=inputs, 

5408 learning_rates=learning_rates, 

5409 config=config, name=name) 

5410 return _op 

5411SendTPUEmbeddingGradients = tf_export("raw_ops.SendTPUEmbeddingGradients")(_ops.to_raw_op(send_tpu_embedding_gradients)) 
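# Sketch of pushing embedding gradients back to the TPU. It assumes `gradients`
# has the same length and shapes as the activations returned by
# RecvTPUEmbeddingActivations, that all learning rates are constant (so the
# `learning_rates` list is empty), and that `embedding_config` is a serialized
# TPUEmbeddingConfiguration proto; both arguments are hypothetical here.
def _example_send_tpu_embedding_gradients(gradients, embedding_config):
  return send_tpu_embedding_gradients(
      inputs=gradients, learning_rates=[], config=embedding_config)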

5412 

5413 

5414def send_tpu_embedding_gradients_eager_fallback(inputs, learning_rates, config, name, ctx): 

5415 if not isinstance(inputs, (list, tuple)): 

5416 raise TypeError( 

5417 "Expected list for 'inputs' argument to " 

5418 "'send_tpu_embedding_gradients' Op, not %r." % inputs) 

5419 _attr_N = len(inputs) 

5420 if not isinstance(learning_rates, (list, tuple)): 

5421 raise TypeError( 

5422 "Expected list for 'learning_rates' argument to " 

5423 "'send_tpu_embedding_gradients' Op, not %r." % learning_rates) 

5424 _attr_NN = len(learning_rates) 

5425 config = _execute.make_str(config, "config") 

5426 inputs = _ops.convert_n_to_tensor(inputs, _dtypes.float32) 

5427 learning_rates = _ops.convert_n_to_tensor(learning_rates, _dtypes.float32) 

5428 _inputs_flat = list(inputs) + list(learning_rates) 

5429 _attrs = ("N", _attr_N, "NN", _attr_NN, "config", config) 

5430 _result = _execute.execute(b"SendTPUEmbeddingGradients", 0, 

5431 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5432 name=name) 

5433 _result = None 

5434 return _result 

5435 

5436 

5437def shutdown_distributed_tpu(name=None): 

5438 r"""Shuts down a running distributed TPU system. 

5439 

5440 The op returns an error if no system is running. 

5441 

5442 Args: 

5443 name: A name for the operation (optional). 

5444 

5445 Returns: 

5446 The created Operation. 

5447 """ 

5448 _ctx = _context._context or _context.context() 

5449 tld = _ctx._thread_local_data 

5450 if tld.is_eager: 

5451 try: 

5452 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5453 _ctx, "ShutdownDistributedTPU", name) 

5454 return _result 

5455 except _core._NotOkStatusException as e: 

5456 _ops.raise_from_not_ok_status(e, name) 

5457 except _core._FallbackException: 

5458 pass 

5459 try: 

5460 return shutdown_distributed_tpu_eager_fallback( 

5461 name=name, ctx=_ctx) 

5462 except _core._SymbolicException: 

5463 pass # Add nodes to the TensorFlow graph. 

5464 # Add nodes to the TensorFlow graph. 

5465 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5466 "ShutdownDistributedTPU", name=name) 

5467 return _op 

5468ShutdownDistributedTPU = tf_export("raw_ops.ShutdownDistributedTPU")(_ops.to_raw_op(shutdown_distributed_tpu)) 

5469 

5470 

5471def shutdown_distributed_tpu_eager_fallback(name, ctx): 

5472 _inputs_flat = [] 

5473 _attrs = None 

5474 _result = _execute.execute(b"ShutdownDistributedTPU", 0, 

5475 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5476 name=name) 

5477 _result = None 

5478 return _result 

5479 

5480 

5481def tpu_compilation_result(name=None): 

5482 r"""Returns the result of a TPU compilation. 

5483 

5484 This operation returns the result of a TPU compilation as a serialized 

5485 CompilationResultProto, which holds a status and an error message if an error 

5486 occurred during compilation. 

5487 

5488 Args: 

5489 name: A name for the operation (optional). 

5490 

5491 Returns: 

5492 A `Tensor` of type `string`. 

5493 """ 

5494 _ctx = _context._context or _context.context() 

5495 tld = _ctx._thread_local_data 

5496 if tld.is_eager: 

5497 try: 

5498 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5499 _ctx, "TPUCompilationResult", name) 

5500 return _result 

5501 except _core._NotOkStatusException as e: 

5502 _ops.raise_from_not_ok_status(e, name) 

5503 except _core._FallbackException: 

5504 pass 

5505 try: 

5506 return tpu_compilation_result_eager_fallback( 

5507 name=name, ctx=_ctx) 

5508 except _core._SymbolicException: 

5509 pass # Add nodes to the TensorFlow graph. 

5510 # Add nodes to the TensorFlow graph. 

5511 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5512 "TPUCompilationResult", name=name) 

5513 _result = _outputs[:] 

5514 if _execute.must_record_gradient(): 

5515 _attrs = () 

5516 _inputs_flat = _op.inputs 

5517 _execute.record_gradient( 

5518 "TPUCompilationResult", _inputs_flat, _attrs, _result) 

5519 _result, = _result 

5520 return _result 

5521 

5522TPUCompilationResult = tf_export("raw_ops.TPUCompilationResult")(_ops.to_raw_op(tpu_compilation_result)) 
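# Sketch of inspecting the serialized compilation result returned by the
# wrapper above. The import path below assumes the CompilationResultProto
# bindings are generated at tensorflow.core.protobuf.tpu.compilation_result_pb2
# and that the op runs eagerly (hence .numpy()); adjust for your build.
def _example_check_tpu_compilation_result():
  from tensorflow.core.protobuf.tpu import compilation_result_pb2  # assumed path
  serialized = tpu_compilation_result()
  result_proto = compilation_result_pb2.CompilationResultProto()
  result_proto.ParseFromString(serialized.numpy())
  return result_proto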

5523 

5524 

5525def tpu_compilation_result_eager_fallback(name, ctx): 

5526 _inputs_flat = [] 

5527 _attrs = None 

5528 _result = _execute.execute(b"TPUCompilationResult", 1, inputs=_inputs_flat, 

5529 attrs=_attrs, ctx=ctx, name=name) 

5530 if _execute.must_record_gradient(): 

5531 _execute.record_gradient( 

5532 "TPUCompilationResult", _inputs_flat, _attrs, _result) 

5533 _result, = _result 

5534 return _result 

5535 

5536 

5537def tpu_embedding_activations(embedding_variable, sliced_activations, table_id, lookup_id, name=None): 

5538 r"""An op enabling differentiation of TPU Embeddings. 

5539 

5540 This op simply forwards its `sliced_activations` input, which is assumed to have been sliced 

5541 from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of 

5542 this op, and its first argument being a trainable Variable, enables automatic 

5543 differentiation of graphs containing embeddings via the TPU Embedding Python 

5544 libraries. 

5545 

5546 Args: 

5547 embedding_variable: A `Tensor` of type `float32`. 

5548 A trainable variable, enabling optimizers to find this op. 

5549 sliced_activations: A `Tensor` of type `float32`. 

5550 The embedding activations Tensor to return. 

5551 table_id: An `int` that is `>= 0`. 

5552 The id of the table in the embedding layer configuration from which 

5553 these activations were computed. 

5554 lookup_id: An `int` that is `>= 0`. 

5555 Identifier of the set of embedding indices which produced these 

5556 activations. 

5557 name: A name for the operation (optional). 

5558 

5559 Returns: 

5560 A `Tensor` of type `float32`. 

5561 """ 

5562 _ctx = _context._context or _context.context() 

5563 tld = _ctx._thread_local_data 

5564 if tld.is_eager: 

5565 try: 

5566 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5567 _ctx, "TPUEmbeddingActivations", name, embedding_variable, 

5568 sliced_activations, "table_id", table_id, "lookup_id", lookup_id) 

5569 return _result 

5570 except _core._NotOkStatusException as e: 

5571 _ops.raise_from_not_ok_status(e, name) 

5572 except _core._FallbackException: 

5573 pass 

5574 try: 

5575 return tpu_embedding_activations_eager_fallback( 

5576 embedding_variable, sliced_activations, table_id=table_id, 

5577 lookup_id=lookup_id, name=name, ctx=_ctx) 

5578 except _core._SymbolicException: 

5579 pass # Add nodes to the TensorFlow graph. 

5580 # Add nodes to the TensorFlow graph. 

5581 table_id = _execute.make_int(table_id, "table_id") 

5582 lookup_id = _execute.make_int(lookup_id, "lookup_id") 

5583 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5584 "TPUEmbeddingActivations", embedding_variable=embedding_variable, 

5585 sliced_activations=sliced_activations, 

5586 table_id=table_id, lookup_id=lookup_id, 

5587 name=name) 

5588 _result = _outputs[:] 

5589 if _execute.must_record_gradient(): 

5590 _attrs = ("table_id", _op._get_attr_int("table_id"), "lookup_id", 

5591 _op._get_attr_int("lookup_id")) 

5592 _inputs_flat = _op.inputs 

5593 _execute.record_gradient( 

5594 "TPUEmbeddingActivations", _inputs_flat, _attrs, _result) 

5595 _result, = _result 

5596 return _result 

5597 

5598TPUEmbeddingActivations = tf_export("raw_ops.TPUEmbeddingActivations")(_ops.to_raw_op(tpu_embedding_activations)) 
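# Sketch of the differentiation trick described above: the op forwards the
# sliced activations, while naming `embedding_variable` as an input so that
# automatic differentiation produces a gradient for the table variable. Both
# arguments and the id values below are hypothetical.
def _example_tpu_embedding_activations(table_variable, activations):
  return tpu_embedding_activations(
      embedding_variable=table_variable,
      sliced_activations=activations,
      table_id=0, lookup_id=0)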

5599 

5600 

5601def tpu_embedding_activations_eager_fallback(embedding_variable, sliced_activations, table_id, lookup_id, name, ctx): 

5602 table_id = _execute.make_int(table_id, "table_id") 

5603 lookup_id = _execute.make_int(lookup_id, "lookup_id") 

5604 embedding_variable = _ops.convert_to_tensor(embedding_variable, _dtypes.float32) 

5605 sliced_activations = _ops.convert_to_tensor(sliced_activations, _dtypes.float32) 

5606 _inputs_flat = [embedding_variable, sliced_activations] 

5607 _attrs = ("table_id", table_id, "lookup_id", lookup_id) 

5608 _result = _execute.execute(b"TPUEmbeddingActivations", 1, 

5609 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5610 name=name) 

5611 if _execute.must_record_gradient(): 

5612 _execute.record_gradient( 

5613 "TPUEmbeddingActivations", _inputs_flat, _attrs, _result) 

5614 _result, = _result 

5615 return _result 

5616 

5617 

5618def tpu_ordinal_selector(name=None): 

5619 r"""A TPU core selector Op. 

5620 

5621 This Op produces a set of TPU cores (for warm-up) or a single TPU core 

5622 (for regular inference) to execute the TPU program on. The output is 

5623 consumed by TPUPartitionedCall. 

5624 

5625 Args: 

5626 name: A name for the operation (optional). 

5627 

5628 Returns: 

5629 A `Tensor` of type `int32`. 

5630 """ 

5631 _ctx = _context._context or _context.context() 

5632 tld = _ctx._thread_local_data 

5633 if tld.is_eager: 

5634 try: 

5635 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5636 _ctx, "TPUOrdinalSelector", name) 

5637 return _result 

5638 except _core._NotOkStatusException as e: 

5639 _ops.raise_from_not_ok_status(e, name) 

5640 except _core._FallbackException: 

5641 pass 

5642 try: 

5643 return tpu_ordinal_selector_eager_fallback( 

5644 name=name, ctx=_ctx) 

5645 except _core._SymbolicException: 

5646 pass # Add nodes to the TensorFlow graph. 

5647 # Add nodes to the TensorFlow graph. 

5648 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5649 "TPUOrdinalSelector", name=name) 

5650 _result = _outputs[:] 

5651 if _execute.must_record_gradient(): 

5652 _attrs = () 

5653 _inputs_flat = _op.inputs 

5654 _execute.record_gradient( 

5655 "TPUOrdinalSelector", _inputs_flat, _attrs, _result) 

5656 _result, = _result 

5657 return _result 

5658 

5659TPUOrdinalSelector = tf_export("raw_ops.TPUOrdinalSelector")(_ops.to_raw_op(tpu_ordinal_selector)) 

5660 

5661 

5662def tpu_ordinal_selector_eager_fallback(name, ctx): 

5663 _inputs_flat = [] 

5664 _attrs = None 

5665 _result = _execute.execute(b"TPUOrdinalSelector", 1, inputs=_inputs_flat, 

5666 attrs=_attrs, ctx=ctx, name=name) 

5667 if _execute.must_record_gradient(): 

5668 _execute.record_gradient( 

5669 "TPUOrdinalSelector", _inputs_flat, _attrs, _result) 

5670 _result, = _result 

5671 return _result 

5672 

5673 

5674def tpu_partitioned_call(args, device_ordinal, Tout, f, autotuner_thresh=0, name=None): 

5675 r"""Calls a function placed on a specified TPU device. 

5676 

5677 Args: 

5678 args: A list of `Tensor` objects. The arguments to the function. 

5679 device_ordinal: A `Tensor` of type `int32`. 

5680 The TPU device ordinal to run the function on. 

5681 Tout: A list of `tf.DTypes`. The types of the outputs of the function. 

5682 f: A function decorated with @Defun. The function to call. 

5683 autotuner_thresh: An optional `int`. Defaults to `0`. 

5684 name: A name for the operation (optional). 

5685 

5686 Returns: 

5687 A list of `Tensor` objects of type `Tout`. 

5688 """ 

5689 _ctx = _context._context or _context.context() 

5690 tld = _ctx._thread_local_data 

5691 if tld.is_eager: 

5692 try: 

5693 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5694 _ctx, "TPUPartitionedCall", name, args, device_ordinal, "Tout", Tout, 

5695 "f", f, "autotuner_thresh", autotuner_thresh) 

5696 return _result 

5697 except _core._NotOkStatusException as e: 

5698 _ops.raise_from_not_ok_status(e, name) 

5699 except _core._FallbackException: 

5700 pass 

5701 try: 

5702 return tpu_partitioned_call_eager_fallback( 

5703 args, device_ordinal, Tout=Tout, f=f, 

5704 autotuner_thresh=autotuner_thresh, name=name, ctx=_ctx) 

5705 except _core._SymbolicException: 

5706 pass # Add nodes to the TensorFlow graph. 

5707 # Add nodes to the TensorFlow graph. 

5708 if not isinstance(Tout, (list, tuple)): 

5709 raise TypeError( 

5710 "Expected list for 'Tout' argument to " 

5711 "'tpu_partitioned_call' Op, not %r." % Tout) 

5712 Tout = [_execute.make_type(_t, "Tout") for _t in Tout] 

5713 if autotuner_thresh is None: 

5714 autotuner_thresh = 0 

5715 autotuner_thresh = _execute.make_int(autotuner_thresh, "autotuner_thresh") 

5716 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5717 "TPUPartitionedCall", args=args, device_ordinal=device_ordinal, 

5718 Tout=Tout, f=f, 

5719 autotuner_thresh=autotuner_thresh, name=name) 

5720 _result = _outputs[:] 

5721 if _execute.must_record_gradient(): 

5722 _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", 

5723 _op.get_attr("f"), "autotuner_thresh", 

5724 _op._get_attr_int("autotuner_thresh")) 

5725 _inputs_flat = _op.inputs 

5726 _execute.record_gradient( 

5727 "TPUPartitionedCall", _inputs_flat, _attrs, _result) 

5728 return _result 

5729 

5730TPUPartitionedCall = tf_export("raw_ops.TPUPartitionedCall")(_ops.to_raw_op(tpu_partitioned_call)) 
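# Sketch combining TPUOrdinalSelector with TPUPartitionedCall: the selector
# picks the core ordinal, and the partitioned call runs `tpu_function` there.
# `tpu_function` is a hypothetical function attr (e.g. built with @Defun) and
# the single float32 output type is an assumption.
def _example_tpu_partitioned_call(tpu_function, x):
  device_ordinal = tpu_ordinal_selector()
  return tpu_partitioned_call(
      args=[x], device_ordinal=device_ordinal,
      Tout=[_dtypes.float32], f=tpu_function)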

5731 

5732 

5733def tpu_partitioned_call_eager_fallback(args, device_ordinal, Tout, f, autotuner_thresh, name, ctx): 

5734 if not isinstance(Tout, (list, tuple)): 

5735 raise TypeError( 

5736 "Expected list for 'Tout' argument to " 

5737 "'tpu_partitioned_call' Op, not %r." % Tout) 

5738 Tout = [_execute.make_type(_t, "Tout") for _t in Tout] 

5739 if autotuner_thresh is None: 

5740 autotuner_thresh = 0 

5741 autotuner_thresh = _execute.make_int(autotuner_thresh, "autotuner_thresh") 

5742 _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx) 

5743 device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32) 

5744 _inputs_flat = list(args) + [device_ordinal] 

5745 _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f, "autotuner_thresh", 

5746 autotuner_thresh) 

5747 _result = _execute.execute(b"TPUPartitionedCall", len(Tout), 

5748 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5749 name=name) 

5750 if _execute.must_record_gradient(): 

5751 _execute.record_gradient( 

5752 "TPUPartitionedCall", _inputs_flat, _attrs, _result) 

5753 return _result 

5754 

5755 

5756def tpu_replicate_metadata(num_replicas, num_cores_per_replica=1, topology="", use_tpu=True, device_assignment=[], computation_shape=[], host_compute_core=[], padding_map=[], step_marker_location="STEP_MARK_AT_ENTRY", allow_soft_placement=False, use_spmd_for_xla_partitioning=False, tpu_compile_options_proto="", name=None): 

5757 r"""Metadata indicating how the TPU computation should be replicated. 

5758 

5759 This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph. 

5760 

5761 Args: 

5762 num_replicas: An `int` that is `>= 0`. 

5763 Number of replicas of the computation 

5764 num_cores_per_replica: An optional `int`. Defaults to `1`. 

5765 Number of cores per replica. Used for model parallelism. 

5766 topology: An optional `string`. Defaults to `""`. 

5767 TopologyProto indicating the topology of the TPU pod slice. 

5768 use_tpu: An optional `bool`. Defaults to `True`. 

5769 Whether to place the computation on the TPU. 

5770 device_assignment: An optional list of `ints`. Defaults to `[]`. 

5771 The assignment of devices for the computation. 

5772 computation_shape: An optional list of `ints`. Defaults to `[]`. 

5773 DEPRECATED. Use num_cores_per_replica instead. 

5774 host_compute_core: An optional list of `strings`. Defaults to `[]`. 

5775 padding_map: An optional list of `strings`. Defaults to `[]`. 

5776 step_marker_location: An optional `string`. Defaults to `"STEP_MARK_AT_ENTRY"`. 

5777 allow_soft_placement: An optional `bool`. Defaults to `False`. 

5778 use_spmd_for_xla_partitioning: An optional `bool`. Defaults to `False`. 

5779 tpu_compile_options_proto: An optional `string`. Defaults to `""`. 

5780 name: A name for the operation (optional). 

5781 

5782 Returns: 

5783 The created Operation. 

5784 """ 

5785 _ctx = _context._context or _context.context() 

5786 tld = _ctx._thread_local_data 

5787 if tld.is_eager: 

5788 try: 

5789 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5790 _ctx, "TPUReplicateMetadata", name, "num_replicas", num_replicas, 

5791 "num_cores_per_replica", num_cores_per_replica, "topology", topology, 

5792 "use_tpu", use_tpu, "device_assignment", device_assignment, 

5793 "computation_shape", computation_shape, "host_compute_core", 

5794 host_compute_core, "padding_map", padding_map, "step_marker_location", 

5795 step_marker_location, "allow_soft_placement", allow_soft_placement, 

5796 "use_spmd_for_xla_partitioning", use_spmd_for_xla_partitioning, 

5797 "tpu_compile_options_proto", tpu_compile_options_proto) 

5798 return _result 

5799 except _core._NotOkStatusException as e: 

5800 _ops.raise_from_not_ok_status(e, name) 

5801 except _core._FallbackException: 

5802 pass 

5803 try: 

5804 return tpu_replicate_metadata_eager_fallback( 

5805 num_replicas=num_replicas, 

5806 num_cores_per_replica=num_cores_per_replica, topology=topology, 

5807 use_tpu=use_tpu, device_assignment=device_assignment, 

5808 computation_shape=computation_shape, 

5809 host_compute_core=host_compute_core, padding_map=padding_map, 

5810 step_marker_location=step_marker_location, 

5811 allow_soft_placement=allow_soft_placement, 

5812 use_spmd_for_xla_partitioning=use_spmd_for_xla_partitioning, 

5813 tpu_compile_options_proto=tpu_compile_options_proto, name=name, 

5814 ctx=_ctx) 

5815 except _core._SymbolicException: 

5816 pass # Add nodes to the TensorFlow graph. 

5817 # Add nodes to the TensorFlow graph. 

5818 num_replicas = _execute.make_int(num_replicas, "num_replicas") 

5819 if num_cores_per_replica is None: 

5820 num_cores_per_replica = 1 

5821 num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica") 

5822 if topology is None: 

5823 topology = "" 

5824 topology = _execute.make_str(topology, "topology") 

5825 if use_tpu is None: 

5826 use_tpu = True 

5827 use_tpu = _execute.make_bool(use_tpu, "use_tpu") 

5828 if device_assignment is None: 

5829 device_assignment = [] 

5830 if not isinstance(device_assignment, (list, tuple)): 

5831 raise TypeError( 

5832 "Expected list for 'device_assignment' argument to " 

5833 "'tpu_replicate_metadata' Op, not %r." % device_assignment) 

5834 device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment] 

5835 if computation_shape is None: 

5836 computation_shape = [] 

5837 if not isinstance(computation_shape, (list, tuple)): 

5838 raise TypeError( 

5839 "Expected list for 'computation_shape' argument to " 

5840 "'tpu_replicate_metadata' Op, not %r." % computation_shape) 

5841 computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape] 

5842 if host_compute_core is None: 

5843 host_compute_core = [] 

5844 if not isinstance(host_compute_core, (list, tuple)): 

5845 raise TypeError( 

5846 "Expected list for 'host_compute_core' argument to " 

5847 "'tpu_replicate_metadata' Op, not %r." % host_compute_core) 

5848 host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core] 

5849 if padding_map is None: 

5850 padding_map = [] 

5851 if not isinstance(padding_map, (list, tuple)): 

5852 raise TypeError( 

5853 "Expected list for 'padding_map' argument to " 

5854 "'tpu_replicate_metadata' Op, not %r." % padding_map) 

5855 padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map] 

5856 if step_marker_location is None: 

5857 step_marker_location = "STEP_MARK_AT_ENTRY" 

5858 step_marker_location = _execute.make_str(step_marker_location, "step_marker_location") 

5859 if allow_soft_placement is None: 

5860 allow_soft_placement = False 

5861 allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement") 

5862 if use_spmd_for_xla_partitioning is None: 

5863 use_spmd_for_xla_partitioning = False 

5864 use_spmd_for_xla_partitioning = _execute.make_bool(use_spmd_for_xla_partitioning, "use_spmd_for_xla_partitioning") 

5865 if tpu_compile_options_proto is None: 

5866 tpu_compile_options_proto = "" 

5867 tpu_compile_options_proto = _execute.make_str(tpu_compile_options_proto, "tpu_compile_options_proto") 

5868 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5869 "TPUReplicateMetadata", num_replicas=num_replicas, 

5870 num_cores_per_replica=num_cores_per_replica, 

5871 topology=topology, use_tpu=use_tpu, 

5872 device_assignment=device_assignment, 

5873 computation_shape=computation_shape, 

5874 host_compute_core=host_compute_core, 

5875 padding_map=padding_map, 

5876 step_marker_location=step_marker_location, 

5877 allow_soft_placement=allow_soft_placement, 

5878 use_spmd_for_xla_partitioning=use_spmd_for_xla_partitioning, 

5879 tpu_compile_options_proto=tpu_compile_options_proto, 

5880 name=name) 

5881 return _op 

5882TPUReplicateMetadata = tf_export("raw_ops.TPUReplicateMetadata")(_ops.to_raw_op(tpu_replicate_metadata)) 
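# Minimal sketch of the metadata op above for a two-replica,
# one-core-per-replica tpu.replicate() subgraph, leaving every other attribute
# at its default; the replica count is an arbitrary example value.
def _example_tpu_replicate_metadata():
  return tpu_replicate_metadata(num_replicas=2, num_cores_per_replica=1)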

5883 

5884 

5885def tpu_replicate_metadata_eager_fallback(num_replicas, num_cores_per_replica, topology, use_tpu, device_assignment, computation_shape, host_compute_core, padding_map, step_marker_location, allow_soft_placement, use_spmd_for_xla_partitioning, tpu_compile_options_proto, name, ctx): 

5886 num_replicas = _execute.make_int(num_replicas, "num_replicas") 

5887 if num_cores_per_replica is None: 

5888 num_cores_per_replica = 1 

5889 num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica") 

5890 if topology is None: 

5891 topology = "" 

5892 topology = _execute.make_str(topology, "topology") 

5893 if use_tpu is None: 

5894 use_tpu = True 

5895 use_tpu = _execute.make_bool(use_tpu, "use_tpu") 

5896 if device_assignment is None: 

5897 device_assignment = [] 

5898 if not isinstance(device_assignment, (list, tuple)): 

5899 raise TypeError( 

5900 "Expected list for 'device_assignment' argument to " 

5901 "'tpu_replicate_metadata' Op, not %r." % device_assignment) 

5902 device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment] 

5903 if computation_shape is None: 

5904 computation_shape = [] 

5905 if not isinstance(computation_shape, (list, tuple)): 

5906 raise TypeError( 

5907 "Expected list for 'computation_shape' argument to " 

5908 "'tpu_replicate_metadata' Op, not %r." % computation_shape) 

5909 computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape] 

5910 if host_compute_core is None: 

5911 host_compute_core = [] 

5912 if not isinstance(host_compute_core, (list, tuple)): 

5913 raise TypeError( 

5914 "Expected list for 'host_compute_core' argument to " 

5915 "'tpu_replicate_metadata' Op, not %r." % host_compute_core) 

5916 host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core] 

5917 if padding_map is None: 

5918 padding_map = [] 

5919 if not isinstance(padding_map, (list, tuple)): 

5920 raise TypeError( 

5921 "Expected list for 'padding_map' argument to " 

5922 "'tpu_replicate_metadata' Op, not %r." % padding_map) 

5923 padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map] 

5924 if step_marker_location is None: 

5925 step_marker_location = "STEP_MARK_AT_ENTRY" 

5926 step_marker_location = _execute.make_str(step_marker_location, "step_marker_location") 

5927 if allow_soft_placement is None: 

5928 allow_soft_placement = False 

5929 allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement") 

5930 if use_spmd_for_xla_partitioning is None: 

5931 use_spmd_for_xla_partitioning = False 

5932 use_spmd_for_xla_partitioning = _execute.make_bool(use_spmd_for_xla_partitioning, "use_spmd_for_xla_partitioning") 

5933 if tpu_compile_options_proto is None: 

5934 tpu_compile_options_proto = "" 

5935 tpu_compile_options_proto = _execute.make_str(tpu_compile_options_proto, "tpu_compile_options_proto") 

5936 _inputs_flat = [] 

5937 _attrs = ("num_replicas", num_replicas, "num_cores_per_replica", 

5938 num_cores_per_replica, "topology", topology, "use_tpu", use_tpu, 

5939 "device_assignment", device_assignment, "computation_shape", 

5940 computation_shape, "host_compute_core", host_compute_core, "padding_map", 

5941 padding_map, "step_marker_location", step_marker_location, 

5942 "allow_soft_placement", allow_soft_placement, 

5943 "use_spmd_for_xla_partitioning", use_spmd_for_xla_partitioning, 

5944 "tpu_compile_options_proto", tpu_compile_options_proto) 

5945 _result = _execute.execute(b"TPUReplicateMetadata", 0, inputs=_inputs_flat, 

5946 attrs=_attrs, ctx=ctx, name=name) 

5947 _result = None 

5948 return _result 

5949 

5950 

5951def tpu_replicated_input(inputs, is_mirrored_variable=False, index=-1, is_packed=False, name=None): 

5952 r"""Connects N inputs to an N-way replicated TPU computation. 

5953 

5954 This operation holds a replicated input to a `tpu.replicate()` computation subgraph. 

5955 Each replicated input has the same shape and type as the output. 

5956 

5957 For example: 

5958 ``` 

5959 %a = "tf.opA"() 

5960 %b = "tf.opB"() 

5961 %replicated_input = "tf.TPUReplicatedInput"(%a, %b) 

5962 %computation = "tf.Computation"(%replicated_input) 

5963 ``` 

5964 The above computation has a replicated input of two replicas. 

5965 

5966 Args: 

5967 inputs: A list of at least 1 `Tensor` objects with the same type. 

5968 is_mirrored_variable: An optional `bool`. Defaults to `False`. 

5969 index: An optional `int`. Defaults to `-1`. 

5970 is_packed: An optional `bool`. Defaults to `False`. 

5971 name: A name for the operation (optional). 

5972 

5973 Returns: 

5974 A `Tensor`. Has the same type as `inputs`. 

5975 """ 

5976 _ctx = _context._context or _context.context() 

5977 tld = _ctx._thread_local_data 

5978 if tld.is_eager: 

5979 try: 

5980 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5981 _ctx, "TPUReplicatedInput", name, inputs, "is_mirrored_variable", 

5982 is_mirrored_variable, "index", index, "is_packed", is_packed) 

5983 return _result 

5984 except _core._NotOkStatusException as e: 

5985 _ops.raise_from_not_ok_status(e, name) 

5986 except _core._FallbackException: 

5987 pass 

5988 try: 

5989 return tpu_replicated_input_eager_fallback( 

5990 inputs, is_mirrored_variable=is_mirrored_variable, index=index, 

5991 is_packed=is_packed, name=name, ctx=_ctx) 

5992 except _core._SymbolicException: 

5993 pass # Add nodes to the TensorFlow graph. 

5994 # Add nodes to the TensorFlow graph. 

5995 if not isinstance(inputs, (list, tuple)): 

5996 raise TypeError( 

5997 "Expected list for 'inputs' argument to " 

5998 "'tpu_replicated_input' Op, not %r." % inputs) 

5999 _attr_N = len(inputs) 

6000 if is_mirrored_variable is None: 

6001 is_mirrored_variable = False 

6002 is_mirrored_variable = _execute.make_bool(is_mirrored_variable, "is_mirrored_variable") 

6003 if index is None: 

6004 index = -1 

6005 index = _execute.make_int(index, "index") 

6006 if is_packed is None: 

6007 is_packed = False 

6008 is_packed = _execute.make_bool(is_packed, "is_packed") 

6009 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6010 "TPUReplicatedInput", inputs=inputs, 

6011 is_mirrored_variable=is_mirrored_variable, 

6012 index=index, is_packed=is_packed, name=name) 

6013 _result = _outputs[:] 

6014 if _execute.must_record_gradient(): 

6015 _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), 

6016 "is_mirrored_variable", 

6017 _op._get_attr_bool("is_mirrored_variable"), "index", 

6018 _op._get_attr_int("index"), "is_packed", 

6019 _op._get_attr_bool("is_packed")) 

6020 _inputs_flat = _op.inputs 

6021 _execute.record_gradient( 

6022 "TPUReplicatedInput", _inputs_flat, _attrs, _result) 

6023 _result, = _result 

6024 return _result 

6025 

6026TPUReplicatedInput = tf_export("raw_ops.TPUReplicatedInput")(_ops.to_raw_op(tpu_replicated_input)) 

6027 

6028 

6029def tpu_replicated_input_eager_fallback(inputs, is_mirrored_variable, index, is_packed, name, ctx): 

6030 if not isinstance(inputs, (list, tuple)): 

6031 raise TypeError( 

6032 "Expected list for 'inputs' argument to " 

6033 "'tpu_replicated_input' Op, not %r." % inputs) 

6034 _attr_N = len(inputs) 

6035 if is_mirrored_variable is None: 

6036 is_mirrored_variable = False 

6037 is_mirrored_variable = _execute.make_bool(is_mirrored_variable, "is_mirrored_variable") 

6038 if index is None: 

6039 index = -1 

6040 index = _execute.make_int(index, "index") 

6041 if is_packed is None: 

6042 is_packed = False 

6043 is_packed = _execute.make_bool(is_packed, "is_packed") 

6044 _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) 

6045 _inputs_flat = list(inputs) 

6046 _attrs = ("N", _attr_N, "T", _attr_T, "is_mirrored_variable", 

6047 is_mirrored_variable, "index", index, "is_packed", is_packed) 

6048 _result = _execute.execute(b"TPUReplicatedInput", 1, inputs=_inputs_flat, 

6049 attrs=_attrs, ctx=ctx, name=name) 

6050 if _execute.must_record_gradient(): 

6051 _execute.record_gradient( 

6052 "TPUReplicatedInput", _inputs_flat, _attrs, _result) 

6053 _result, = _result 

6054 return _result 

6055 

6056 

6057def tpu_replicated_output(input, num_replicas, name=None): 

6058 r"""Connects N outputs from an N-way replicated TPU computation. 

6059 

6060 This operation holds a replicated output from a `tpu.replicate()` computation subgraph. 

6061 Each replicated output has the same shape and type as the input. 

6062 

6063 For example: 

6064 ``` 

6065 %computation = "tf.Computation"() 

6066 %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) 

6067 ``` 

6068 The above computation has a replicated output of two replicas. 

6069 

6070 Args: 

6071 input: A `Tensor`. 

6072 num_replicas: An `int` that is `>= 1`. 

6073 name: A name for the operation (optional). 

6074 

6075 Returns: 

6076 A list of `num_replicas` `Tensor` objects with the same type as `input`. 

6077 """ 

6078 _ctx = _context._context or _context.context() 

6079 tld = _ctx._thread_local_data 

6080 if tld.is_eager: 

6081 try: 

6082 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6083 _ctx, "TPUReplicatedOutput", name, input, "num_replicas", 

6084 num_replicas) 

6085 return _result 

6086 except _core._NotOkStatusException as e: 

6087 _ops.raise_from_not_ok_status(e, name) 

6088 except _core._FallbackException: 

6089 pass 

6090 try: 

6091 return tpu_replicated_output_eager_fallback( 

6092 input, num_replicas=num_replicas, name=name, ctx=_ctx) 

6093 except _core._SymbolicException: 

6094 pass # Add nodes to the TensorFlow graph. 

6095 # Add nodes to the TensorFlow graph. 

6096 num_replicas = _execute.make_int(num_replicas, "num_replicas") 

6097 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6098 "TPUReplicatedOutput", input=input, num_replicas=num_replicas, 

6099 name=name) 

6100 _result = _outputs[:] 

6101 if _execute.must_record_gradient(): 

6102 _attrs = ("num_replicas", _op._get_attr_int("num_replicas"), "T", 

6103 _op._get_attr_type("T")) 

6104 _inputs_flat = _op.inputs 

6105 _execute.record_gradient( 

6106 "TPUReplicatedOutput", _inputs_flat, _attrs, _result) 

6107 return _result 

6108 

6109TPUReplicatedOutput = tf_export("raw_ops.TPUReplicatedOutput")(_ops.to_raw_op(tpu_replicated_output)) 
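# Sketch mirroring the pseudo-IR examples above in Python: two per-replica
# tensors are fused into one replicated input, a computation consumes it, and
# the replicated result is split back into two per-replica outputs. The
# `computation` callable is hypothetical.
def _example_replicated_io(a, b, computation):
  replicated = tpu_replicated_input([a, b])
  result = computation(replicated)
  return tpu_replicated_output(result, num_replicas=2)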

6110 

6111 

6112def tpu_replicated_output_eager_fallback(input, num_replicas, name, ctx): 

6113 num_replicas = _execute.make_int(num_replicas, "num_replicas") 

6114 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) 

6115 _inputs_flat = [input] 

6116 _attrs = ("num_replicas", num_replicas, "T", _attr_T) 

6117 _result = _execute.execute(b"TPUReplicatedOutput", num_replicas, 

6118 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

6119 name=name) 

6120 if _execute.must_record_gradient(): 

6121 _execute.record_gradient( 

6122 "TPUReplicatedOutput", _inputs_flat, _attrs, _result) 

6123 return _result 

6124 
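# --- Editor's note: illustrative sketch, not part of the machine-generated file.
# TPUReplicatedInput and TPUReplicatedOutput bundle per-replica values into a
# single replicated handle and split the replicated result back out; they are
# normally inserted by `tpu.replicate()`. The hypothetical helper below only
# builds the ops into a graph, so no TPU is needed to run it.
def _example_tpu_replicated_io():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops
  graph = _ops.Graph()
  with graph.as_default():
    a = constant_op.constant([[1.0, 2.0]])  # value fed to replica 0
    b = constant_op.constant([[3.0, 4.0]])  # value fed to replica 1
    # Merge the two per-replica tensors into one replicated input.
    replicated = tpu_replicated_input([a, b])
    body = array_ops.identity(replicated)   # stand-in for the replicated body
    # Recover one output tensor per replica from the replicated result.
    out0, out1 = tpu_replicated_output(body, num_replicas=2)
  return out0, out1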

6125 

6126def worker_heartbeat(request, name=None): 

6127 r"""Worker heartbeat op. 

6128 

6129 Heartbeats may be sent periodically to indicate the coordinator is still active, 

6130 to retrieve the current worker status, and to expedite shutdown when necessary. 

6131 

6132 Args: 

6133 request: A `Tensor` of type `string`. 

6134 A string tensor containing a serialized WorkerHeartbeatRequest. 

6135 name: A name for the operation (optional). 

6136 

6137 Returns: 

6138 A `Tensor` of type `string`. 

6139 """ 

6140 _ctx = _context._context or _context.context() 

6141 tld = _ctx._thread_local_data 

6142 if tld.is_eager: 

6143 try: 

6144 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6145 _ctx, "WorkerHeartbeat", name, request) 

6146 return _result 

6147 except _core._NotOkStatusException as e: 

6148 _ops.raise_from_not_ok_status(e, name) 

6149 except _core._FallbackException: 

6150 pass 

6151 try: 

6152 return worker_heartbeat_eager_fallback( 

6153 request, name=name, ctx=_ctx) 

6154 except _core._SymbolicException: 

6155 pass # Add nodes to the TensorFlow graph. 

6156 # Add nodes to the TensorFlow graph. 

6157 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6158 "WorkerHeartbeat", request=request, name=name) 

6159 _result = _outputs[:] 

6160 if _execute.must_record_gradient(): 

6161 _attrs = () 

6162 _inputs_flat = _op.inputs 

6163 _execute.record_gradient( 

6164 "WorkerHeartbeat", _inputs_flat, _attrs, _result) 

6165 _result, = _result 

6166 return _result 

6167 

6168WorkerHeartbeat = tf_export("raw_ops.WorkerHeartbeat")(_ops.to_raw_op(worker_heartbeat)) 

6169 

6170 

6171def worker_heartbeat_eager_fallback(request, name, ctx): 

6172 request = _ops.convert_to_tensor(request, _dtypes.string) 

6173 _inputs_flat = [request] 

6174 _attrs = None 

6175 _result = _execute.execute(b"WorkerHeartbeat", 1, inputs=_inputs_flat, 

6176 attrs=_attrs, ctx=ctx, name=name) 

6177 if _execute.must_record_gradient(): 

6178 _execute.record_gradient( 

6179 "WorkerHeartbeat", _inputs_flat, _attrs, _result) 

6180 _result, = _result 

6181 return _result 

6182 
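# --- Editor's note: illustrative sketch, not part of the machine-generated file.
# `worker_heartbeat` consumes a serialized WorkerHeartbeatRequest and returns a
# serialized WorkerHeartbeatResponse as a string tensor. An empty string is a
# valid serialized proto (all fields left at their defaults); the call may
# still fail unless the runtime has a heartbeat service configured, so this
# hypothetical helper guards it with a try/except.
def _example_worker_heartbeat():
  try:
    # Returns serialized WorkerHeartbeatResponse bytes on success.
    return worker_heartbeat(request="")
  except Exception:  # e.g. no heartbeat service available in this runtime
    return None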

6183 

6184def xla_concat_nd(inputs, num_concats, paddings=[], name=None): 

6185 r"""Concats input tensor slices across all dimensions. 

6186 

6187 An op which merges the input tensor slices based on the given num_concats 

6188 attribute, optionally strips paddings, and returns the merged tensor without 

6189 paddings. 

6190 

6191 This op may be generated via the TPU bridge. 

6192 

6193 For example, with `input` tensor: 

6194 ``` 

6195 [[0, 1], 

6196 [4, 5]] 

6197 [[2, 3], 

6198 [6, 7]] 

6199 [[8, 9], 

6200 [12, 13]] 

6201 [[10, 11], 

6202 [14, 15]] 

6203 ``` 

6204 `num_concats`: 

6205 ``` 

6206 [2, 2] 

6207 ``` 

6208 and `paddings`: 

6209 ``` 

6210 [1, 1] 

6211 ``` 

6212 the expected `output` is: 

6213 ``` 

6214 [[0, 1, 2], 

6215 [4, 5, 6], 

6216 [8, 9, 10]] 

6217 ``` 

6218 

6219 Args: 

6220 inputs: A list of at least 1 `Tensor` objects with the same type. 

6221 Input tensor slices in row-major order to merge across all dimensions. All 

6222 inputs must have the same shape. 


6228 num_concats: A list of `ints`. Number of ways to merge per dimension. 

6229 paddings: An optional list of `ints`. Defaults to `[]`. 

6230 Optional list of right paddings per dimension to strip from the final merged 

6231 tensor. These paddings must not exceed the dimension size of the merged result 

6232 prior to stripping paddings. 

6233 name: A name for the operation (optional). 

6234 

6235 Returns: 

6236 A `Tensor`. Has the same type as `inputs`. The output tensor formed by merging the input slices according to `num_concats`. 

6237 """ 

6238 _ctx = _context._context or _context.context() 

6239 tld = _ctx._thread_local_data 

6240 if tld.is_eager: 

6241 try: 

6242 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6243 _ctx, "XlaConcatND", name, inputs, "num_concats", num_concats, 

6244 "paddings", paddings) 

6245 return _result 

6246 except _core._NotOkStatusException as e: 

6247 _ops.raise_from_not_ok_status(e, name) 

6248 except _core._FallbackException: 

6249 pass 

6250 try: 

6251 return xla_concat_nd_eager_fallback( 

6252 inputs, num_concats=num_concats, paddings=paddings, name=name, 

6253 ctx=_ctx) 

6254 except _core._SymbolicException: 

6255 pass # Add nodes to the TensorFlow graph. 

6256 # Add nodes to the TensorFlow graph. 

6257 if not isinstance(inputs, (list, tuple)): 

6258 raise TypeError( 

6259 "Expected list for 'inputs' argument to " 

6260 "'xla_concat_nd' Op, not %r." % inputs) 

6261 _attr_N = len(inputs) 

6262 if not isinstance(num_concats, (list, tuple)): 

6263 raise TypeError( 

6264 "Expected list for 'num_concats' argument to " 

6265 "'xla_concat_nd' Op, not %r." % num_concats) 

6266 num_concats = [_execute.make_int(_i, "num_concats") for _i in num_concats] 

6267 if paddings is None: 

6268 paddings = [] 

6269 if not isinstance(paddings, (list, tuple)): 

6270 raise TypeError( 

6271 "Expected list for 'paddings' argument to " 

6272 "'xla_concat_nd' Op, not %r." % paddings) 

6273 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

6274 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6275 "XlaConcatND", inputs=inputs, num_concats=num_concats, 

6276 paddings=paddings, name=name) 

6277 _result = _outputs[:] 

6278 if _execute.must_record_gradient(): 

6279 _attrs = ("T", _op._get_attr_type("T"), "N", _op._get_attr_int("N"), 

6280 "num_concats", _op.get_attr("num_concats"), "paddings", 

6281 _op.get_attr("paddings")) 

6282 _inputs_flat = _op.inputs 

6283 _execute.record_gradient( 

6284 "XlaConcatND", _inputs_flat, _attrs, _result) 

6285 _result, = _result 

6286 return _result 

6287 

6288XlaConcatND = tf_export("raw_ops.XlaConcatND")(_ops.to_raw_op(xla_concat_nd)) 

6289 

6290 

6291def xla_concat_nd_eager_fallback(inputs, num_concats, paddings, name, ctx): 

6292 if not isinstance(inputs, (list, tuple)): 

6293 raise TypeError( 

6294 "Expected list for 'inputs' argument to " 

6295 "'xla_concat_nd' Op, not %r." % inputs) 

6296 _attr_N = len(inputs) 

6297 if not isinstance(num_concats, (list, tuple)): 

6298 raise TypeError( 

6299 "Expected list for 'num_concats' argument to " 

6300 "'xla_concat_nd' Op, not %r." % num_concats) 

6301 num_concats = [_execute.make_int(_i, "num_concats") for _i in num_concats] 

6302 if paddings is None: 

6303 paddings = [] 

6304 if not isinstance(paddings, (list, tuple)): 

6305 raise TypeError( 

6306 "Expected list for 'paddings' argument to " 

6307 "'xla_concat_nd' Op, not %r." % paddings) 

6308 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

6309 _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, []) 

6310 _inputs_flat = list(inputs) 

6311 _attrs = ("T", _attr_T, "N", _attr_N, "num_concats", num_concats, 

6312 "paddings", paddings) 

6313 _result = _execute.execute(b"XlaConcatND", 1, inputs=_inputs_flat, 

6314 attrs=_attrs, ctx=ctx, name=name) 

6315 if _execute.must_record_gradient(): 

6316 _execute.record_gradient( 

6317 "XlaConcatND", _inputs_flat, _attrs, _result) 

6318 _result, = _result 

6319 return _result 

6320 
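# --- Editor's note: illustrative sketch, not part of the machine-generated file.
# This hypothetical helper reproduces the XlaConcatND docstring example: four
# 2x2 slices, given in row-major order, are merged two ways per dimension and
# the right padding of one row and one column is stripped, leaving a 3x3
# result.
def _example_xla_concat_nd():
  from tensorflow.python.framework import constant_op
  slices = [
      constant_op.constant([[0, 1], [4, 5]]),
      constant_op.constant([[2, 3], [6, 7]]),
      constant_op.constant([[8, 9], [12, 13]]),
      constant_op.constant([[10, 11], [14, 15]]),
  ]
  # Expected result: [[0, 1, 2], [4, 5, 6], [8, 9, 10]]
  return xla_concat_nd(slices, num_concats=[2, 2], paddings=[1, 1])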

6321 

6322def xla_split_nd(input, N, num_splits, paddings=[], name=None): 

6323 r"""Splits input tensor across all dimensions. 

6324 

6325 An op which slices the input tensor based on the given num_splits attribute, 

6326 optionally pads the slices, and returns them. Slices are returned in 

6327 row-major order. 

6328 

6329 This op may be generated via the TPU bridge. 

6330 

6331 For example, with `input` tensor: 

6332 ``` 

6333 [[0, 1, 2], 

6334 [3, 4, 5], 

6335 [6, 7, 8]] 

6336 ``` 

6337 `num_splits`: 

6338 ``` 

6339 [2, 2] 

6340 ``` 

6341 and `paddings`: 

6342 ``` 

6343 [1, 1] 

6344 ``` 

6345 the expected `outputs` are: 

6346 ``` 

6347 [[0, 1], 

6348 [3, 4]] 

6349 [[2, 0], 

6350 [5, 0]] 

6351 [[6, 7], 

6352 [0, 0]] 

6353 [[8, 0], 

6354 [0, 0]] 

6355 ``` 

6356 

6357 Args: 

6358 input: A `Tensor`. Input tensor to split across all dimensions. 


6364 N: An `int` that is `>= 1`. 

6365 num_splits: A list of `ints`. 

6366 Number of ways to split per dimension. Shape dimensions must be evenly 

6367 divisible. 

6368 paddings: An optional list of `ints`. Defaults to `[]`. 

6369 Optional list of right paddings per dimension of input tensor to apply before 

6370 splitting. This can be used to make a dimension evenly divisible. 

6371 name: A name for the operation (optional). 

6372 

6373 Returns: 

6374 A list of `N` `Tensor` objects with the same type as `input`. Output slices based on `input` and `num_splits`, in row-major order. 

6375 """ 

6376 _ctx = _context._context or _context.context() 

6377 tld = _ctx._thread_local_data 

6378 if tld.is_eager: 

6379 try: 

6380 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6381 _ctx, "XlaSplitND", name, input, "N", N, "num_splits", num_splits, 

6382 "paddings", paddings) 

6383 return _result 

6384 except _core._NotOkStatusException as e: 

6385 _ops.raise_from_not_ok_status(e, name) 

6386 except _core._FallbackException: 

6387 pass 

6388 try: 

6389 return xla_split_nd_eager_fallback( 

6390 input, N=N, num_splits=num_splits, paddings=paddings, name=name, 

6391 ctx=_ctx) 

6392 except _core._SymbolicException: 

6393 pass # Add nodes to the TensorFlow graph. 

6394 # Add nodes to the TensorFlow graph. 

6395 N = _execute.make_int(N, "N") 

6396 if not isinstance(num_splits, (list, tuple)): 

6397 raise TypeError( 

6398 "Expected list for 'num_splits' argument to " 

6399 "'xla_split_nd' Op, not %r." % num_splits) 

6400 num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] 

6401 if paddings is None: 

6402 paddings = [] 

6403 if not isinstance(paddings, (list, tuple)): 

6404 raise TypeError( 

6405 "Expected list for 'paddings' argument to " 

6406 "'xla_split_nd' Op, not %r." % paddings) 

6407 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

6408 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6409 "XlaSplitND", input=input, N=N, num_splits=num_splits, 

6410 paddings=paddings, name=name) 

6411 _result = _outputs[:] 

6412 if _execute.must_record_gradient(): 

6413 _attrs = ("T", _op._get_attr_type("T"), "N", _op._get_attr_int("N"), 

6414 "num_splits", _op.get_attr("num_splits"), "paddings", 

6415 _op.get_attr("paddings")) 

6416 _inputs_flat = _op.inputs 

6417 _execute.record_gradient( 

6418 "XlaSplitND", _inputs_flat, _attrs, _result) 

6419 return _result 

6420 

6421XlaSplitND = tf_export("raw_ops.XlaSplitND")(_ops.to_raw_op(xla_split_nd)) 

6422 

6423 

6424def xla_split_nd_eager_fallback(input, N, num_splits, paddings, name, ctx): 

6425 N = _execute.make_int(N, "N") 

6426 if not isinstance(num_splits, (list, tuple)): 

6427 raise TypeError( 

6428 "Expected list for 'num_splits' argument to " 

6429 "'xla_split_nd' Op, not %r." % num_splits) 

6430 num_splits = [_execute.make_int(_i, "num_splits") for _i in num_splits] 

6431 if paddings is None: 

6432 paddings = [] 

6433 if not isinstance(paddings, (list, tuple)): 

6434 raise TypeError( 

6435 "Expected list for 'paddings' argument to " 

6436 "'xla_split_nd' Op, not %r." % paddings) 

6437 paddings = [_execute.make_int(_i, "paddings") for _i in paddings] 

6438 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) 

6439 _inputs_flat = [input] 

6440 _attrs = ("T", _attr_T, "N", N, "num_splits", num_splits, "paddings", 

6441 paddings) 

6442 _result = _execute.execute(b"XlaSplitND", N, inputs=_inputs_flat, 

6443 attrs=_attrs, ctx=ctx, name=name) 

6444 if _execute.must_record_gradient(): 

6445 _execute.record_gradient( 

6446 "XlaSplitND", _inputs_flat, _attrs, _result) 

6447 return _result 

6448
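# --- Editor's note: illustrative sketch, not part of the machine-generated file.
# This hypothetical helper reproduces the XlaSplitND docstring example: a 3x3
# tensor is right-padded by 1 along each dimension (to 4x4) and then split two
# ways per dimension into four 2x2 slices, returned in row-major order.
def _example_xla_split_nd():
  from tensorflow.python.framework import constant_op
  x = constant_op.constant([[0, 1, 2],
                            [3, 4, 5],
                            [6, 7, 8]])
  # Expected slices: [[0, 1], [3, 4]], [[2, 0], [5, 0]],
  #                  [[6, 7], [0, 0]], [[8, 0], [0, 0]]
  return xla_split_nd(x, N=4, num_splits=[2, 2], paddings=[1, 1])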