1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23_CudnnRNNOutput = collections.namedtuple( 

24 "CudnnRNN", 

25 ["output", "output_h", "output_c", "reserve_space"]) 

26 

27 

28def cudnn_rnn(input, input_h, input_c, params, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, is_training=True, name=None): 

29 r"""A RNN backed by cuDNN. 

30 

31 Computes the RNN from the input and initial states, with respect to the params 

32 buffer. 

33 

34 rnn_mode: Indicates the type of the RNN model. 

35 input_mode: Indicate whether there is a linear projection between the input and 

36 the actual computation before the first layer. 'skip_input' is only allowed 

37 when input_size == num_units; 'auto_select' implies 'skip_input' when 

38 input_size == num_units; otherwise, it implies 'linear_input'. 

39 direction: Indicates whether a bidirectional model will be used. Should be 

40 "unidirectional" or "bidirectional". 

41 dropout: Dropout probability. When set to 0., dropout is disabled. 

42 seed: The 1st part of a seed to initialize dropout. 

43 seed2: The 2nd part of a seed to initialize dropout. 

44 input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. 

45 input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, 

46 num_units]. 

47 input_c: For LSTM, a 3-D tensor with the shape of 

48 [num_layer * dir, batch, num_units]. For other models, it is ignored. 

49 params: A 1-D tensor that contains the weights and biases in an opaque layout. 

50 The size must be created through CudnnRNNParamsSize, and initialized 

51 separately. Note that they might not be compatible across different 

52 generations. So it is a good idea to save and restore 

53 output: A 3-D tensor with the shape of [seq_length, batch_size, 

54 dir * num_units]. 

55 output_h: The same shape has input_h. 

56 output_c: The same shape as input_c for LSTM. An empty tensor for other models. 

57 is_training: Indicates whether this operation is used for inference or 

58 training. 

59 reserve_space: An opaque tensor that can be used in backprop calculation. It 

60 is only produced if is_training is false. 

61 

62 Args: 

63 input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 

64 input_h: A `Tensor`. Must have the same type as `input`. 

65 input_c: A `Tensor`. Must have the same type as `input`. 

66 params: A `Tensor`. Must have the same type as `input`. 

67 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

68 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

69 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

70 dropout: An optional `float`. Defaults to `0`. 

71 seed: An optional `int`. Defaults to `0`. 

72 seed2: An optional `int`. Defaults to `0`. 

73 is_training: An optional `bool`. Defaults to `True`. 

74 name: A name for the operation (optional). 

75 

76 Returns: 

77 A tuple of `Tensor` objects (output, output_h, output_c, reserve_space). 

78 

79 output: A `Tensor`. Has the same type as `input`. 

80 output_h: A `Tensor`. Has the same type as `input`. 

81 output_c: A `Tensor`. Has the same type as `input`. 

82 reserve_space: A `Tensor`. Has the same type as `input`. 

83 """ 

84 _ctx = _context._context or _context.context() 

85 tld = _ctx._thread_local_data 

86 if tld.is_eager: 

87 try: 

88 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

89 _ctx, "CudnnRNN", name, input, input_h, input_c, params, "rnn_mode", 

90 rnn_mode, "input_mode", input_mode, "direction", direction, "dropout", 

91 dropout, "seed", seed, "seed2", seed2, "is_training", is_training) 

92 _result = _CudnnRNNOutput._make(_result) 

93 return _result 

94 except _core._NotOkStatusException as e: 

95 _ops.raise_from_not_ok_status(e, name) 

96 except _core._FallbackException: 

97 pass 

98 try: 

99 return cudnn_rnn_eager_fallback( 

100 input, input_h, input_c, params, rnn_mode=rnn_mode, 

101 input_mode=input_mode, direction=direction, dropout=dropout, 

102 seed=seed, seed2=seed2, is_training=is_training, name=name, 

103 ctx=_ctx) 

104 except _core._SymbolicException: 

105 pass # Add nodes to the TensorFlow graph. 

106 # Add nodes to the TensorFlow graph. 

107 if rnn_mode is None: 

108 rnn_mode = "lstm" 

109 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

110 if input_mode is None: 

111 input_mode = "linear_input" 

112 input_mode = _execute.make_str(input_mode, "input_mode") 

113 if direction is None: 

114 direction = "unidirectional" 

115 direction = _execute.make_str(direction, "direction") 

116 if dropout is None: 

117 dropout = 0 

118 dropout = _execute.make_float(dropout, "dropout") 

119 if seed is None: 

120 seed = 0 

121 seed = _execute.make_int(seed, "seed") 

122 if seed2 is None: 

123 seed2 = 0 

124 seed2 = _execute.make_int(seed2, "seed2") 

125 if is_training is None: 

126 is_training = True 

127 is_training = _execute.make_bool(is_training, "is_training") 

128 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

129 "CudnnRNN", input=input, input_h=input_h, input_c=input_c, 

130 params=params, rnn_mode=rnn_mode, input_mode=input_mode, 

131 direction=direction, dropout=dropout, seed=seed, 

132 seed2=seed2, is_training=is_training, name=name) 

133 _result = _outputs[:] 

134 if _execute.must_record_gradient(): 

135 _attrs = ("T", _op._get_attr_type("T"), "rnn_mode", 

136 _op.get_attr("rnn_mode"), "input_mode", 

137 _op.get_attr("input_mode"), "direction", 

138 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

139 "seed", _op._get_attr_int("seed"), "seed2", 

140 _op._get_attr_int("seed2"), "is_training", 

141 _op._get_attr_bool("is_training")) 

142 _inputs_flat = _op.inputs 

143 _execute.record_gradient( 

144 "CudnnRNN", _inputs_flat, _attrs, _result) 

145 _result = _CudnnRNNOutput._make(_result) 

146 return _result 

147 

148CudnnRNN = tf_export("raw_ops.CudnnRNN")(_ops.to_raw_op(cudnn_rnn)) 
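# Usage sketch (editorial addition, not part of the generated wrappers): a
# minimal, hedged example of driving the raw CudnnRNN op eagerly for a
# single-layer unidirectional LSTM. All shapes and the `tf` alias
# (`import tensorflow as tf`) are illustrative assumptions; the op requires a
# CUDA GPU with cuDNN. Kept as comments so importing this module stays
# side-effect free.
#
#   import tensorflow as tf
#
#   seq_length, batch_size, input_size, num_units = 8, 4, 16, 32
#   x = tf.random.normal([seq_length, batch_size, input_size])
#   h = tf.zeros([1, batch_size, num_units])  # num_layers * dir == 1
#   c = tf.zeros([1, batch_size, num_units])  # LSTM cell state
#   params_size = tf.raw_ops.CudnnRNNParamsSize(
#       num_layers=1, num_units=num_units, input_size=input_size,
#       T=tf.float32, S=tf.int32)
#   params = tf.random.normal(tf.reshape(params_size, [1]))
#   output, output_h, output_c, reserve_space = tf.raw_ops.CudnnRNN(
#       input=x, input_h=h, input_c=c, params=params, is_training=True)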

def cudnn_rnn_eager_fallback(input, input_h, input_c, params, rnn_mode, input_mode, direction, dropout, seed, seed2, is_training, name, ctx):
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if is_training is None:
    is_training = True
  is_training = _execute.make_bool(is_training, "is_training")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  (input, input_h, input_c, params) = _inputs_T
  _inputs_flat = [input, input_h, input_c, params]
  _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode,
  "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2,
  "is_training", is_training)
  _result = _execute.execute(b"CudnnRNN", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CudnnRNN", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNOutput._make(_result)
  return _result

_CudnnRNNBackpropOutput = collections.namedtuple(
    "CudnnRNNBackprop",
    ["input_backprop", "input_h_backprop", "input_c_backprop", "params_backprop"])


def cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, name=None):
  r"""Backprop step of CudnnRNN.

  Compute the backprop of both data and weights in an RNN.

  rnn_mode: Indicates the type of the RNN model.
  input_mode: Indicates whether there is a linear projection between the input and
    the actual computation before the first layer. 'skip_input' is only allowed
    when input_size == num_units; 'auto_select' implies 'skip_input' when
    input_size == num_units; otherwise, it implies 'linear_input'.
  direction: Indicates whether a bidirectional model will be used. Should be
    "unidirectional" or "bidirectional".
  dropout: Dropout probability. When set to 0., dropout is disabled.
  seed: The 1st part of a seed to initialize dropout.
  seed2: The 2nd part of a seed to initialize dropout.
  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
  input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
    num_units].
  input_c: For LSTM, a 3-D tensor with the shape of
    [num_layer * dir, batch, num_units]. For other models, it is ignored.
  params: A 1-D tensor that contains the weights and biases in an opaque layout.
    The size must be created through CudnnRNNParamsSize, and initialized
    separately. Note that they might not be compatible across different
    generations. So it is a good idea to save and restore the canonical weights
    and biases instead.
  output: A 3-D tensor with the shape of [seq_length, batch_size,
    dir * num_units].
  output_h: The same shape as input_h.
  output_c: The same shape as input_c for LSTM. An empty tensor for other models.
  output_backprop: A 3-D tensor with the same shape as output in the forward pass.
  output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
    pass.
  output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
    pass.
  reserve_space: The same reserve_space produced in the forward operation.
  input_backprop: The backprop to input in the forward pass. Has the same shape
    as input.
  input_h_backprop: The backprop to input_h in the forward pass. Has the same
    shape as input_h.
  input_c_backprop: The backprop to input_c in the forward pass. Has the same
    shape as input_c.
  params_backprop: The backprop to the params buffer in the forward pass. Has the
    same shape as params.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_h: A `Tensor`. Must have the same type as `input`.
    input_c: A `Tensor`. Must have the same type as `input`.
    params: A `Tensor`. Must have the same type as `input`.
    output: A `Tensor`. Must have the same type as `input`.
    output_h: A `Tensor`. Must have the same type as `input`.
    output_c: A `Tensor`. Must have the same type as `input`.
    output_backprop: A `Tensor`. Must have the same type as `input`.
    output_h_backprop: A `Tensor`. Must have the same type as `input`.
    output_c_backprop: A `Tensor`. Must have the same type as `input`.
    reserve_space: A `Tensor`. Must have the same type as `input`.
    rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`.
    input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`.
    direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`.
    dropout: An optional `float`. Defaults to `0`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (input_backprop, input_h_backprop, input_c_backprop, params_backprop).

    input_backprop: A `Tensor`. Has the same type as `input`.
    input_h_backprop: A `Tensor`. Has the same type as `input`.
    input_c_backprop: A `Tensor`. Has the same type as `input`.
    params_backprop: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CudnnRNNBackprop", name, input, input_h, input_c, params,
        output, output_h, output_c, output_backprop, output_h_backprop,
        output_c_backprop, reserve_space, "rnn_mode", rnn_mode, "input_mode",
        input_mode, "direction", direction, "dropout", dropout, "seed", seed,
        "seed2", seed2)
      _result = _CudnnRNNBackpropOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return cudnn_rnn_backprop_eager_fallback(
          input, input_h, input_c, params, output, output_h, output_c,
          output_backprop, output_h_backprop, output_c_backprop,
          reserve_space, rnn_mode=rnn_mode, input_mode=input_mode,
          direction=direction, dropout=dropout, seed=seed, seed2=seed2,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CudnnRNNBackprop", input=input, input_h=input_h, input_c=input_c,
                            params=params, output=output, output_h=output_h,
                            output_c=output_c,
                            output_backprop=output_backprop,
                            output_h_backprop=output_h_backprop,
                            output_c_backprop=output_c_backprop,
                            reserve_space=reserve_space, rnn_mode=rnn_mode,
                            input_mode=input_mode, direction=direction,
                            dropout=dropout, seed=seed, seed2=seed2,
                            name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "rnn_mode",
              _op.get_attr("rnn_mode"), "input_mode",
              _op.get_attr("input_mode"), "direction",
              _op.get_attr("direction"), "dropout", _op.get_attr("dropout"),
              "seed", _op._get_attr_int("seed"), "seed2",
              _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CudnnRNNBackprop", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropOutput._make(_result)
  return _result

CudnnRNNBackprop = tf_export("raw_ops.CudnnRNNBackprop")(_ops.to_raw_op(cudnn_rnn_backprop))
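# Backprop pairing sketch (editorial addition): CudnnRNNBackprop consumes the
# forward pass tensors plus upstream gradients and returns gradients for
# input, input_h, input_c and params. A hedged fragment reusing the names from
# the CudnnRNN sketch above (the forward call must have used
# is_training=True so that reserve_space is populated); dy, dh, dc are
# assumed upstream gradients shaped like output, output_h and output_c. In
# normal use TensorFlow's gradient machinery invokes this op for you.
#
#   dy = tf.ones_like(output)
#   dh = tf.zeros_like(output_h)
#   dc = tf.zeros_like(output_c)
#   dx, dh0, dc0, dparams = tf.raw_ops.CudnnRNNBackprop(
#       input=x, input_h=h, input_c=c, params=params,
#       output=output, output_h=output_h, output_c=output_c,
#       output_backprop=dy, output_h_backprop=dh, output_c_backprop=dc,
#       reserve_space=reserve_space)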

def cudnn_rnn_backprop_eager_fallback(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, rnn_mode, input_mode, direction, dropout, seed, seed2, name, ctx):
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  (input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space) = _inputs_T
  _inputs_flat = [input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space]
  _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode,
  "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"CudnnRNNBackprop", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CudnnRNNBackprop", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropOutput._make(_result)
  return _result

_CudnnRNNBackpropV2Output = collections.namedtuple(
    "CudnnRNNBackpropV2",
    ["input_backprop", "input_h_backprop", "input_c_backprop", "params_backprop"])


def cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, name=None):
  r"""Backprop step of CudnnRNN.

  Compute the backprop of both data and weights in an RNN. Takes an extra
  "host_reserved" input beyond CudnnRNNBackprop, which is used to determine
  the RNN cudnnRNNAlgo_t and cudnnMathType_t.

  rnn_mode: Indicates the type of the RNN model.
  input_mode: Indicates whether there is a linear projection between the input and
    the actual computation before the first layer. 'skip_input' is only allowed
    when input_size == num_units; 'auto_select' implies 'skip_input' when
    input_size == num_units; otherwise, it implies 'linear_input'.
  direction: Indicates whether a bidirectional model will be used. Should be
    "unidirectional" or "bidirectional".
  dropout: Dropout probability. When set to 0., dropout is disabled.
  seed: The 1st part of a seed to initialize dropout.
  seed2: The 2nd part of a seed to initialize dropout.
  input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
  input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
    num_units].
  input_c: For LSTM, a 3-D tensor with the shape of
    [num_layer * dir, batch, num_units]. For other models, it is ignored.
  params: A 1-D tensor that contains the weights and biases in an opaque layout.
    The size must be created through CudnnRNNParamsSize, and initialized
    separately. Note that they might not be compatible across different
    generations. So it is a good idea to save and restore the canonical weights
    and biases instead.
  output: A 3-D tensor with the shape of [seq_length, batch_size,
    dir * num_units].
  output_h: The same shape as input_h.
  output_c: The same shape as input_c for LSTM. An empty tensor for other models.
  output_backprop: A 3-D tensor with the same shape as output in the forward pass.
  output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
    pass.
  output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
    pass.
  reserve_space: The same reserve_space produced in the forward operation.
  host_reserved: The same host_reserved produced in the forward operation.
  input_backprop: The backprop to input in the forward pass. Has the same shape
    as input.
  input_h_backprop: The backprop to input_h in the forward pass. Has the same
    shape as input_h.
  input_c_backprop: The backprop to input_c in the forward pass. Has the same
    shape as input_c.
  params_backprop: The backprop to the params buffer in the forward pass. Has the
    same shape as params.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_h: A `Tensor`. Must have the same type as `input`.
    input_c: A `Tensor`. Must have the same type as `input`.
    params: A `Tensor`. Must have the same type as `input`.
    output: A `Tensor`. Must have the same type as `input`.
    output_h: A `Tensor`. Must have the same type as `input`.
    output_c: A `Tensor`. Must have the same type as `input`.
    output_backprop: A `Tensor`. Must have the same type as `input`.
    output_h_backprop: A `Tensor`. Must have the same type as `input`.
    output_c_backprop: A `Tensor`. Must have the same type as `input`.
    reserve_space: A `Tensor`. Must have the same type as `input`.
    host_reserved: A `Tensor` of type `int8`.
    rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`.
    input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`.
    direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`.
    dropout: An optional `float`. Defaults to `0`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (input_backprop, input_h_backprop, input_c_backprop, params_backprop).

    input_backprop: A `Tensor`. Has the same type as `input`.
    input_h_backprop: A `Tensor`. Has the same type as `input`.
    input_c_backprop: A `Tensor`. Has the same type as `input`.
    params_backprop: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CudnnRNNBackpropV2", name, input, input_h, input_c, params,
        output, output_h, output_c, output_backprop, output_h_backprop,
        output_c_backprop, reserve_space, host_reserved, "rnn_mode", rnn_mode,
        "input_mode", input_mode, "direction", direction, "dropout", dropout,
        "seed", seed, "seed2", seed2)
      _result = _CudnnRNNBackpropV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return cudnn_rnn_backprop_v2_eager_fallback(
          input, input_h, input_c, params, output, output_h, output_c,
          output_backprop, output_h_backprop, output_c_backprop,
          reserve_space, host_reserved, rnn_mode=rnn_mode,
          input_mode=input_mode, direction=direction, dropout=dropout,
          seed=seed, seed2=seed2, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CudnnRNNBackpropV2", input=input, input_h=input_h, input_c=input_c,
                              params=params, output=output, output_h=output_h,
                              output_c=output_c,
                              output_backprop=output_backprop,
                              output_h_backprop=output_h_backprop,
                              output_c_backprop=output_c_backprop,
                              reserve_space=reserve_space,
                              host_reserved=host_reserved, rnn_mode=rnn_mode,
                              input_mode=input_mode, direction=direction,
                              dropout=dropout, seed=seed, seed2=seed2,
                              name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "rnn_mode",
              _op.get_attr("rnn_mode"), "input_mode",
              _op.get_attr("input_mode"), "direction",
              _op.get_attr("direction"), "dropout", _op.get_attr("dropout"),
              "seed", _op._get_attr_int("seed"), "seed2",
              _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CudnnRNNBackpropV2", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropV2Output._make(_result)
  return _result

CudnnRNNBackpropV2 = tf_export("raw_ops.CudnnRNNBackpropV2")(_ops.to_raw_op(cudnn_rnn_backprop_v2))

def cudnn_rnn_backprop_v2_eager_fallback(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved, rnn_mode, input_mode, direction, dropout, seed, seed2, name, ctx):
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  (input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space) = _inputs_T
  host_reserved = _ops.convert_to_tensor(host_reserved, _dtypes.int8)
  _inputs_flat = [input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved]
  _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode,
  "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"CudnnRNNBackpropV2", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CudnnRNNBackpropV2", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropV2Output._make(_result)
  return _result

_CudnnRNNBackpropV3Output = collections.namedtuple(
    "CudnnRNNBackpropV3",
    ["input_backprop", "input_h_backprop", "input_c_backprop", "params_backprop"])


def cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, num_proj=0, time_major=True, name=None):
  r"""Backprop step of CudnnRNNV3.

  Compute the backprop of both data and weights in an RNN. Takes an extra
  "sequence_lengths" input beyond CudnnRNNBackprop.

  rnn_mode: Indicates the type of the RNN model.
  input_mode: Indicates whether there is a linear projection between the input and
    the actual computation before the first layer. 'skip_input' is only allowed
    when input_size == num_units; 'auto_select' implies 'skip_input' when
    input_size == num_units; otherwise, it implies 'linear_input'.
  direction: Indicates whether a bidirectional model will be used. Should be
    "unidirectional" or "bidirectional".
  dropout: Dropout probability. When set to 0., dropout is disabled.
  seed: The 1st part of a seed to initialize dropout.
  seed2: The 2nd part of a seed to initialize dropout.
  input: If time_major is true, this is a 3-D tensor with the shape of
    [seq_length, batch_size, input_size]. If time_major is false, the shape is
    [batch_size, seq_length, input_size].
  input_h: If time_major is true, this is a 3-D tensor with the shape of
    [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
    is [batch_size, num_layer * dir, num_units].
  input_c: For LSTM, a 3-D tensor with the shape of
    [num_layer * dir, batch, num_units]. For other models, it is ignored.
  params: A 1-D tensor that contains the weights and biases in an opaque layout.
    The size must be created through CudnnRNNParamsSize, and initialized
    separately. Note that they might not be compatible across different
    generations. So it is a good idea to save and restore the canonical weights
    and biases instead.
  sequence_lengths: a vector of lengths of each input sequence.
  output: If time_major is true, this is a 3-D tensor with the shape of
    [seq_length, batch_size, dir * num_units]. If time_major is false, the
    shape is [batch_size, seq_length, dir * num_units].
  output_h: The same shape as input_h.
  output_c: The same shape as input_c for LSTM. An empty tensor for other models.
  output_backprop: A 3-D tensor with the same shape as output in the forward pass.
  output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
    pass.
  output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
    pass.
  time_major: Indicates whether the input/output format is time major or batch
    major.
  reserve_space: The same reserve_space produced in the forward operation.
  input_backprop: The backprop to input in the forward pass. Has the same shape
    as input.
  input_h_backprop: The backprop to input_h in the forward pass. Has the same
    shape as input_h.
  input_c_backprop: The backprop to input_c in the forward pass. Has the same
    shape as input_c.
  params_backprop: The backprop to the params buffer in the forward pass. Has the
    same shape as params.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_h: A `Tensor`. Must have the same type as `input`.
    input_c: A `Tensor`. Must have the same type as `input`.
    params: A `Tensor`. Must have the same type as `input`.
    sequence_lengths: A `Tensor` of type `int32`.
    output: A `Tensor`. Must have the same type as `input`.
    output_h: A `Tensor`. Must have the same type as `input`.
    output_c: A `Tensor`. Must have the same type as `input`.
    output_backprop: A `Tensor`. Must have the same type as `input`.
    output_h_backprop: A `Tensor`. Must have the same type as `input`.
    output_c_backprop: A `Tensor`. Must have the same type as `input`.
    reserve_space: A `Tensor`. Must have the same type as `input`.
    host_reserved: A `Tensor` of type `int8`.
    rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`.
    input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`.
    direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`.
    dropout: An optional `float`. Defaults to `0`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    num_proj: An optional `int`. Defaults to `0`.
    time_major: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (input_backprop, input_h_backprop, input_c_backprop, params_backprop).

    input_backprop: A `Tensor`. Has the same type as `input`.
    input_h_backprop: A `Tensor`. Has the same type as `input`.
    input_c_backprop: A `Tensor`. Has the same type as `input`.
    params_backprop: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CudnnRNNBackpropV3", name, input, input_h, input_c, params,
        sequence_lengths, output, output_h, output_c, output_backprop,
        output_h_backprop, output_c_backprop, reserve_space, host_reserved,
        "rnn_mode", rnn_mode, "input_mode", input_mode, "direction",
        direction, "dropout", dropout, "seed", seed, "seed2", seed2,
        "num_proj", num_proj, "time_major", time_major)
      _result = _CudnnRNNBackpropV3Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return cudnn_rnn_backprop_v3_eager_fallback(
          input, input_h, input_c, params, sequence_lengths, output, output_h,
          output_c, output_backprop, output_h_backprop, output_c_backprop,
          reserve_space, host_reserved, rnn_mode=rnn_mode,
          input_mode=input_mode, direction=direction, dropout=dropout,
          seed=seed, seed2=seed2, num_proj=num_proj, time_major=time_major,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if num_proj is None:
    num_proj = 0
  num_proj = _execute.make_int(num_proj, "num_proj")
  if time_major is None:
    time_major = True
  time_major = _execute.make_bool(time_major, "time_major")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CudnnRNNBackpropV3", input=input, input_h=input_h, input_c=input_c,
                              params=params,
                              sequence_lengths=sequence_lengths,
                              output=output, output_h=output_h,
                              output_c=output_c,
                              output_backprop=output_backprop,
                              output_h_backprop=output_h_backprop,
                              output_c_backprop=output_c_backprop,
                              reserve_space=reserve_space,
                              host_reserved=host_reserved, rnn_mode=rnn_mode,
                              input_mode=input_mode, direction=direction,
                              dropout=dropout, seed=seed, seed2=seed2,
                              num_proj=num_proj, time_major=time_major,
                              name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "rnn_mode",
              _op.get_attr("rnn_mode"), "input_mode",
              _op.get_attr("input_mode"), "direction",
              _op.get_attr("direction"), "dropout", _op.get_attr("dropout"),
              "seed", _op._get_attr_int("seed"), "seed2",
              _op._get_attr_int("seed2"), "num_proj",
              _op._get_attr_int("num_proj"), "time_major",
              _op._get_attr_bool("time_major"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CudnnRNNBackpropV3", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropV3Output._make(_result)
  return _result

CudnnRNNBackpropV3 = tf_export("raw_ops.CudnnRNNBackpropV3")(_ops.to_raw_op(cudnn_rnn_backprop_v3))
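# V3 pairing note (editorial addition): CudnnRNNBackpropV3 mirrors the
# CudnnRNNV3 forward op. Beyond the V1 inputs it threads through
# `sequence_lengths` (an int32 vector, one length per batch element) and the
# `host_reserved` tensor from the forward pass, and honors `num_proj` and
# `time_major`. A hedged fragment, assuming `output`, `output_h`, `output_c`,
# `reserve_space` and `host_reserved` came from a prior tf.raw_ops.CudnnRNNV3
# call with the same attrs:
#
#   seq_lens = tf.constant([8, 8, 5, 3], dtype=tf.int32)  # assumed lengths
#   grads = tf.raw_ops.CudnnRNNBackpropV3(
#       input=x, input_h=h, input_c=c, params=params,
#       sequence_lengths=seq_lens, output=output, output_h=output_h,
#       output_c=output_c, output_backprop=dy, output_h_backprop=dh,
#       output_c_backprop=dc, reserve_space=reserve_space,
#       host_reserved=host_reserved)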

def cudnn_rnn_backprop_v3_eager_fallback(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved, rnn_mode, input_mode, direction, dropout, seed, seed2, num_proj, time_major, name, ctx):
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if num_proj is None:
    num_proj = 0
  num_proj = _execute.make_int(num_proj, "num_proj")
  if time_major is None:
    time_major = True
  time_major = _execute.make_bool(time_major, "time_major")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  (input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space) = _inputs_T
  sequence_lengths = _ops.convert_to_tensor(sequence_lengths, _dtypes.int32)
  host_reserved = _ops.convert_to_tensor(host_reserved, _dtypes.int8)
  _inputs_flat = [input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved]
  _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode,
  "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2,
  "num_proj", num_proj, "time_major", time_major)
  _result = _execute.execute(b"CudnnRNNBackpropV3", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CudnnRNNBackpropV3", _inputs_flat, _attrs, _result)
  _result = _CudnnRNNBackpropV3Output._make(_result)
  return _result


def cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, name=None):
  r"""Converts CudnnRNN params from canonical form to usable form.

  Writes a set of weights into the opaque params buffer so they can be used in
  upcoming training or inference.

  Note that the params buffer may not be compatible across different GPUs. So any
  save and restoration should be converted to and from the canonical weights and
  biases.

  num_layers: Specifies the number of layers in the RNN model.
  num_units: Specifies the size of the hidden state.
  input_size: Specifies the size of the input state.
  weights: the canonical form of weights that can be used for saving
    and restoration. They are more likely to be compatible across different
    generations.
  biases: the canonical form of biases that can be used for saving
    and restoration. They are more likely to be compatible across different
    generations.
  num_params: number of parameter sets for all layers.
    Each layer may contain multiple parameter sets, with each set consisting of
    a weight matrix and a bias vector.
  rnn_mode: Indicates the type of the RNN model.
  input_mode: Indicates whether there is a linear projection between the input and
    the actual computation before the first layer. 'skip_input' is only allowed
    when input_size == num_units; 'auto_select' implies 'skip_input' when
    input_size == num_units; otherwise, it implies 'linear_input'.
  direction: Indicates whether a bidirectional model will be used.
    dir = (direction == bidirectional) ? 2 : 1
  dropout: dropout probability. When set to 0., dropout is disabled.
  seed: the 1st part of a seed to initialize dropout.
  seed2: the 2nd part of a seed to initialize dropout.

  Args:
    num_layers: A `Tensor` of type `int32`.
    num_units: A `Tensor` of type `int32`.
    input_size: A `Tensor` of type `int32`.
    weights: A list of at least 1 `Tensor` objects with the same type in: `bfloat16`, `half`, `float32`, `float64`.
    biases: A list with the same length as `weights` of `Tensor` objects with the same type as `weights`.
    rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`.
    input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`.
    direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`.
    dropout: An optional `float`. Defaults to `0`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `weights`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CudnnRNNCanonicalToParams", name, num_layers, num_units,
        input_size, weights, biases, "rnn_mode", rnn_mode, "input_mode",
        input_mode, "direction", direction, "dropout", dropout, "seed", seed,
        "seed2", seed2)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return cudnn_rnn_canonical_to_params_eager_fallback(
          num_layers, num_units, input_size, weights, biases,
          rnn_mode=rnn_mode, input_mode=input_mode, direction=direction,
          dropout=dropout, seed=seed, seed2=seed2, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'weights' argument to "
        "'cudnn_rnn_canonical_to_params' Op, not %r." % weights)
  _attr_num_params = len(weights)
  if not isinstance(biases, (list, tuple)):
    raise TypeError(
        "Expected list for 'biases' argument to "
        "'cudnn_rnn_canonical_to_params' Op, not %r." % biases)
  if len(biases) != _attr_num_params:
    raise ValueError(
        "List argument 'biases' to 'cudnn_rnn_canonical_to_params' Op with length %d "
        "must match length %d of argument 'weights'." %
        (len(biases), _attr_num_params))
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CudnnRNNCanonicalToParams", num_layers=num_layers,
                                     num_units=num_units,
                                     input_size=input_size, weights=weights,
                                     biases=biases, rnn_mode=rnn_mode,
                                     input_mode=input_mode,
                                     direction=direction, dropout=dropout,
                                     seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "num_params",
              _op._get_attr_int("num_params"), "rnn_mode",
              _op.get_attr("rnn_mode"), "input_mode",
              _op.get_attr("input_mode"), "direction",
              _op.get_attr("direction"), "dropout", _op.get_attr("dropout"),
              "seed", _op._get_attr_int("seed"), "seed2",
              _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CudnnRNNCanonicalToParams", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CudnnRNNCanonicalToParams = tf_export("raw_ops.CudnnRNNCanonicalToParams")(_ops.to_raw_op(cudnn_rnn_canonical_to_params))
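# Canonical-to-params sketch (editorial addition): packing canonical weights
# and biases into the opaque buffer. For a single-layer unidirectional LSTM,
# cuDNN's canonical layout has 8 weight matrices (4 input-to-hidden of shape
# [num_units, input_size], then 4 hidden-to-hidden of shape
# [num_units, num_units]) and 8 bias vectors of shape [num_units]. The
# num_units=32, input_size=16 values below are illustrative assumptions:
#
#   weights = ([tf.random.normal([32, 16]) for _ in range(4)] +
#              [tf.random.normal([32, 32]) for _ in range(4)])
#   biases = [tf.zeros([32]) for _ in range(8)]
#   opaque_params = tf.raw_ops.CudnnRNNCanonicalToParams(
#       num_layers=1, num_units=32, input_size=16,
#       weights=weights, biases=biases)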

def cudnn_rnn_canonical_to_params_eager_fallback(num_layers, num_units, input_size, weights, biases, rnn_mode, input_mode, direction, dropout, seed, seed2, name, ctx):
  if not isinstance(weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'weights' argument to "
        "'cudnn_rnn_canonical_to_params' Op, not %r." % weights)
  _attr_num_params = len(weights)
  if not isinstance(biases, (list, tuple)):
    raise TypeError(
        "Expected list for 'biases' argument to "
        "'cudnn_rnn_canonical_to_params' Op, not %r." % biases)
  if len(biases) != _attr_num_params:
    raise ValueError(
        "List argument 'biases' to 'cudnn_rnn_canonical_to_params' Op with length %d "
        "must match length %d of argument 'weights'." %
        (len(biases), _attr_num_params))
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(list(weights) + list(biases), ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  _inputs_T = [_inputs_T[:_attr_num_params]] + _inputs_T[_attr_num_params:]
  _inputs_T = _inputs_T[:1] + [_inputs_T[1:]]
  (weights, biases) = _inputs_T
  num_layers = _ops.convert_to_tensor(num_layers, _dtypes.int32)
  num_units = _ops.convert_to_tensor(num_units, _dtypes.int32)
  input_size = _ops.convert_to_tensor(input_size, _dtypes.int32)
  _inputs_flat = [num_layers, num_units, input_size] + list(weights) + list(biases)
  _attrs = ("T", _attr_T, "num_params", _attr_num_params, "rnn_mode",
  rnn_mode, "input_mode", input_mode, "direction", direction, "dropout",
  dropout, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"CudnnRNNCanonicalToParams", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CudnnRNNCanonicalToParams", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def cudnn_rnn_canonical_to_params_v2(num_layers, num_units, input_size, weights, biases, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, num_proj=0, name=None):
  r"""Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.

  Writes a set of weights into the opaque params buffer so they can be used in
  upcoming training or inference.

  Note that the params buffer may not be compatible across different GPUs. So any
  save and restoration should be converted to and from the canonical weights and
  biases.

  num_layers: Specifies the number of layers in the RNN model.
  num_units: Specifies the size of the hidden state.
  input_size: Specifies the size of the input state.
  weights: the canonical form of weights that can be used for saving
    and restoration. They are more likely to be compatible across different
    generations.
  biases: the canonical form of biases that can be used for saving
    and restoration. They are more likely to be compatible across different
    generations.
  num_params_weights: number of weight parameter matrices for all layers.
  num_params_biases: number of bias parameter vectors for all layers.
  rnn_mode: Indicates the type of the RNN model.
  input_mode: Indicates whether there is a linear projection between the input and
    the actual computation before the first layer. 'skip_input' is only allowed
    when input_size == num_units; 'auto_select' implies 'skip_input' when
    input_size == num_units; otherwise, it implies 'linear_input'.
  direction: Indicates whether a bidirectional model will be used.
    dir = (direction == bidirectional) ? 2 : 1
  dropout: dropout probability. When set to 0., dropout is disabled.
  seed: the 1st part of a seed to initialize dropout.
  seed2: the 2nd part of a seed to initialize dropout.
  num_proj: The output dimensionality for the projection matrices. If None or 0,
    no projection is performed.

  Args:
    num_layers: A `Tensor` of type `int32`.
    num_units: A `Tensor` of type `int32`.
    input_size: A `Tensor` of type `int32`.
    weights: A list of at least 1 `Tensor` objects with the same type in: `bfloat16`, `half`, `float32`, `float64`.
    biases: A list of at least 1 `Tensor` objects with the same type as `weights`.
    rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`.
    input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`.
    direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`.
    dropout: An optional `float`. Defaults to `0`.
    seed: An optional `int`. Defaults to `0`.
    seed2: An optional `int`. Defaults to `0`.
    num_proj: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `weights`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CudnnRNNCanonicalToParamsV2", name, num_layers, num_units,
        input_size, weights, biases, "rnn_mode", rnn_mode, "input_mode",
        input_mode, "direction", direction, "dropout", dropout, "seed", seed,
        "seed2", seed2, "num_proj", num_proj)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return cudnn_rnn_canonical_to_params_v2_eager_fallback(
          num_layers, num_units, input_size, weights, biases,
          rnn_mode=rnn_mode, input_mode=input_mode, direction=direction,
          dropout=dropout, seed=seed, seed2=seed2, num_proj=num_proj,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(weights, (list, tuple)):
    raise TypeError(
        "Expected list for 'weights' argument to "
        "'cudnn_rnn_canonical_to_params_v2' Op, not %r." % weights)
  _attr_num_params_weights = len(weights)
  if not isinstance(biases, (list, tuple)):
    raise TypeError(
        "Expected list for 'biases' argument to "
        "'cudnn_rnn_canonical_to_params_v2' Op, not %r." % biases)
  _attr_num_params_biases = len(biases)
  if rnn_mode is None:
    rnn_mode = "lstm"
  rnn_mode = _execute.make_str(rnn_mode, "rnn_mode")
  if input_mode is None:
    input_mode = "linear_input"
  input_mode = _execute.make_str(input_mode, "input_mode")
  if direction is None:
    direction = "unidirectional"
  direction = _execute.make_str(direction, "direction")
  if dropout is None:
    dropout = 0
  dropout = _execute.make_float(dropout, "dropout")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  if num_proj is None:
    num_proj = 0
  num_proj = _execute.make_int(num_proj, "num_proj")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CudnnRNNCanonicalToParamsV2", num_layers=num_layers,
                                       num_units=num_units,
                                       input_size=input_size, weights=weights,
                                       biases=biases, rnn_mode=rnn_mode,
                                       input_mode=input_mode,
                                       direction=direction, dropout=dropout,
                                       seed=seed, seed2=seed2,
                                       num_proj=num_proj, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "num_params_weights",
              _op._get_attr_int("num_params_weights"), "num_params_biases",
              _op._get_attr_int("num_params_biases"), "rnn_mode",
              _op.get_attr("rnn_mode"), "input_mode",
              _op.get_attr("input_mode"), "direction",
              _op.get_attr("direction"), "dropout", _op.get_attr("dropout"),
              "seed", _op._get_attr_int("seed"), "seed2",
              _op._get_attr_int("seed2"), "num_proj",
              _op._get_attr_int("num_proj"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CudnnRNNCanonicalToParamsV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CudnnRNNCanonicalToParamsV2 = tf_export("raw_ops.CudnnRNNCanonicalToParamsV2")(_ops.to_raw_op(cudnn_rnn_canonical_to_params_v2))
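# V2 note (editorial addition): with num_proj > 0 (LSTM projection), the
# number of canonical weight matrices no longer equals the number of bias
# vectors, which is why this op carries separate num_params_weights and
# num_params_biases attrs instead of the single num_params used by
# CudnnRNNCanonicalToParams. A hedged fragment for a projected LSTM; the
# canonical lists below are assumed, not derived here:
#
#   opaque_params = tf.raw_ops.CudnnRNNCanonicalToParamsV2(
#       num_layers=1, num_units=32, input_size=16,
#       weights=weights_with_projection,  # assumed canonical weight list
#       biases=biases, num_proj=8)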

1086 

1087 

1088def cudnn_rnn_canonical_to_params_v2_eager_fallback(num_layers, num_units, input_size, weights, biases, rnn_mode, input_mode, direction, dropout, seed, seed2, num_proj, name, ctx): 

1089 if not isinstance(weights, (list, tuple)): 

1090 raise TypeError( 

1091 "Expected list for 'weights' argument to " 

1092 "'cudnn_rnn_canonical_to_params_v2' Op, not %r." % weights) 

1093 _attr_num_params_weights = len(weights) 

1094 if not isinstance(biases, (list, tuple)): 

1095 raise TypeError( 

1096 "Expected list for 'biases' argument to " 

1097 "'cudnn_rnn_canonical_to_params_v2' Op, not %r." % biases) 

1098 _attr_num_params_biases = len(biases) 

1099 if rnn_mode is None: 

1100 rnn_mode = "lstm" 

1101 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1102 if input_mode is None: 

1103 input_mode = "linear_input" 

1104 input_mode = _execute.make_str(input_mode, "input_mode") 

1105 if direction is None: 

1106 direction = "unidirectional" 

1107 direction = _execute.make_str(direction, "direction") 

1108 if dropout is None: 

1109 dropout = 0 

1110 dropout = _execute.make_float(dropout, "dropout") 

1111 if seed is None: 

1112 seed = 0 

1113 seed = _execute.make_int(seed, "seed") 

1114 if seed2 is None: 

1115 seed2 = 0 

1116 seed2 = _execute.make_int(seed2, "seed2") 

1117 if num_proj is None: 

1118 num_proj = 0 

1119 num_proj = _execute.make_int(num_proj, "num_proj") 

1120 _attr_T, _inputs_T = _execute.args_to_matching_eager(list(weights) + list(biases), ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

1121 _inputs_T = [_inputs_T[:_attr_num_params_weights]] + _inputs_T[_attr_num_params_weights:]  # group the first num_params_weights tensors into the weights list

1122 _inputs_T = _inputs_T[:1] + [_inputs_T[1:]]  # group the remaining tensors into the biases list

1123 (weights, biases) = _inputs_T

1124 num_layers = _ops.convert_to_tensor(num_layers, _dtypes.int32) 

1125 num_units = _ops.convert_to_tensor(num_units, _dtypes.int32) 

1126 input_size = _ops.convert_to_tensor(input_size, _dtypes.int32) 

1127 _inputs_flat = [num_layers, num_units, input_size] + list(weights) + list(biases) 

1128 _attrs = ("T", _attr_T, "num_params_weights", _attr_num_params_weights, 

1129 "num_params_biases", _attr_num_params_biases, "rnn_mode", rnn_mode, 

1130 "input_mode", input_mode, "direction", direction, "dropout", dropout, 

1131 "seed", seed, "seed2", seed2, "num_proj", num_proj) 

1132 _result = _execute.execute(b"CudnnRNNCanonicalToParamsV2", 1, 

1133 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1134 name=name) 

1135 if _execute.must_record_gradient(): 

1136 _execute.record_gradient( 

1137 "CudnnRNNCanonicalToParamsV2", _inputs_flat, _attrs, _result) 

1138 _result, = _result 

1139 return _result 
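
A minimal sketch of packing canonical tensors back into the opaque buffer via tf.raw_ops.CudnnRNNCanonicalToParamsV2. It assumes a CUDA build of TensorFlow with cuDNN available; the eight-weight/eight-bias layout per unidirectional LSTM layer is an assumption based on cuDNN's canonical LSTM format, and all sizes are illustrative.

import tensorflow as tf

num_units, input_size = 4, 3  # illustrative sizes
# Assumed cuDNN canonical single-layer LSTM layout: four input-side
# matrices, four recurrent matrices, and eight bias vectors.
weights = ([tf.zeros([num_units, input_size]) for _ in range(4)] +
           [tf.zeros([num_units, num_units]) for _ in range(4)])
biases = [tf.zeros([num_units]) for _ in range(8)]
params = tf.raw_ops.CudnnRNNCanonicalToParamsV2(
    num_layers=1, num_units=num_units, input_size=input_size,
    weights=weights, biases=biases, rnn_mode="lstm")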

1140 

1141 

1142def cudnn_rnn_params_size(num_layers, num_units, input_size, T, S, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, num_proj=0, name=None): 

1143 r"""Computes the size of weights that can be used by a Cudnn RNN model. 

1144 

1145 Return the params size that can be used by the Cudnn RNN model. Subsequent 

1146 weight allocation and initialization should use this size. 

1147 

1148 num_layers: Specifies the number of layers in the RNN model. 

1149 num_units: Specifies the size of the hidden state. 

1150 input_size: Specifies the size of the input state. 

1151 rnn_mode: Indicates the type of the RNN model. 

1152 input_mode: Indicates whether there is a linear projection between the input and 

1153 the actual computation before the first layer. 'skip_input' is only allowed 

1154 when input_size == num_units; 'auto_select' implies 'skip_input' when 

1155 input_size == num_units; otherwise, it implies 'linear_input'. 

1156 direction: Indicates whether a bidirectional model will be used. 

1157 dir = (direction == bidirectional) ? 2 : 1 

1158 dropout: dropout probability. When set to 0., dropout is disabled. 

1159 seed: the 1st part of a seed to initialize dropout. 

1160 seed2: the 2nd part of a seed to initialize dropout. 

1161 params_size: The size of the params buffer that should be allocated and 

1162 initialized for this RNN model. Note that this params buffer may not be 

1163 compatible across GPUs. Please use CudnnRNNParamsWeights and 

1164 CudnnRNNParamsBiases to save and restore them in a way that is compatible 

1165 across different runs. 

1166 

1167 Args: 

1168 num_layers: A `Tensor` of type `int32`. 

1169 num_units: A `Tensor` of type `int32`. 

1170 input_size: A `Tensor` of type `int32`. 

1171 T: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64`. 

1172 S: A `tf.DType` from: `tf.int32, tf.int64`. 

1173 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

1174 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

1175 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

1176 dropout: An optional `float`. Defaults to `0`. 

1177 seed: An optional `int`. Defaults to `0`. 

1178 seed2: An optional `int`. Defaults to `0`. 

1179 num_proj: An optional `int`. Defaults to `0`. 

1180 name: A name for the operation (optional). 

1181 

1182 Returns: 

1183 A `Tensor` of type `S`. 

1184 """ 

1185 _ctx = _context._context or _context.context() 

1186 tld = _ctx._thread_local_data 

1187 if tld.is_eager: 

1188 try: 

1189 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1190 _ctx, "CudnnRNNParamsSize", name, num_layers, num_units, input_size, 

1191 "T", T, "S", S, "rnn_mode", rnn_mode, "input_mode", input_mode, 

1192 "direction", direction, "dropout", dropout, "seed", seed, "seed2", 

1193 seed2, "num_proj", num_proj) 

1194 return _result 

1195 except _core._NotOkStatusException as e: 

1196 _ops.raise_from_not_ok_status(e, name) 

1197 except _core._FallbackException: 

1198 pass 

1199 try: 

1200 return cudnn_rnn_params_size_eager_fallback( 

1201 num_layers, num_units, input_size, T=T, S=S, rnn_mode=rnn_mode, 

1202 input_mode=input_mode, direction=direction, dropout=dropout, 

1203 seed=seed, seed2=seed2, num_proj=num_proj, name=name, ctx=_ctx) 

1204 except _core._SymbolicException: 

1205 pass # Add nodes to the TensorFlow graph. 

1206 # Add nodes to the TensorFlow graph. 

1207 T = _execute.make_type(T, "T") 

1208 S = _execute.make_type(S, "S") 

1209 if rnn_mode is None: 

1210 rnn_mode = "lstm" 

1211 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1212 if input_mode is None: 

1213 input_mode = "linear_input" 

1214 input_mode = _execute.make_str(input_mode, "input_mode") 

1215 if direction is None: 

1216 direction = "unidirectional" 

1217 direction = _execute.make_str(direction, "direction") 

1218 if dropout is None: 

1219 dropout = 0 

1220 dropout = _execute.make_float(dropout, "dropout") 

1221 if seed is None: 

1222 seed = 0 

1223 seed = _execute.make_int(seed, "seed") 

1224 if seed2 is None: 

1225 seed2 = 0 

1226 seed2 = _execute.make_int(seed2, "seed2") 

1227 if num_proj is None: 

1228 num_proj = 0 

1229 num_proj = _execute.make_int(num_proj, "num_proj") 

1230 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1231 "CudnnRNNParamsSize", num_layers=num_layers, num_units=num_units, 

1232 input_size=input_size, T=T, S=S, 

1233 rnn_mode=rnn_mode, input_mode=input_mode, 

1234 direction=direction, dropout=dropout, seed=seed, 

1235 seed2=seed2, num_proj=num_proj, name=name) 

1236 _result = _outputs[:] 

1237 if _execute.must_record_gradient(): 

1238 _attrs = ("T", _op._get_attr_type("T"), "S", _op._get_attr_type("S"), 

1239 "rnn_mode", _op.get_attr("rnn_mode"), "input_mode", 

1240 _op.get_attr("input_mode"), "direction", 

1241 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

1242 "seed", _op._get_attr_int("seed"), "seed2", 

1243 _op._get_attr_int("seed2"), "num_proj", 

1244 _op._get_attr_int("num_proj")) 

1245 _inputs_flat = _op.inputs 

1246 _execute.record_gradient( 

1247 "CudnnRNNParamsSize", _inputs_flat, _attrs, _result) 

1248 _result, = _result 

1249 return _result 

1250 

1251CudnnRNNParamsSize = tf_export("raw_ops.CudnnRNNParamsSize")(_ops.to_raw_op(cudnn_rnn_params_size)) 

1252 

1253 

1254def cudnn_rnn_params_size_eager_fallback(num_layers, num_units, input_size, T, S, rnn_mode, input_mode, direction, dropout, seed, seed2, num_proj, name, ctx): 

1255 T = _execute.make_type(T, "T") 

1256 S = _execute.make_type(S, "S") 

1257 if rnn_mode is None: 

1258 rnn_mode = "lstm" 

1259 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1260 if input_mode is None: 

1261 input_mode = "linear_input" 

1262 input_mode = _execute.make_str(input_mode, "input_mode") 

1263 if direction is None: 

1264 direction = "unidirectional" 

1265 direction = _execute.make_str(direction, "direction") 

1266 if dropout is None: 

1267 dropout = 0 

1268 dropout = _execute.make_float(dropout, "dropout") 

1269 if seed is None: 

1270 seed = 0 

1271 seed = _execute.make_int(seed, "seed") 

1272 if seed2 is None: 

1273 seed2 = 0 

1274 seed2 = _execute.make_int(seed2, "seed2") 

1275 if num_proj is None: 

1276 num_proj = 0 

1277 num_proj = _execute.make_int(num_proj, "num_proj") 

1278 num_layers = _ops.convert_to_tensor(num_layers, _dtypes.int32) 

1279 num_units = _ops.convert_to_tensor(num_units, _dtypes.int32) 

1280 input_size = _ops.convert_to_tensor(input_size, _dtypes.int32) 

1281 _inputs_flat = [num_layers, num_units, input_size] 

1282 _attrs = ("T", T, "S", S, "rnn_mode", rnn_mode, "input_mode", input_mode, 

1283 "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2, 

1284 "num_proj", num_proj) 

1285 _result = _execute.execute(b"CudnnRNNParamsSize", 1, inputs=_inputs_flat, 

1286 attrs=_attrs, ctx=ctx, name=name) 

1287 if _execute.must_record_gradient(): 

1288 _execute.record_gradient( 

1289 "CudnnRNNParamsSize", _inputs_flat, _attrs, _result) 

1290 _result, = _result 

1291 return _result 
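
A usage sketch for the op above, assuming a CUDA build of TensorFlow with cuDNN; the returned size tensor determines the 1-D opaque buffer that the other CudnnRNN ops consume (model dimensions here are illustrative).

import tensorflow as tf

size = tf.raw_ops.CudnnRNNParamsSize(
    num_layers=2, num_units=128, input_size=64,
    T=tf.float32, S=tf.int32,
    rnn_mode="gru", direction="bidirectional")
# Allocate a matching float32 buffer; how it is initialized (randomly,
# or from canonical weights) is up to the caller.
params = tf.random.uniform(tf.reshape(size, [1]), dtype=tf.float32)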

1292 

1293_CudnnRNNParamsToCanonicalOutput = collections.namedtuple( 

1294 "CudnnRNNParamsToCanonical", 

1295 ["weights", "biases"]) 

1296 

1297 

1298def cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params, num_params, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, name=None): 

1299 r"""Retrieves CudnnRNN params in canonical form. 

1300 

1301 Retrieves a set of weights from the opaque params buffer that can be saved and 

1302 restored in a way compatible with future runs. 

1303 

1304 Note that the params buffer may not be compatible across different GPUs. So any 

1305 save and restoration should be converted to and from the canonical weights and 

1306 biases. 

1307 

1308 num_layers: Specifies the number of layers in the RNN model. 

1309 num_units: Specifies the size of the hidden state. 

1310 input_size: Specifies the size of the input state. 

1311 num_params: number of parameter sets for all layers. 

1312 Each layer may contain multiple parameter sets, with each set consisting of 

1313 a weight matrix and a bias vector. 

1314 weights: the canonical form of weights that can be used for saving 

1315 and restoration. They are more likely to be compatible across different 

1316 generations. 

1317 biases: the canonical form of biases that can be used for saving 

1318 and restoration. They are more likely to be compatible across different 

1319 generations. 

1320 rnn_mode: Indicates the type of the RNN model. 

1321 input_mode: Indicates whether there is a linear projection between the input and 

1322 the actual computation before the first layer. 'skip_input' is only allowed 

1323 when input_size == num_units; 'auto_select' implies 'skip_input' when 

1324 input_size == num_units; otherwise, it implies 'linear_input'. 

1325 direction: Indicates whether a bidirectional model will be used. 

1326 dir = (direction == bidirectional) ? 2 : 1 

1327 dropout: dropout probability. When set to 0., dropout is disabled. 

1328 seed: the 1st part of a seed to initialize dropout. 

1329 seed2: the 2nd part of a seed to initialize dropout. 

1330 

1331 Args: 

1332 num_layers: A `Tensor` of type `int32`. 

1333 num_units: A `Tensor` of type `int32`. 

1334 input_size: A `Tensor` of type `int32`. 

1335 params: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 

1336 num_params: An `int` that is `>= 1`. 

1337 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

1338 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

1339 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

1340 dropout: An optional `float`. Defaults to `0`. 

1341 seed: An optional `int`. Defaults to `0`. 

1342 seed2: An optional `int`. Defaults to `0`. 

1343 name: A name for the operation (optional). 

1344 

1345 Returns: 

1346 A tuple of `Tensor` objects (weights, biases). 

1347 

1348 weights: A list of `num_params` `Tensor` objects with the same type as `params`. 

1349 biases: A list of `num_params` `Tensor` objects with the same type as `params`. 

1350 """ 

1351 _ctx = _context._context or _context.context() 

1352 tld = _ctx._thread_local_data 

1353 if tld.is_eager: 

1354 try: 

1355 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1356 _ctx, "CudnnRNNParamsToCanonical", name, num_layers, num_units, 

1357 input_size, params, "num_params", num_params, "rnn_mode", rnn_mode, 

1358 "input_mode", input_mode, "direction", direction, "dropout", dropout, 

1359 "seed", seed, "seed2", seed2) 

1360 _result = _CudnnRNNParamsToCanonicalOutput._make(_result) 

1361 return _result 

1362 except _core._NotOkStatusException as e: 

1363 _ops.raise_from_not_ok_status(e, name) 

1364 except _core._FallbackException: 

1365 pass 

1366 try: 

1367 return cudnn_rnn_params_to_canonical_eager_fallback( 

1368 num_layers, num_units, input_size, params, num_params=num_params, 

1369 rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, 

1370 dropout=dropout, seed=seed, seed2=seed2, name=name, ctx=_ctx) 

1371 except _core._SymbolicException: 

1372 pass # Add nodes to the TensorFlow graph. 

1373 # Add nodes to the TensorFlow graph. 

1374 num_params = _execute.make_int(num_params, "num_params") 

1375 if rnn_mode is None: 

1376 rnn_mode = "lstm" 

1377 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1378 if input_mode is None: 

1379 input_mode = "linear_input" 

1380 input_mode = _execute.make_str(input_mode, "input_mode") 

1381 if direction is None: 

1382 direction = "unidirectional" 

1383 direction = _execute.make_str(direction, "direction") 

1384 if dropout is None: 

1385 dropout = 0 

1386 dropout = _execute.make_float(dropout, "dropout") 

1387 if seed is None: 

1388 seed = 0 

1389 seed = _execute.make_int(seed, "seed") 

1390 if seed2 is None: 

1391 seed2 = 0 

1392 seed2 = _execute.make_int(seed2, "seed2") 

1393 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1394 "CudnnRNNParamsToCanonical", num_layers=num_layers, 

1395 num_units=num_units, 

1396 input_size=input_size, params=params, 

1397 num_params=num_params, rnn_mode=rnn_mode, 

1398 input_mode=input_mode, 

1399 direction=direction, dropout=dropout, 

1400 seed=seed, seed2=seed2, name=name) 

1401 _result = _outputs[:] 

1402 if _execute.must_record_gradient(): 

1403 _attrs = ("T", _op._get_attr_type("T"), "num_params", 

1404 _op._get_attr_int("num_params"), "rnn_mode", 

1405 _op.get_attr("rnn_mode"), "input_mode", 

1406 _op.get_attr("input_mode"), "direction", 

1407 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

1408 "seed", _op._get_attr_int("seed"), "seed2", 

1409 _op._get_attr_int("seed2")) 

1410 _inputs_flat = _op.inputs 

1411 _execute.record_gradient( 

1412 "CudnnRNNParamsToCanonical", _inputs_flat, _attrs, _result) 

1413 _result = [_result[:num_params]] + _result[num_params:]  # group the first num_params outputs into the weights list

1414 _result = _result[:1] + [_result[1:]]  # group the remaining outputs into the biases list

1415 _result = _CudnnRNNParamsToCanonicalOutput._make(_result) 

1416 return _result 

1417 

1418CudnnRNNParamsToCanonical = tf_export("raw_ops.CudnnRNNParamsToCanonical")(_ops.to_raw_op(cudnn_rnn_params_to_canonical)) 

1419 

1420 

1421def cudnn_rnn_params_to_canonical_eager_fallback(num_layers, num_units, input_size, params, num_params, rnn_mode, input_mode, direction, dropout, seed, seed2, name, ctx): 

1422 num_params = _execute.make_int(num_params, "num_params") 

1423 if rnn_mode is None: 

1424 rnn_mode = "lstm" 

1425 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1426 if input_mode is None: 

1427 input_mode = "linear_input" 

1428 input_mode = _execute.make_str(input_mode, "input_mode") 

1429 if direction is None: 

1430 direction = "unidirectional" 

1431 direction = _execute.make_str(direction, "direction") 

1432 if dropout is None: 

1433 dropout = 0 

1434 dropout = _execute.make_float(dropout, "dropout") 

1435 if seed is None: 

1436 seed = 0 

1437 seed = _execute.make_int(seed, "seed") 

1438 if seed2 is None: 

1439 seed2 = 0 

1440 seed2 = _execute.make_int(seed2, "seed2") 

1441 _attr_T, (params,) = _execute.args_to_matching_eager([params], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

1442 num_layers = _ops.convert_to_tensor(num_layers, _dtypes.int32) 

1443 num_units = _ops.convert_to_tensor(num_units, _dtypes.int32) 

1444 input_size = _ops.convert_to_tensor(input_size, _dtypes.int32) 

1445 _inputs_flat = [num_layers, num_units, input_size, params] 

1446 _attrs = ("T", _attr_T, "num_params", num_params, "rnn_mode", rnn_mode, 

1447 "input_mode", input_mode, "direction", direction, "dropout", dropout, 

1448 "seed", seed, "seed2", seed2) 

1449 _result = _execute.execute(b"CudnnRNNParamsToCanonical", num_params + 

1450 num_params, inputs=_inputs_flat, attrs=_attrs, 

1451 ctx=ctx, name=name) 

1452 if _execute.must_record_gradient(): 

1453 _execute.record_gradient( 

1454 "CudnnRNNParamsToCanonical", _inputs_flat, _attrs, _result) 

1455 _result = [_result[:num_params]] + _result[num_params:] 

1456 _result = _result[:1] + [_result[1:]] 

1457 _result = _CudnnRNNParamsToCanonicalOutput._make(_result) 

1458 return _result 
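
A minimal sketch of splitting an opaque buffer with the op above (CUDA build with cuDNN assumed). num_params=8 reflects cuDNN's canonical layout for a single-layer unidirectional LSTM, which is an assumption here; other configurations need different counts.

import tensorflow as tf

num_layers, num_units, input_size = 1, 4, 3  # illustrative sizes
size = tf.raw_ops.CudnnRNNParamsSize(
    num_layers=num_layers, num_units=num_units, input_size=input_size,
    T=tf.float32, S=tf.int32, rnn_mode="lstm")
params = tf.random.uniform(tf.reshape(size, [1]), dtype=tf.float32)
# Split the opaque buffer into lists of weight matrices and bias vectors
# that are safe to save and restore across runs.
weights, biases = tf.raw_ops.CudnnRNNParamsToCanonical(
    num_layers=num_layers, num_units=num_units, input_size=input_size,
    params=params, num_params=8, rnn_mode="lstm")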

1459 

1460_CudnnRNNParamsToCanonicalV2Output = collections.namedtuple( 

1461 "CudnnRNNParamsToCanonicalV2", 

1462 ["weights", "biases"]) 

1463 

1464 

1465def cudnn_rnn_params_to_canonical_v2(num_layers, num_units, input_size, params, num_params_weights, num_params_biases, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, num_proj=0, name=None): 

1466 r"""Retrieves CudnnRNN params in canonical form. It supports projection in LSTM. 

1467 

1468 Retrieves a set of weights from the opaque params buffer that can be saved and 

1469 restored in a way compatible with future runs. 

1470 

1471 Note that the params buffer may not be compatible across different GPUs. So any 

1472 save and restoration should be converted to and from the canonical weights and 

1473 biases. 

1474 

1475 num_layers: Specifies the number of layers in the RNN model. 

1476 num_units: Specifies the size of the hidden state. 

1477 input_size: Specifies the size of the input state. 

1478 num_params_weights: number of weight parameter matrices for all layers. 

1479 num_params_biases: number of bias parameter vectors for all layers. 

1480 weights: the canonical form of weights that can be used for saving 

1481 and restoration. They are more likely to be compatible across different 

1482 generations. 

1483 biases: the canonical form of biases that can be used for saving 

1484 and restoration. They are more likely to be compatible across different 

1485 generations. 

1486 rnn_mode: Indicates the type of the RNN model. 

1487 input_mode: Indicates whether there is a linear projection between the input and 

1488 the actual computation before the first layer. 'skip_input' is only allowed 

1489 when input_size == num_units; 'auto_select' implies 'skip_input' when 

1490 input_size == num_units; otherwise, it implies 'linear_input'. 

1491 direction: Indicates whether a bidirectional model will be used. 

1492 dir = (direction == bidirectional) ? 2 : 1 

1493 dropout: dropout probability. When set to 0., dropout is disabled. 

1494 seed: the 1st part of a seed to initialize dropout. 

1495 seed2: the 2nd part of a seed to initialize dropout. 

1496 num_proj: The output dimensionality for the projection matrices. If None or 0, 

1497 no projection is performed. 

1498 

1499 Args: 

1500 num_layers: A `Tensor` of type `int32`. 

1501 num_units: A `Tensor` of type `int32`. 

1502 input_size: A `Tensor` of type `int32`. 

1503 params: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 

1504 num_params_weights: An `int` that is `>= 1`. 

1505 num_params_biases: An `int` that is `>= 1`. 

1506 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

1507 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

1508 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

1509 dropout: An optional `float`. Defaults to `0`. 

1510 seed: An optional `int`. Defaults to `0`. 

1511 seed2: An optional `int`. Defaults to `0`. 

1512 num_proj: An optional `int`. Defaults to `0`. 

1513 name: A name for the operation (optional). 

1514 

1515 Returns: 

1516 A tuple of `Tensor` objects (weights, biases). 

1517 

1518 weights: A list of `num_params_weights` `Tensor` objects with the same type as `params`. 

1519 biases: A list of `num_params_biases` `Tensor` objects with the same type as `params`. 

1520 """ 

1521 _ctx = _context._context or _context.context() 

1522 tld = _ctx._thread_local_data 

1523 if tld.is_eager: 

1524 try: 

1525 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1526 _ctx, "CudnnRNNParamsToCanonicalV2", name, num_layers, num_units, 

1527 input_size, params, "num_params_weights", num_params_weights, 

1528 "num_params_biases", num_params_biases, "rnn_mode", rnn_mode, 

1529 "input_mode", input_mode, "direction", direction, "dropout", dropout, 

1530 "seed", seed, "seed2", seed2, "num_proj", num_proj) 

1531 _result = _CudnnRNNParamsToCanonicalV2Output._make(_result) 

1532 return _result 

1533 except _core._NotOkStatusException as e: 

1534 _ops.raise_from_not_ok_status(e, name) 

1535 except _core._FallbackException: 

1536 pass 

1537 try: 

1538 return cudnn_rnn_params_to_canonical_v2_eager_fallback( 

1539 num_layers, num_units, input_size, params, 

1540 num_params_weights=num_params_weights, 

1541 num_params_biases=num_params_biases, rnn_mode=rnn_mode, 

1542 input_mode=input_mode, direction=direction, dropout=dropout, 

1543 seed=seed, seed2=seed2, num_proj=num_proj, name=name, ctx=_ctx) 

1544 except _core._SymbolicException: 

1545 pass # Add nodes to the TensorFlow graph. 

1546 # Add nodes to the TensorFlow graph. 

1547 num_params_weights = _execute.make_int(num_params_weights, "num_params_weights") 

1548 num_params_biases = _execute.make_int(num_params_biases, "num_params_biases") 

1549 if rnn_mode is None: 

1550 rnn_mode = "lstm" 

1551 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1552 if input_mode is None: 

1553 input_mode = "linear_input" 

1554 input_mode = _execute.make_str(input_mode, "input_mode") 

1555 if direction is None: 

1556 direction = "unidirectional" 

1557 direction = _execute.make_str(direction, "direction") 

1558 if dropout is None: 

1559 dropout = 0 

1560 dropout = _execute.make_float(dropout, "dropout") 

1561 if seed is None: 

1562 seed = 0 

1563 seed = _execute.make_int(seed, "seed") 

1564 if seed2 is None: 

1565 seed2 = 0 

1566 seed2 = _execute.make_int(seed2, "seed2") 

1567 if num_proj is None: 

1568 num_proj = 0 

1569 num_proj = _execute.make_int(num_proj, "num_proj") 

1570 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1571 "CudnnRNNParamsToCanonicalV2", num_layers=num_layers, 

1572 num_units=num_units, 

1573 input_size=input_size, params=params, 

1574 num_params_weights=num_params_weights, 

1575 num_params_biases=num_params_biases, 

1576 rnn_mode=rnn_mode, 

1577 input_mode=input_mode, 

1578 direction=direction, dropout=dropout, 

1579 seed=seed, seed2=seed2, 

1580 num_proj=num_proj, name=name) 

1581 _result = _outputs[:] 

1582 if _execute.must_record_gradient(): 

1583 _attrs = ("T", _op._get_attr_type("T"), "num_params_weights", 

1584 _op._get_attr_int("num_params_weights"), "num_params_biases", 

1585 _op._get_attr_int("num_params_biases"), "rnn_mode", 

1586 _op.get_attr("rnn_mode"), "input_mode", 

1587 _op.get_attr("input_mode"), "direction", 

1588 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

1589 "seed", _op._get_attr_int("seed"), "seed2", 

1590 _op._get_attr_int("seed2"), "num_proj", 

1591 _op._get_attr_int("num_proj")) 

1592 _inputs_flat = _op.inputs 

1593 _execute.record_gradient( 

1594 "CudnnRNNParamsToCanonicalV2", _inputs_flat, _attrs, _result) 

1595 _result = [_result[:num_params_weights]] + _result[num_params_weights:] 

1596 _result = _result[:1] + [_result[1:]] 

1597 _result = _CudnnRNNParamsToCanonicalV2Output._make(_result) 

1598 return _result 

1599 

1600CudnnRNNParamsToCanonicalV2 = tf_export("raw_ops.CudnnRNNParamsToCanonicalV2")(_ops.to_raw_op(cudnn_rnn_params_to_canonical_v2)) 

1601 

1602 

1603def cudnn_rnn_params_to_canonical_v2_eager_fallback(num_layers, num_units, input_size, params, num_params_weights, num_params_biases, rnn_mode, input_mode, direction, dropout, seed, seed2, num_proj, name, ctx): 

1604 num_params_weights = _execute.make_int(num_params_weights, "num_params_weights") 

1605 num_params_biases = _execute.make_int(num_params_biases, "num_params_biases") 

1606 if rnn_mode is None: 

1607 rnn_mode = "lstm" 

1608 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1609 if input_mode is None: 

1610 input_mode = "linear_input" 

1611 input_mode = _execute.make_str(input_mode, "input_mode") 

1612 if direction is None: 

1613 direction = "unidirectional" 

1614 direction = _execute.make_str(direction, "direction") 

1615 if dropout is None: 

1616 dropout = 0 

1617 dropout = _execute.make_float(dropout, "dropout") 

1618 if seed is None: 

1619 seed = 0 

1620 seed = _execute.make_int(seed, "seed") 

1621 if seed2 is None: 

1622 seed2 = 0 

1623 seed2 = _execute.make_int(seed2, "seed2") 

1624 if num_proj is None: 

1625 num_proj = 0 

1626 num_proj = _execute.make_int(num_proj, "num_proj") 

1627 _attr_T, (params,) = _execute.args_to_matching_eager([params], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

1628 num_layers = _ops.convert_to_tensor(num_layers, _dtypes.int32) 

1629 num_units = _ops.convert_to_tensor(num_units, _dtypes.int32) 

1630 input_size = _ops.convert_to_tensor(input_size, _dtypes.int32) 

1631 _inputs_flat = [num_layers, num_units, input_size, params] 

1632 _attrs = ("T", _attr_T, "num_params_weights", num_params_weights, 

1633 "num_params_biases", num_params_biases, "rnn_mode", rnn_mode, "input_mode", 

1634 input_mode, "direction", direction, "dropout", dropout, "seed", seed, 

1635 "seed2", seed2, "num_proj", num_proj) 

1636 _result = _execute.execute(b"CudnnRNNParamsToCanonicalV2", 

1637 num_params_weights + num_params_biases, 

1638 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1639 name=name) 

1640 if _execute.must_record_gradient(): 

1641 _execute.record_gradient( 

1642 "CudnnRNNParamsToCanonicalV2", _inputs_flat, _attrs, _result) 

1643 _result = [_result[:num_params_weights]] + _result[num_params_weights:] 

1644 _result = _result[:1] + [_result[1:]] 

1645 _result = _CudnnRNNParamsToCanonicalV2Output._make(_result) 

1646 return _result 
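
A round-trip sketch for the V2 op, whose distinguishing features are the separate num_params_weights/num_params_biases counts and the num_proj attribute (CUDA build with cuDNN assumed; with num_proj=0, the 8/8 counts per unidirectional LSTM layer are an assumption based on cuDNN's layout).

import tensorflow as tf

num_layers, num_units, input_size = 1, 4, 3  # illustrative sizes
size = tf.raw_ops.CudnnRNNParamsSize(
    num_layers=num_layers, num_units=num_units, input_size=input_size,
    T=tf.float32, S=tf.int32, rnn_mode="lstm")
params = tf.random.uniform(tf.reshape(size, [1]), dtype=tf.float32)
weights, biases = tf.raw_ops.CudnnRNNParamsToCanonicalV2(
    num_layers=num_layers, num_units=num_units, input_size=input_size,
    params=params, num_params_weights=8, num_params_biases=8,
    rnn_mode="lstm", num_proj=0)
# The canonical tensors can be packed back into an equivalent opaque buffer.
rebuilt = tf.raw_ops.CudnnRNNCanonicalToParamsV2(
    num_layers=num_layers, num_units=num_units, input_size=input_size,
    weights=weights, biases=biases, rnn_mode="lstm", num_proj=0)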

1647 

1648_CudnnRNNV2Output = collections.namedtuple( 

1649 "CudnnRNNV2", 

1650 ["output", "output_h", "output_c", "reserve_space", "host_reserved"]) 

1651 

1652 

1653def cudnn_rnnv2(input, input_h, input_c, params, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, is_training=True, name=None): 

1654 r"""An RNN backed by cuDNN. 

1655 

1656 Computes the RNN from the input and initial states, with respect to the params 

1657 buffer. Produces one extra output, "host_reserved", compared to CudnnRNN. 

1658 

1659 rnn_mode: Indicates the type of the RNN model. 

1660 input_mode: Indicates whether there is a linear projection between the input and 

1661 the actual computation before the first layer. 'skip_input' is only allowed 

1662 when input_size == num_units; 'auto_select' implies 'skip_input' when 

1663 input_size == num_units; otherwise, it implies 'linear_input'. 

1664 direction: Indicates whether a bidirectional model will be used. Should be 

1665 "unidirectional" or "bidirectional". 

1666 dropout: Dropout probability. When set to 0., dropout is disabled. 

1667 seed: The 1st part of a seed to initialize dropout. 

1668 seed2: The 2nd part of a seed to initialize dropout. 

1669 input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. 

1670 input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, 

1671 num_units]. 

1672 input_c: For LSTM, a 3-D tensor with the shape of 

1673 [num_layer * dir, batch, num_units]. For other models, it is ignored. 

1674 params: A 1-D tensor that contains the weights and biases in an opaque layout. 

1675 The size must be created through CudnnRNNParamsSize, and initialized 

1676 separately. Note that they might not be compatible across different 

1677 generations. So it is a good idea to save and restore them in the canonical form. 

1678 output: A 3-D tensor with the shape of [seq_length, batch_size, 

1679 dir * num_units]. 

1680 output_h: The same shape as input_h. 

1681 output_c: The same shape as input_c for LSTM. An empty tensor for other models. 

1682 is_training: Indicates whether this operation is used for inference or 

1683 training. 

1684 reserve_space: An opaque tensor that can be used in backprop calculation. It 

1685 is only produced if is_training is true. 

1686 host_reserved: An opaque tensor that can be used in backprop calculation. It is 

1687 only produced if is_training is true. It is output on host memory rather than 

1688 device memory. 

1689 

1690 Args: 

1691 input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 

1692 input_h: A `Tensor`. Must have the same type as `input`. 

1693 input_c: A `Tensor`. Must have the same type as `input`. 

1694 params: A `Tensor`. Must have the same type as `input`. 

1695 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

1696 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

1697 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

1698 dropout: An optional `float`. Defaults to `0`. 

1699 seed: An optional `int`. Defaults to `0`. 

1700 seed2: An optional `int`. Defaults to `0`. 

1701 is_training: An optional `bool`. Defaults to `True`. 

1702 name: A name for the operation (optional). 

1703 

1704 Returns: 

1705 A tuple of `Tensor` objects (output, output_h, output_c, reserve_space, host_reserved). 

1706 

1707 output: A `Tensor`. Has the same type as `input`. 

1708 output_h: A `Tensor`. Has the same type as `input`. 

1709 output_c: A `Tensor`. Has the same type as `input`. 

1710 reserve_space: A `Tensor`. Has the same type as `input`. 

1711 host_reserved: A `Tensor` of type `int8`. 

1712 """ 

1713 _ctx = _context._context or _context.context() 

1714 tld = _ctx._thread_local_data 

1715 if tld.is_eager: 

1716 try: 

1717 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1718 _ctx, "CudnnRNNV2", name, input, input_h, input_c, params, "rnn_mode", 

1719 rnn_mode, "input_mode", input_mode, "direction", direction, "dropout", 

1720 dropout, "seed", seed, "seed2", seed2, "is_training", is_training) 

1721 _result = _CudnnRNNV2Output._make(_result) 

1722 return _result 

1723 except _core._NotOkStatusException as e: 

1724 _ops.raise_from_not_ok_status(e, name) 

1725 except _core._FallbackException: 

1726 pass 

1727 try: 

1728 return cudnn_rnnv2_eager_fallback( 

1729 input, input_h, input_c, params, rnn_mode=rnn_mode, 

1730 input_mode=input_mode, direction=direction, dropout=dropout, 

1731 seed=seed, seed2=seed2, is_training=is_training, name=name, 

1732 ctx=_ctx) 

1733 except _core._SymbolicException: 

1734 pass # Add nodes to the TensorFlow graph. 

1735 # Add nodes to the TensorFlow graph. 

1736 if rnn_mode is None: 

1737 rnn_mode = "lstm" 

1738 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1739 if input_mode is None: 

1740 input_mode = "linear_input" 

1741 input_mode = _execute.make_str(input_mode, "input_mode") 

1742 if direction is None: 

1743 direction = "unidirectional" 

1744 direction = _execute.make_str(direction, "direction") 

1745 if dropout is None: 

1746 dropout = 0 

1747 dropout = _execute.make_float(dropout, "dropout") 

1748 if seed is None: 

1749 seed = 0 

1750 seed = _execute.make_int(seed, "seed") 

1751 if seed2 is None: 

1752 seed2 = 0 

1753 seed2 = _execute.make_int(seed2, "seed2") 

1754 if is_training is None: 

1755 is_training = True 

1756 is_training = _execute.make_bool(is_training, "is_training") 

1757 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1758 "CudnnRNNV2", input=input, input_h=input_h, input_c=input_c, 

1759 params=params, rnn_mode=rnn_mode, input_mode=input_mode, 

1760 direction=direction, dropout=dropout, seed=seed, 

1761 seed2=seed2, is_training=is_training, name=name) 

1762 _result = _outputs[:] 

1763 if _execute.must_record_gradient(): 

1764 _attrs = ("T", _op._get_attr_type("T"), "rnn_mode", 

1765 _op.get_attr("rnn_mode"), "input_mode", 

1766 _op.get_attr("input_mode"), "direction", 

1767 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

1768 "seed", _op._get_attr_int("seed"), "seed2", 

1769 _op._get_attr_int("seed2"), "is_training", 

1770 _op._get_attr_bool("is_training")) 

1771 _inputs_flat = _op.inputs 

1772 _execute.record_gradient( 

1773 "CudnnRNNV2", _inputs_flat, _attrs, _result) 

1774 _result = _CudnnRNNV2Output._make(_result) 

1775 return _result 

1776 

1777CudnnRNNV2 = tf_export("raw_ops.CudnnRNNV2")(_ops.to_raw_op(cudnn_rnnv2)) 

1778 

1779 

1780def cudnn_rnnv2_eager_fallback(input, input_h, input_c, params, rnn_mode, input_mode, direction, dropout, seed, seed2, is_training, name, ctx): 

1781 if rnn_mode is None: 

1782 rnn_mode = "lstm" 

1783 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1784 if input_mode is None: 

1785 input_mode = "linear_input" 

1786 input_mode = _execute.make_str(input_mode, "input_mode") 

1787 if direction is None: 

1788 direction = "unidirectional" 

1789 direction = _execute.make_str(direction, "direction") 

1790 if dropout is None: 

1791 dropout = 0 

1792 dropout = _execute.make_float(dropout, "dropout") 

1793 if seed is None: 

1794 seed = 0 

1795 seed = _execute.make_int(seed, "seed") 

1796 if seed2 is None: 

1797 seed2 = 0 

1798 seed2 = _execute.make_int(seed2, "seed2") 

1799 if is_training is None: 

1800 is_training = True 

1801 is_training = _execute.make_bool(is_training, "is_training") 

1802 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

1803 (input, input_h, input_c, params) = _inputs_T 

1804 _inputs_flat = [input, input_h, input_c, params] 

1805 _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode, 

1806 "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2, 

1807 "is_training", is_training) 

1808 _result = _execute.execute(b"CudnnRNNV2", 5, inputs=_inputs_flat, 

1809 attrs=_attrs, ctx=ctx, name=name) 

1810 if _execute.must_record_gradient(): 

1811 _execute.record_gradient( 

1812 "CudnnRNNV2", _inputs_flat, _attrs, _result) 

1813 _result = _CudnnRNNV2Output._make(_result) 

1814 return _result 
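
A forward-pass sketch for the op above (CUDA build with cuDNN assumed; all dimensions are illustrative). The shapes follow the docstring: time-major input, and num_layer * dir = 1 for a single unidirectional layer.

import tensorflow as tf

seq_length, batch_size, input_size, num_units = 5, 2, 3, 4
size = tf.raw_ops.CudnnRNNParamsSize(
    num_layers=1, num_units=num_units, input_size=input_size,
    T=tf.float32, S=tf.int32, rnn_mode="lstm")
params = tf.random.uniform(tf.reshape(size, [1]), dtype=tf.float32)
x = tf.random.uniform([seq_length, batch_size, input_size])
h0 = tf.zeros([1, batch_size, num_units])  # [num_layer * dir, batch, units]
c0 = tf.zeros([1, batch_size, num_units])  # LSTM cell state
out = tf.raw_ops.CudnnRNNV2(
    input=x, input_h=h0, input_c=c0, params=params, is_training=True)
# out.output has shape [seq_length, batch_size, num_units]; out.reserve_space
# and out.host_reserved feed the backward pass.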

1815 

1816_CudnnRNNV3Output = collections.namedtuple( 

1817 "CudnnRNNV3", 

1818 ["output", "output_h", "output_c", "reserve_space", "host_reserved"]) 

1819 

1820 

1821def cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths, rnn_mode="lstm", input_mode="linear_input", direction="unidirectional", dropout=0, seed=0, seed2=0, num_proj=0, is_training=True, time_major=True, name=None): 

1822 r"""An RNN backed by cuDNN. 

1823 

1824 Computes the RNN from the input and initial states, with respect to the params 

1825 buffer. Accepts one extra input, "sequence_lengths", compared to CudnnRNN. 

1826 

1827 rnn_mode: Indicates the type of the RNN model. 

1828 input_mode: Indicates whether there is a linear projection between the input and 

1829 the actual computation before the first layer. 'skip_input' is only allowed 

1830 when input_size == num_units; 'auto_select' implies 'skip_input' when 

1831 input_size == num_units; otherwise, it implies 'linear_input'. 

1832 direction: Indicates whether a bidirectional model will be used. Should be 

1833 "unidirectional" or "bidirectional". 

1834 dropout: Dropout probability. When set to 0., dropout is disabled. 

1835 seed: The 1st part of a seed to initialize dropout. 

1836 seed2: The 2nd part of a seed to initialize dropout. 

1837 input: If time_major is true, this is a 3-D tensor with the shape of 

1838 [seq_length, batch_size, input_size]. If time_major is false, the shape is 

1839 [batch_size, seq_length, input_size]. 

1840 input_h: If time_major is true, this is a 3-D tensor with the shape of 

1841 [num_layer * dir, batch_size, num_units]. If time_major is false, the shape 

1842 is [batch_size, num_layer * dir, num_units]. 

1843 input_c: For LSTM, a 3-D tensor with the shape of 

1844 [num_layer * dir, batch, num_units]. For other models, it is ignored. 

1845 params: A 1-D tensor that contains the weights and biases in an opaque layout. 

1846 The size must be created through CudnnRNNParamsSize, and initialized 

1847 separately. Note that they might not be compatible across different 

1848 generations. So it is a good idea to save and restore them in the canonical form. 

1849 sequence_lengths: a vector of lengths of each input sequence. 

1850 output: If time_major is true, this is a 3-D tensor with the shape of 

1851 [seq_length, batch_size, dir * num_units]. If time_major is false, the 

1852 shape is [batch_size, seq_length, dir * num_units]. 

1853 output_h: The same shape as input_h. 

1854 output_c: The same shape as input_c for LSTM. An empty tensor for other models. 

1855 is_training: Indicates whether this operation is used for inference or 

1856 training. 

1857 time_major: Indicates whether the input/output format is time major or batch 

1858 major. 

1859 reserve_space: An opaque tensor that can be used in backprop calculation. It 

1860 is only produced if is_training is true. 

1861 

1862 Args: 

1863 input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 

1864 input_h: A `Tensor`. Must have the same type as `input`. 

1865 input_c: A `Tensor`. Must have the same type as `input`. 

1866 params: A `Tensor`. Must have the same type as `input`. 

1867 sequence_lengths: A `Tensor` of type `int32`. 

1868 rnn_mode: An optional `string` from: `"rnn_relu", "rnn_tanh", "lstm", "gru"`. Defaults to `"lstm"`. 

1869 input_mode: An optional `string` from: `"linear_input", "skip_input", "auto_select"`. Defaults to `"linear_input"`. 

1870 direction: An optional `string` from: `"unidirectional", "bidirectional"`. Defaults to `"unidirectional"`. 

1871 dropout: An optional `float`. Defaults to `0`. 

1872 seed: An optional `int`. Defaults to `0`. 

1873 seed2: An optional `int`. Defaults to `0`. 

1874 num_proj: An optional `int`. Defaults to `0`. 

1875 is_training: An optional `bool`. Defaults to `True`. 

1876 time_major: An optional `bool`. Defaults to `True`. 

1877 name: A name for the operation (optional). 

1878 

1879 Returns: 

1880 A tuple of `Tensor` objects (output, output_h, output_c, reserve_space, host_reserved). 

1881 

1882 output: A `Tensor`. Has the same type as `input`. 

1883 output_h: A `Tensor`. Has the same type as `input`. 

1884 output_c: A `Tensor`. Has the same type as `input`. 

1885 reserve_space: A `Tensor`. Has the same type as `input`. 

1886 host_reserved: A `Tensor` of type `int8`. 

1887 """ 

1888 _ctx = _context._context or _context.context() 

1889 tld = _ctx._thread_local_data 

1890 if tld.is_eager: 

1891 try: 

1892 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1893 _ctx, "CudnnRNNV3", name, input, input_h, input_c, params, 

1894 sequence_lengths, "rnn_mode", rnn_mode, "input_mode", input_mode, 

1895 "direction", direction, "dropout", dropout, "seed", seed, "seed2", 

1896 seed2, "num_proj", num_proj, "is_training", is_training, "time_major", 

1897 time_major) 

1898 _result = _CudnnRNNV3Output._make(_result) 

1899 return _result 

1900 except _core._NotOkStatusException as e: 

1901 _ops.raise_from_not_ok_status(e, name) 

1902 except _core._FallbackException: 

1903 pass 

1904 try: 

1905 return cudnn_rnnv3_eager_fallback( 

1906 input, input_h, input_c, params, sequence_lengths, 

1907 rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, 

1908 dropout=dropout, seed=seed, seed2=seed2, num_proj=num_proj, 

1909 is_training=is_training, time_major=time_major, name=name, ctx=_ctx) 

1910 except _core._SymbolicException: 

1911 pass # Add nodes to the TensorFlow graph. 

1912 # Add nodes to the TensorFlow graph. 

1913 if rnn_mode is None: 

1914 rnn_mode = "lstm" 

1915 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1916 if input_mode is None: 

1917 input_mode = "linear_input" 

1918 input_mode = _execute.make_str(input_mode, "input_mode") 

1919 if direction is None: 

1920 direction = "unidirectional" 

1921 direction = _execute.make_str(direction, "direction") 

1922 if dropout is None: 

1923 dropout = 0 

1924 dropout = _execute.make_float(dropout, "dropout") 

1925 if seed is None: 

1926 seed = 0 

1927 seed = _execute.make_int(seed, "seed") 

1928 if seed2 is None: 

1929 seed2 = 0 

1930 seed2 = _execute.make_int(seed2, "seed2") 

1931 if num_proj is None: 

1932 num_proj = 0 

1933 num_proj = _execute.make_int(num_proj, "num_proj") 

1934 if is_training is None: 

1935 is_training = True 

1936 is_training = _execute.make_bool(is_training, "is_training") 

1937 if time_major is None: 

1938 time_major = True 

1939 time_major = _execute.make_bool(time_major, "time_major") 

1940 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1941 "CudnnRNNV3", input=input, input_h=input_h, input_c=input_c, 

1942 params=params, sequence_lengths=sequence_lengths, 

1943 rnn_mode=rnn_mode, input_mode=input_mode, 

1944 direction=direction, dropout=dropout, seed=seed, 

1945 seed2=seed2, num_proj=num_proj, is_training=is_training, 

1946 time_major=time_major, name=name) 

1947 _result = _outputs[:] 

1948 if _execute.must_record_gradient(): 

1949 _attrs = ("T", _op._get_attr_type("T"), "rnn_mode", 

1950 _op.get_attr("rnn_mode"), "input_mode", 

1951 _op.get_attr("input_mode"), "direction", 

1952 _op.get_attr("direction"), "dropout", _op.get_attr("dropout"), 

1953 "seed", _op._get_attr_int("seed"), "seed2", 

1954 _op._get_attr_int("seed2"), "num_proj", 

1955 _op._get_attr_int("num_proj"), "is_training", 

1956 _op._get_attr_bool("is_training"), "time_major", 

1957 _op._get_attr_bool("time_major")) 

1958 _inputs_flat = _op.inputs 

1959 _execute.record_gradient( 

1960 "CudnnRNNV3", _inputs_flat, _attrs, _result) 

1961 _result = _CudnnRNNV3Output._make(_result) 

1962 return _result 

1963 

1964CudnnRNNV3 = tf_export("raw_ops.CudnnRNNV3")(_ops.to_raw_op(cudnn_rnnv3)) 

1965 

1966 

1967def cudnn_rnnv3_eager_fallback(input, input_h, input_c, params, sequence_lengths, rnn_mode, input_mode, direction, dropout, seed, seed2, num_proj, is_training, time_major, name, ctx): 

1968 if rnn_mode is None: 

1969 rnn_mode = "lstm" 

1970 rnn_mode = _execute.make_str(rnn_mode, "rnn_mode") 

1971 if input_mode is None: 

1972 input_mode = "linear_input" 

1973 input_mode = _execute.make_str(input_mode, "input_mode") 

1974 if direction is None: 

1975 direction = "unidirectional" 

1976 direction = _execute.make_str(direction, "direction") 

1977 if dropout is None: 

1978 dropout = 0 

1979 dropout = _execute.make_float(dropout, "dropout") 

1980 if seed is None: 

1981 seed = 0 

1982 seed = _execute.make_int(seed, "seed") 

1983 if seed2 is None: 

1984 seed2 = 0 

1985 seed2 = _execute.make_int(seed2, "seed2") 

1986 if num_proj is None: 

1987 num_proj = 0 

1988 num_proj = _execute.make_int(num_proj, "num_proj") 

1989 if is_training is None: 

1990 is_training = True 

1991 is_training = _execute.make_bool(is_training, "is_training") 

1992 if time_major is None: 

1993 time_major = True 

1994 time_major = _execute.make_bool(time_major, "time_major") 

1995 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_h, input_c, params], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

1996 (input, input_h, input_c, params) = _inputs_T 

1997 sequence_lengths = _ops.convert_to_tensor(sequence_lengths, _dtypes.int32) 

1998 _inputs_flat = [input, input_h, input_c, params, sequence_lengths] 

1999 _attrs = ("T", _attr_T, "rnn_mode", rnn_mode, "input_mode", input_mode, 

2000 "direction", direction, "dropout", dropout, "seed", seed, "seed2", seed2, 

2001 "num_proj", num_proj, "is_training", is_training, "time_major", time_major) 

2002 _result = _execute.execute(b"CudnnRNNV3", 5, inputs=_inputs_flat, 

2003 attrs=_attrs, ctx=ctx, name=name) 

2004 if _execute.must_record_gradient(): 

2005 _execute.record_gradient( 

2006 "CudnnRNNV3", _inputs_flat, _attrs, _result) 

2007 _result = _CudnnRNNV3Output._make(_result) 

2008 return _result 
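
An inference sketch for the op above, exercising its distinguishing inputs: per-batch sequence_lengths and the time_major flag (CUDA build with cuDNN assumed; dimensions illustrative).

import tensorflow as tf

seq_length, batch_size, input_size, num_units = 5, 2, 3, 4
size = tf.raw_ops.CudnnRNNParamsSize(
    num_layers=1, num_units=num_units, input_size=input_size,
    T=tf.float32, S=tf.int32, rnn_mode="lstm")
params = tf.random.uniform(tf.reshape(size, [1]), dtype=tf.float32)
x = tf.random.uniform([seq_length, batch_size, input_size])  # time-major
h0 = tf.zeros([1, batch_size, num_units])
c0 = tf.zeros([1, batch_size, num_units])
lens = tf.constant([5, 3], dtype=tf.int32)  # one valid length per batch row
out = tf.raw_ops.CudnnRNNV3(
    input=x, input_h=h0, input_c=c0, params=params, sequence_lengths=lens,
    time_major=True, is_training=False)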

2009