Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_rnn_ops.py: 13%
360 statements

"""Python wrappers around TensorFlow ops.

This file is MACHINE GENERATED! Do not edit.
"""

import collections

from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.security.fuzzing.py import annotation_types as _atypes

from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export

from typing import TypeVar

_BlockLSTMOutput = collections.namedtuple(
    "BlockLSTM",
    ["i", "cs", "f", "o", "ci", "co", "h"])


def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for all the time steps.

  This is equivalent to applying LSTMBlockCell in a loop, like so:

  ```python
  for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
      x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
  ```

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`.
    cs: A `Tensor`. Has the same type as `x`.
    f: A `Tensor`. Has the same type as `x`.
    o: A `Tensor`. Has the same type as `x`.
    ci: A `Tensor`. Has the same type as `x`.
    co: A `Tensor`. Has the same type as `x`.
    h: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BlockLSTM", name, seq_len_max, x, cs_prev, h_prev, w, wci, wcf,
        wco, b, "forget_bias", forget_bias, "cell_clip", cell_clip,
        "use_peephole", use_peephole)
      _result = _BlockLSTMOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return block_lstm_eager_fallback(
          seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
          forget_bias=forget_bias, cell_clip=cell_clip,
          use_peephole=use_peephole, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "BlockLSTM", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
      h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
      forget_bias=forget_bias, cell_clip=cell_clip,
      use_peephole=use_peephole, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
              _op.get_attr("cell_clip"), "use_peephole",
              _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BlockLSTM", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMOutput._make(_result)
  return _result

BlockLSTM = tf_export("raw_ops.BlockLSTM")(_ops.to_raw_op(block_lstm))
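
# Editor's usage sketch (not part of the generated code): a minimal,
# hypothetical call to this op through the public `tf.raw_ops.BlockLSTM`
# endpoint exported above. The shapes follow the usual block-LSTM layout
# (w: (num_inputs + cell_size, 4 * cell_size); wci/wcf/wco and b are 1-D);
# all sizes and tensor values below are illustrative assumptions.
#
#   import tensorflow as tf
#
#   timelen, batch_size, num_inputs, cell_size = 5, 2, 3, 4
#   out = tf.raw_ops.BlockLSTM(
#       seq_len_max=tf.constant(timelen, tf.int64),
#       x=tf.random.normal([timelen, batch_size, num_inputs]),
#       cs_prev=tf.zeros([batch_size, cell_size]),
#       h_prev=tf.zeros([batch_size, cell_size]),
#       w=tf.random.normal([num_inputs + cell_size, 4 * cell_size]),
#       wci=tf.zeros([cell_size]),
#       wcf=tf.zeros([cell_size]),
#       wco=tf.zeros([cell_size]),
#       b=tf.zeros([4 * cell_size]))
#   print(out.h.shape)  # (timelen, batch_size, cell_size)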


def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias, cell_clip, use_peephole, name, ctx):
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
            "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BlockLSTM", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMOutput._make(_result)
  return _result


_BlockLSTMGradOutput = collections.namedtuple(
    "BlockLSTMGrad",
    ["x_grad", "cs_prev_grad", "h_prev_grad", "w_grad", "wci_grad", "wcf_grad", "wco_grad", "b_grad"])


def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
  r"""Computes the LSTM cell backward propagation for the entire time sequence.

  This implementation is to be used in conjunction with BlockLSTM.

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    i: A `Tensor`. Must have the same type as `x`.
      The input gate over the whole time sequence.
    cs: A `Tensor`. Must have the same type as `x`.
      The cell state before the tanh over the whole time sequence.
    f: A `Tensor`. Must have the same type as `x`.
      The forget gate over the whole time sequence.
    o: A `Tensor`. Must have the same type as `x`.
      The output gate over the whole time sequence.
    ci: A `Tensor`. Must have the same type as `x`.
      The cell input over the whole time sequence.
    co: A `Tensor`. Must have the same type as `x`.
      The cell after the tanh over the whole time sequence.
    h: A `Tensor`. Must have the same type as `x`.
      The output h vector over the whole time sequence.
    cs_grad: A `Tensor`. Must have the same type as `x`.
      The current gradient of cs.
    h_grad: A `Tensor`. Must have the same type as `x`.
      The gradient of h vector.
    use_peephole: A `bool`. Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).

    x_grad: A `Tensor`. Has the same type as `x`.
    cs_prev_grad: A `Tensor`. Has the same type as `x`.
    h_prev_grad: A `Tensor`. Has the same type as `x`.
    w_grad: A `Tensor`. Has the same type as `x`.
    wci_grad: A `Tensor`. Has the same type as `x`.
    wcf_grad: A `Tensor`. Has the same type as `x`.
    wco_grad: A `Tensor`. Has the same type as `x`.
    b_grad: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BlockLSTMGrad", name, seq_len_max, x, cs_prev, h_prev, w, wci,
        wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, "use_peephole",
        use_peephole)
      _result = _BlockLSTMGradOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return block_lstm_grad_eager_fallback(
          seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o,
          ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "BlockLSTMGrad", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
      h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
      i=i, cs=cs, f=f, o=o, ci=ci, co=co, h=h,
      cs_grad=cs_grad, h_grad=h_grad,
      use_peephole=use_peephole, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("use_peephole", _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BlockLSTMGrad", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMGradOutput._make(_result)
  return _result

BlockLSTMGrad = tf_export("raw_ops.BlockLSTMGrad")(_ops.to_raw_op(block_lstm_grad))
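
# Editor's note (not part of the generated code): this gradient op is normally
# invoked by TensorFlow's autodiff machinery rather than called by hand. A
# hedged sketch of exercising it indirectly, reusing the hypothetical
# tf.raw_ops.BlockLSTM inputs from the example above:
#
#   with tf.GradientTape() as tape:
#     tape.watch([x, w, b])
#     out = tf.raw_ops.BlockLSTM(seq_len_max=seq_len_max, x=x,
#                                cs_prev=cs_prev, h_prev=h_prev, w=w,
#                                wci=wci, wcf=wcf, wco=wco, b=b)
#     loss = tf.reduce_sum(out.h)
#   dx, dw, db = tape.gradient(loss, [x, w, b])
#   # differentiating BlockLSTM should dispatch to BlockLSTMGrad internally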


def block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name, ctx):
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
  _attrs = ("use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"BlockLSTMGrad", 8, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BlockLSTMGrad", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMGradOutput._make(_result)
  return _result


_BlockLSTMGradV2Output = collections.namedtuple(
    "BlockLSTMGradV2",
    ["x_grad", "cs_prev_grad", "h_prev_grad", "w_grad", "wci_grad", "wcf_grad", "wco_grad", "b_grad"])


def block_lstm_grad_v2(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
  r"""Computes the LSTM cell backward propagation for the entire time sequence.

  This implementation is to be used in conjunction with BlockLSTMV2.

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    i: A `Tensor`. Must have the same type as `x`.
      The input gate over the whole time sequence.
    cs: A `Tensor`. Must have the same type as `x`.
      The cell state before the tanh over the whole time sequence.
    f: A `Tensor`. Must have the same type as `x`.
      The forget gate over the whole time sequence.
    o: A `Tensor`. Must have the same type as `x`.
      The output gate over the whole time sequence.
    ci: A `Tensor`. Must have the same type as `x`.
      The cell input over the whole time sequence.
    co: A `Tensor`. Must have the same type as `x`.
      The cell after the tanh over the whole time sequence.
    h: A `Tensor`. Must have the same type as `x`.
      The output h vector over the whole time sequence.
    cs_grad: A `Tensor`. Must have the same type as `x`.
      The current gradient of cs.
    h_grad: A `Tensor`. Must have the same type as `x`.
      The gradient of h vector.
    use_peephole: A `bool`. Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).

    x_grad: A `Tensor`. Has the same type as `x`.
    cs_prev_grad: A `Tensor`. Has the same type as `x`.
    h_prev_grad: A `Tensor`. Has the same type as `x`.
    w_grad: A `Tensor`. Has the same type as `x`.
    wci_grad: A `Tensor`. Has the same type as `x`.
    wcf_grad: A `Tensor`. Has the same type as `x`.
    wco_grad: A `Tensor`. Has the same type as `x`.
    b_grad: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BlockLSTMGradV2", name, seq_len_max, x, cs_prev, h_prev, w,
        wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
        "use_peephole", use_peephole)
      _result = _BlockLSTMGradV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return block_lstm_grad_v2_eager_fallback(
          seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o,
          ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "BlockLSTMGradV2", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
      h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
      i=i, cs=cs, f=f, o=o, ci=ci, co=co, h=h,
      cs_grad=cs_grad, h_grad=h_grad,
      use_peephole=use_peephole, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("use_peephole", _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BlockLSTMGradV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMGradV2Output._make(_result)
  return _result

BlockLSTMGradV2 = tf_export("raw_ops.BlockLSTMGradV2")(_ops.to_raw_op(block_lstm_grad_v2))
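
# Editor's note (not part of the generated code): this op is the gradient
# counterpart of BlockLSTMV2 (IFCO gate layout) and is normally reached
# through autodiff. A hedged sketch of a direct call, assuming `fwd` holds
# the outputs of a matching tf.raw_ops.BlockLSTMV2 call and cs_grad/h_grad
# are incoming gradients shaped like fwd.cs/fwd.h:
#
#   grads = tf.raw_ops.BlockLSTMGradV2(
#       seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev,
#       w=w, wci=wci, wcf=wcf, wco=wco, b=b,
#       i=fwd.i, cs=fwd.cs, f=fwd.f, o=fwd.o, ci=fwd.ci, co=fwd.co, h=fwd.h,
#       cs_grad=cs_grad, h_grad=h_grad, use_peephole=False)
#   grads.x_grad  # same shape as x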


def block_lstm_grad_v2_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name, ctx):
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
  _attrs = ("use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"BlockLSTMGradV2", 8, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BlockLSTMGradV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMGradV2Output._make(_result)
  return _result


_BlockLSTMV2Output = collections.namedtuple(
    "BlockLSTMV2",
    ["i", "cs", "f", "o", "ci", "co", "h"])


def block_lstmv2(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, cell_clip=0, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for all the time steps.

  This is equivalent to applying LSTMBlockCell in a loop, like so:

  ```python
  for x1 in unpack(x):
    i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
      x1, cs_prev, h_prev, w, wci, wcf, wco, b)
    cs_prev = cs1
    h_prev = h1
    i.append(i1)
    cs.append(cs1)
    f.append(f1)
    o.append(o1)
    ci.append(ci1)
    co.append(co1)
    h.append(h1)
  return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
  ```

  Note that unlike LSTMBlockCell (and BlockLSTM), which use the ICFO gate
  layout, this op uses IFCO. So for the snippet above to be equivalent, all
  gate-related outputs would have to be reordered.

  Args:
    seq_len_max: A `Tensor` of type `int64`.
      Maximum time length actually used by this input. Outputs are padded
      with zeros beyond this length.
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the initial cell state.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Initial output of cell (to be used for peephole).
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    cell_clip: An optional `float`. Defaults to `0`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`.
    cs: A `Tensor`. Has the same type as `x`.
    f: A `Tensor`. Has the same type as `x`.
    o: A `Tensor`. Has the same type as `x`.
    ci: A `Tensor`. Has the same type as `x`.
    co: A `Tensor`. Has the same type as `x`.
    h: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BlockLSTMV2", name, seq_len_max, x, cs_prev, h_prev, w, wci,
        wcf, wco, b, "cell_clip", cell_clip, "use_peephole", use_peephole)
      _result = _BlockLSTMV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return block_lstmv2_eager_fallback(
          seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
          cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if cell_clip is None:
    cell_clip = 0
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "BlockLSTMV2", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
      h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
      cell_clip=cell_clip, use_peephole=use_peephole,
      name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("cell_clip", _op.get_attr("cell_clip"), "use_peephole",
              _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BlockLSTMV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMV2Output._make(_result)
  return _result

BlockLSTMV2 = tf_export("raw_ops.BlockLSTMV2")(_ops.to_raw_op(block_lstmv2))
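
# Editor's usage sketch (not part of the generated code): BlockLSTMV2 takes
# the same inputs as BlockLSTM but defaults cell_clip to 0 and expects the
# IFCO gate layout in w and b (see the docstring note above). Reusing the
# hypothetical sizes from the BlockLSTM example earlier in this file:
#
#   out = tf.raw_ops.BlockLSTMV2(
#       seq_len_max=tf.constant(timelen, tf.int64),
#       x=tf.random.normal([timelen, batch_size, num_inputs]),
#       cs_prev=tf.zeros([batch_size, cell_size]),
#       h_prev=tf.zeros([batch_size, cell_size]),
#       w=tf.random.normal([num_inputs + cell_size, 4 * cell_size]),
#       wci=tf.zeros([cell_size]), wcf=tf.zeros([cell_size]),
#       wco=tf.zeros([cell_size]), b=tf.zeros([4 * cell_size]))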


def block_lstmv2_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, cell_clip, use_peephole, name, ctx):
  if cell_clip is None:
    cell_clip = 0
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
  _inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("cell_clip", cell_clip, "use_peephole", use_peephole, "T",
            _attr_T)
  _result = _execute.execute(b"BlockLSTMV2", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BlockLSTMV2", _inputs_flat, _attrs, _result)
  _result = _BlockLSTMV2Output._make(_result)
  return _result


_GRUBlockCellOutput = collections.namedtuple(
    "GRUBlockCell",
    ["r", "u", "c", "h"])


def gru_block_cell(x, h_prev, w_ru, w_c, b_ru, b_c, name=None):
  r"""Computes the GRU cell forward propagation for 1 time step.

  Args
    x: Input to the GRU cell.
    h_prev: State input from the previous GRU cell.
    w_ru: Weight matrix for the reset and update gate.
    w_c: Weight matrix for the cell connection gate.
    b_ru: Bias vector for the reset and update gate.
    b_c: Bias vector for the cell connection gate.

  Returns
    r: Output of the reset gate.
    u: Output of the update gate.
    c: Output of the cell connection gate.
    h: Current state of the GRU cell.

  Note on notation of the variables:

  Concatenation of a and b is represented by a_b.
  Element-wise product of a and b is represented by ab or a \circ b.
  Matrix multiplication is represented by *.

  Biases are initialized with:

  `b_ru` - constant_initializer(1.0)
  `b_c` - constant_initializer(0.0)

  This kernel op implements the following mathematical equations:

  ```
  x_h_prev = [x, h_prev]

  [r_bar u_bar] = x_h_prev * w_ru + b_ru

  r = sigmoid(r_bar)
  u = sigmoid(u_bar)

  h_prevr = h_prev \circ r

  x_h_prevr = [x h_prevr]

  c_bar = x_h_prevr * w_c + b_c
  c = tanh(c_bar)

  h = (1-u) \circ c + u \circ h_prev
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
    h_prev: A `Tensor`. Must have the same type as `x`.
    w_ru: A `Tensor`. Must have the same type as `x`.
    w_c: A `Tensor`. Must have the same type as `x`.
    b_ru: A `Tensor`. Must have the same type as `x`.
    b_c: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (r, u, c, h).

    r: A `Tensor`. Has the same type as `x`.
    u: A `Tensor`. Has the same type as `x`.
    c: A `Tensor`. Has the same type as `x`.
    h: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "GRUBlockCell", name, x, h_prev, w_ru, w_c, b_ru, b_c)
      _result = _GRUBlockCellOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return gru_block_cell_eager_fallback(
          x, h_prev, w_ru, w_c, b_ru, b_c, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "GRUBlockCell", x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru,
      b_c=b_c, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "GRUBlockCell", _inputs_flat, _attrs, _result)
  _result = _GRUBlockCellOutput._make(_result)
  return _result

GRUBlockCell = tf_export("raw_ops.GRUBlockCell")(_ops.to_raw_op(gru_block_cell))
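
# Editor's usage sketch (not part of the generated code): a minimal,
# hypothetical tf.raw_ops.GRUBlockCell call. Following the equations in the
# docstring, w_ru maps [x, h_prev] to the stacked reset/update gates and w_c
# maps [x, h_prevr] to the candidate state; the shapes below are assumptions
# consistent with that layout.
#
#   import tensorflow as tf
#
#   batch_size, input_size, cell_size = 2, 3, 4
#   r, u, c, h = tf.raw_ops.GRUBlockCell(
#       x=tf.random.normal([batch_size, input_size]),
#       h_prev=tf.zeros([batch_size, cell_size]),
#       w_ru=tf.random.normal([input_size + cell_size, 2 * cell_size]),
#       w_c=tf.random.normal([input_size + cell_size, cell_size]),
#       b_ru=tf.ones([2 * cell_size]),   # docstring: b_ru initialized to 1.0
#       b_c=tf.zeros([cell_size]))       # docstring: b_c initialized to 0.0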


def gru_block_cell_eager_fallback(x, h_prev, w_ru, w_c, b_ru, b_c, name, ctx):
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, h_prev, w_ru, w_c, b_ru, b_c], ctx, [_dtypes.float32, ])
  (x, h_prev, w_ru, w_c, b_ru, b_c) = _inputs_T
  _inputs_flat = [x, h_prev, w_ru, w_c, b_ru, b_c]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"GRUBlockCell", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "GRUBlockCell", _inputs_flat, _attrs, _result)
  _result = _GRUBlockCellOutput._make(_result)
  return _result


_GRUBlockCellGradOutput = collections.namedtuple(
    "GRUBlockCellGrad",
    ["d_x", "d_h_prev", "d_c_bar", "d_r_bar_u_bar"])


def gru_block_cell_grad(x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h, name=None):
  r"""Computes the GRU cell back-propagation for 1 time step.

  Args
    x: Input to the GRU cell.
    h_prev: State input from the previous GRU cell.
    w_ru: Weight matrix for the reset and update gate.
    w_c: Weight matrix for the cell connection gate.
    b_ru: Bias vector for the reset and update gate.
    b_c: Bias vector for the cell connection gate.
    r: Output of the reset gate.
    u: Output of the update gate.
    c: Output of the cell connection gate.
    d_h: Gradients of h_new with respect to the objective function.

  Returns
    d_x: Gradients of x with respect to the objective function.
    d_h_prev: Gradients of h with respect to the objective function.
    d_c_bar: Gradients of c_bar with respect to the objective function.
    d_r_bar_u_bar: Gradients of r_bar and u_bar with respect to the objective
      function.

  This kernel op implements the following mathematical equations:

  Note on notation of the variables:

  Concatenation of a and b is represented by a_b.
  Element-wise product of a and b is represented by ab or a \circ b.
  Matrix multiplication is represented by *.

  Additional notes for clarity:

  `w_ru` can be segmented into 4 different matrices.
  ```
  w_ru = [w_r_x w_u_x
          w_r_h_prev w_u_h_prev]
  ```
  Similarly, `w_c` can be segmented into 2 different matrices.
  ```
  w_c = [w_c_x w_c_h_prevr]
  ```
  Same goes for biases.
  ```
  b_ru = [b_ru_x b_ru_h]
  b_c = [b_c_x b_c_h]
  ```
  Another note on notation:
  ```
  d_x = d_x_component_1 + d_x_component_2

  where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T
  and d_x_component_2 = d_c_bar * w_c_x^T

  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
  where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_u_h_prev^T
  ```

  Mathematics behind the gradients below:
  ```
  d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
  d_u_bar = d_h \circ (h_prev-c) \circ u \circ (1-u)

  d_r_bar_u_bar = [d_r_bar d_u_bar]

  [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T

  [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T

  d_x = d_x_component_1 + d_x_component_2

  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
  ```
  The calculation below is performed in the Python wrapper for the gradients
  (not in the gradient kernel):
  ```
  d_w_ru = x_h_prev^T * d_r_bar_u_bar

  d_w_c = x_h_prevr^T * d_c_bar

  d_b_ru = sum of d_r_bar_u_bar along axis = 0

  d_b_c = sum of d_c_bar along axis = 0
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`.
    h_prev: A `Tensor`. Must have the same type as `x`.
    w_ru: A `Tensor`. Must have the same type as `x`.
    w_c: A `Tensor`. Must have the same type as `x`.
    b_ru: A `Tensor`. Must have the same type as `x`.
    b_c: A `Tensor`. Must have the same type as `x`.
    r: A `Tensor`. Must have the same type as `x`.
    u: A `Tensor`. Must have the same type as `x`.
    c: A `Tensor`. Must have the same type as `x`.
    d_h: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (d_x, d_h_prev, d_c_bar, d_r_bar_u_bar).

    d_x: A `Tensor`. Has the same type as `x`.
    d_h_prev: A `Tensor`. Has the same type as `x`.
    d_c_bar: A `Tensor`. Has the same type as `x`.
    d_r_bar_u_bar: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "GRUBlockCellGrad", name, x, h_prev, w_ru, w_c, b_ru, b_c, r, u,
        c, d_h)
      _result = _GRUBlockCellGradOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return gru_block_cell_grad_eager_fallback(
          x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "GRUBlockCellGrad", x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru,
      b_c=b_c, r=r, u=u, c=c, d_h=d_h, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "GRUBlockCellGrad", _inputs_flat, _attrs, _result)
  _result = _GRUBlockCellGradOutput._make(_result)
  return _result

GRUBlockCellGrad = tf_export("raw_ops.GRUBlockCellGrad")(_ops.to_raw_op(gru_block_cell_grad))
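
# Editor's note (not part of the generated code): GRUBlockCellGrad consumes
# the forward-pass outputs (r, u, c) plus the incoming gradient d_h, and the
# docstring above notes that d_w_ru / d_w_c / d_b_ru / d_b_c are assembled in
# the Python gradient wrapper, not in this kernel. A hedged sketch, reusing
# the hypothetical GRUBlockCell inputs from the example above:
#
#   d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = tf.raw_ops.GRUBlockCellGrad(
#       x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c,
#       r=r, u=u, c=c, d_h=tf.ones_like(h))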


def gru_block_cell_grad_eager_fallback(x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h, name, ctx):
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h], ctx, [_dtypes.float32, ])
  (x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h) = _inputs_T
  _inputs_flat = [x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"GRUBlockCellGrad", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "GRUBlockCellGrad", _inputs_flat, _attrs, _result)
  _result = _GRUBlockCellGradOutput._make(_result)
  return _result


_LSTMBlockCellOutput = collections.namedtuple(
    "LSTMBlockCell",
    ["i", "cs", "f", "o", "ci", "co", "h"])


def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
  r"""Computes the LSTM cell forward propagation for 1 time step.

  This implementation uses 1 weight matrix and 1 bias vector, and there's an
  optional peephole connection.

  This kernel op implements the following mathematical equations:

  ```python
  xh = [x, h_prev]
  [i, ci, f, o] = xh * w + b
  f = f + forget_bias

  if not use_peephole:
    wci = wcf = wco = 0

  i = sigmoid(cs_prev * wci + i)
  f = sigmoid(cs_prev * wcf + f)
  ci = tanh(ci)

  cs = ci .* i + cs_prev .* f
  cs = clip(cs, cell_clip)

  o = sigmoid(cs * wco + o)
  co = tanh(cs)
  h = co .* o
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The input to the LSTM cell, shape (batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      Value of the cell state at previous time step.
    h_prev: A `Tensor`. Must have the same type as `x`.
      Output of the previous cell at previous time step.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
    cell_clip: An optional `float`. Defaults to `3`.
      Value to clip the 'cs' value to.
    use_peephole: An optional `bool`. Defaults to `False`.
      Whether to use peephole weights.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).

    i: A `Tensor`. Has the same type as `x`.
    cs: A `Tensor`. Has the same type as `x`.
    f: A `Tensor`. Has the same type as `x`.
    o: A `Tensor`. Has the same type as `x`.
    ci: A `Tensor`. Has the same type as `x`.
    co: A `Tensor`. Has the same type as `x`.
    h: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "LSTMBlockCell", name, x, cs_prev, h_prev, w, wci, wcf, wco, b,
        "forget_bias", forget_bias, "cell_clip", cell_clip, "use_peephole",
        use_peephole)
      _result = _LSTMBlockCellOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return lstm_block_cell_eager_fallback(
          x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias,
          cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "LSTMBlockCell", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,
      wcf=wcf, wco=wco, b=b, forget_bias=forget_bias,
      cell_clip=cell_clip, use_peephole=use_peephole,
      name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
              _op.get_attr("cell_clip"), "use_peephole",
              _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LSTMBlockCell", _inputs_flat, _attrs, _result)
  _result = _LSTMBlockCellOutput._make(_result)
  return _result

LSTMBlockCell = tf_export("raw_ops.LSTMBlockCell")(_ops.to_raw_op(lstm_block_cell))
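
# Editor's usage sketch (not part of the generated code): a single-step,
# hypothetical tf.raw_ops.LSTMBlockCell call; same weight layout as the
# BlockLSTM example earlier in this file, but without the leading time
# dimension on x. All sizes below are illustrative assumptions.
#
#   import tensorflow as tf
#
#   batch_size, num_inputs, cell_size = 2, 3, 4
#   out = tf.raw_ops.LSTMBlockCell(
#       x=tf.random.normal([batch_size, num_inputs]),
#       cs_prev=tf.zeros([batch_size, cell_size]),
#       h_prev=tf.zeros([batch_size, cell_size]),
#       w=tf.random.normal([num_inputs + cell_size, 4 * cell_size]),
#       wci=tf.zeros([cell_size]), wcf=tf.zeros([cell_size]),
#       wco=tf.zeros([cell_size]), b=tf.zeros([4 * cell_size]))
#   out.h  # (batch_size, cell_size)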


def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias, cell_clip, use_peephole, name, ctx):
  if forget_bias is None:
    forget_bias = 1
  forget_bias = _execute.make_float(forget_bias, "forget_bias")
  if cell_clip is None:
    cell_clip = 3
  cell_clip = _execute.make_float(cell_clip, "cell_clip")
  if use_peephole is None:
    use_peephole = False
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
  _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
  _attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
            "use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "LSTMBlockCell", _inputs_flat, _attrs, _result)
  _result = _LSTMBlockCellOutput._make(_result)
  return _result


_LSTMBlockCellGradOutput = collections.namedtuple(
    "LSTMBlockCellGrad",
    ["cs_prev_grad", "dicfo", "wci_grad", "wcf_grad", "wco_grad"])


def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
  r"""Computes the LSTM cell backward propagation for 1 time step.

  This implementation is to be used in conjunction with LSTMBlockCell.

  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `float32`.
      The input to the LSTM cell, shape (batch_size, num_inputs).
    cs_prev: A `Tensor`. Must have the same type as `x`.
      The previous cell state.
    h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.
    w: A `Tensor`. Must have the same type as `x`. The weight matrix.
    wci: A `Tensor`. Must have the same type as `x`.
      The weight matrix for input gate peephole connection.
    wcf: A `Tensor`. Must have the same type as `x`.
      The weight matrix for forget gate peephole connection.
    wco: A `Tensor`. Must have the same type as `x`.
      The weight matrix for output gate peephole connection.
    b: A `Tensor`. Must have the same type as `x`. The bias vector.
    i: A `Tensor`. Must have the same type as `x`. The input gate.
    cs: A `Tensor`. Must have the same type as `x`.
      The cell state before the tanh.
    f: A `Tensor`. Must have the same type as `x`. The forget gate.
    o: A `Tensor`. Must have the same type as `x`. The output gate.
    ci: A `Tensor`. Must have the same type as `x`. The cell input.
    co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.
    cs_grad: A `Tensor`. Must have the same type as `x`.
      The current gradient of cs.
    h_grad: A `Tensor`. Must have the same type as `x`.
      The gradient of h vector.
    use_peephole: A `bool`. Whether the cell uses peephole connections.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).

    cs_prev_grad: A `Tensor`. Has the same type as `x`.
    dicfo: A `Tensor`. Has the same type as `x`.
    wci_grad: A `Tensor`. Has the same type as `x`.
    wcf_grad: A `Tensor`. Has the same type as `x`.
    wco_grad: A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "LSTMBlockCellGrad", name, x, cs_prev, h_prev, w, wci, wcf, wco,
        b, i, cs, f, o, ci, co, cs_grad, h_grad, "use_peephole", use_peephole)
      _result = _LSTMBlockCellGradOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return lstm_block_cell_grad_eager_fallback(
          x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co,
          cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "LSTMBlockCellGrad", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,
      wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f,
      o=o, ci=ci, co=co, cs_grad=cs_grad,
      h_grad=h_grad, use_peephole=use_peephole,
      name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("use_peephole", _op._get_attr_bool("use_peephole"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LSTMBlockCellGrad", _inputs_flat, _attrs, _result)
  _result = _LSTMBlockCellGradOutput._make(_result)
  return _result

LSTMBlockCellGrad = tf_export("raw_ops.LSTMBlockCellGrad")(_ops.to_raw_op(lstm_block_cell_grad))
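
# Editor's note (not part of the generated code): as with the other *Grad ops
# in this file, LSTMBlockCellGrad is normally reached via autodiff rather
# than called directly; judging by the ICFO layout noted elsewhere in this
# file, its `dicfo` output appears to pack the per-gate gradients. A hedged,
# tape-based sketch over the hypothetical LSTMBlockCell inputs from the
# example above:
#
#   with tf.GradientTape() as tape:
#     tape.watch([x, w, b])
#     out = tf.raw_ops.LSTMBlockCell(x=x, cs_prev=cs_prev, h_prev=h_prev,
#                                    w=w, wci=wci, wcf=wcf, wco=wco, b=b)
#     loss = tf.reduce_sum(out.h)
#   dx, dw, db = tape.gradient(loss, [x, w, b])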


def lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name, ctx):
  use_peephole = _execute.make_bool(use_peephole, "use_peephole")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], ctx, [_dtypes.half, _dtypes.float32, ])
  (x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T
  _inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]
  _attrs = ("use_peephole", use_peephole, "T", _attr_T)
  _result = _execute.execute(b"LSTMBlockCellGrad", 5, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "LSTMBlockCellGrad", _inputs_flat, _attrs, _result)
  _result = _LSTMBlockCellGradOutput._make(_result)
  return _result