Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_ctc_ops.py: 13%
226 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1"""Python wrappers around TensorFlow ops.
3This file is MACHINE GENERATED! Do not edit.
4"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
23_CTCBeamSearchDecoderOutput = collections.namedtuple(
24 "CTCBeamSearchDecoder",
25 ["decoded_indices", "decoded_values", "decoded_shape", "log_probability"])
def ctc_beam_search_decoder(inputs, sequence_length, beam_width, top_paths, merge_repeated=True, name=None):
  r"""Performs beam search decoding on the logits given in input.

  A note about the attribute merge_repeated: For the beam search decoder,
  this means that if consecutive entries in a beam are the same, only
  the first of these is emitted. That is, when the top path is "A B B B B",
  "A B" is returned if merge_repeated = True but "A B B B B" is
  returned if merge_repeated = False.

  Args:
    inputs: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch)`.
    beam_width: An `int` that is `>= 1`.
      A scalar >= 0 (beam search beam width).
    top_paths: An `int` that is `>= 1`.
      A scalar >= 0, <= beam_width (controls output size).
    merge_repeated: An optional `bool`. Defaults to `True`.
      If true, merge repeated classes in output.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A list of `top_paths` `Tensor` objects with type `int64`.
    decoded_values: A list of `top_paths` `Tensor` objects with type `int64`.
    decoded_shape: A list of `top_paths` `Tensor` objects with type `int64`.
    log_probability: A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; on _FallbackException fall
    # through to the slower Python eager path below.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CTCBeamSearchDecoder", name, inputs, sequence_length,
        "beam_width", beam_width, "top_paths", top_paths, "merge_repeated",
        merge_repeated)
      _result = _CTCBeamSearchDecoderOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as the appropriate Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ctc_beam_search_decoder_eager_fallback(
          inputs, sequence_length, beam_width=beam_width, top_paths=top_paths,
          merge_repeated=merge_repeated, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize attrs, then add an op node to the graph.
  beam_width = _execute.make_int(beam_width, "beam_width")
  top_paths = _execute.make_int(top_paths, "top_paths")
  if merge_repeated is None:
    merge_repeated = True
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CTCBeamSearchDecoder", inputs=inputs,
                                sequence_length=sequence_length,
                                beam_width=beam_width, top_paths=top_paths,
                                merge_repeated=merge_repeated, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attr tuple must mirror the op definition exactly for gradient replay.
    _attrs = ("beam_width", _op._get_attr_int("beam_width"), "top_paths",
              _op._get_attr_int("top_paths"), "merge_repeated",
              _op._get_attr_bool("merge_repeated"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CTCBeamSearchDecoder", _inputs_flat, _attrs, _result)
  # Regroup the flat output list into
  # [indices_list, values_list, shape_list, log_probability]; each of the
  # first three becomes a list of `top_paths` tensors.
  _result = [_result[:top_paths]] + _result[top_paths:]
  _result = _result[:1] + [_result[1:1 + top_paths]] + _result[1 + top_paths:]
  _result = _result[:2] + [_result[2:2 + top_paths]] + _result[2 + top_paths:]
  _result = _CTCBeamSearchDecoderOutput._make(_result)
  return _result
# Exported raw-op endpoint: tf.raw_ops.CTCBeamSearchDecoder.
CTCBeamSearchDecoder = tf_export("raw_ops.CTCBeamSearchDecoder")(_ops.to_raw_op(ctc_beam_search_decoder))
def ctc_beam_search_decoder_eager_fallback(inputs, sequence_length, beam_width, top_paths, merge_repeated, name, ctx):
  """Python eager-path fallback for CTCBeamSearchDecoder.

  Used when the C fast path raises _FallbackException; converts inputs to
  tensors explicitly and executes the op via _execute.execute.
  """
  beam_width = _execute.make_int(beam_width, "beam_width")
  top_paths = _execute.make_int(top_paths, "top_paths")
  if merge_repeated is None:
    merge_repeated = True
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  # Resolve T (float32/float64) from `inputs`, defaulting to float32.
  _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32)
  sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
  _inputs_flat = [inputs, sequence_length]
  _attrs = ("beam_width", beam_width, "top_paths", top_paths,
  "merge_repeated", merge_repeated, "T", _attr_T)
  # Output count: top_paths indices + top_paths values + top_paths shapes
  # + 1 log_probability tensor.
  _result = _execute.execute(b"CTCBeamSearchDecoder", top_paths + top_paths +
                             top_paths + 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CTCBeamSearchDecoder", _inputs_flat, _attrs, _result)
  # Regroup the flat outputs into per-path lists (see graph path above).
  _result = [_result[:top_paths]] + _result[top_paths:]
  _result = _result[:1] + [_result[1:1 + top_paths]] + _result[1 + top_paths:]
  _result = _result[:2] + [_result[2:2 + top_paths]] + _result[2 + top_paths:]
  _result = _CTCBeamSearchDecoderOutput._make(_result)
  return _result
130_CTCGreedyDecoderOutput = collections.namedtuple(
131 "CTCGreedyDecoder",
132 ["decoded_indices", "decoded_values", "decoded_shape", "log_probability"])
def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=False, blank_index=-1, name=None):
  r"""Performs greedy decoding on the logits given in inputs.

  A note about the attribute merge_repeated: if enabled, when
  consecutive logits' maximum indices are the same, only the first of
  these is emitted. Labeling the blank '*', the sequence "A B B * B B"
  becomes "A B B" if merge_repeated = True and "A B B B B" if
  merge_repeated = False.

  Regardless of the value of merge_repeated, if the maximum index of a given
  time and batch corresponds to the blank, index `(num_classes - 1)`, no new
  element is emitted.

  Args:
    inputs: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths, size `(batch_size)`.
    merge_repeated: An optional `bool`. Defaults to `False`.
      If True, merge repeated classes in output.
    blank_index: An optional `int`. Defaults to `-1`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (decoded_indices, decoded_values, decoded_shape, log_probability).

    decoded_indices: A `Tensor` of type `int64`.
    decoded_values: A `Tensor` of type `int64`.
    decoded_shape: A `Tensor` of type `int64`.
    log_probability: A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; on _FallbackException fall
    # through to the slower Python eager path below.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CTCGreedyDecoder", name, inputs, sequence_length,
        "merge_repeated", merge_repeated, "blank_index", blank_index)
      _result = _CTCGreedyDecoderOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as the appropriate Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ctc_greedy_decoder_eager_fallback(
          inputs, sequence_length, merge_repeated=merge_repeated,
          blank_index=blank_index, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize attrs, then add an op node to the graph.
  if merge_repeated is None:
    merge_repeated = False
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  if blank_index is None:
    blank_index = -1
  blank_index = _execute.make_int(blank_index, "blank_index")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CTCGreedyDecoder", inputs=inputs, sequence_length=sequence_length,
                            merge_repeated=merge_repeated,
                            blank_index=blank_index, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attr tuple must mirror the op definition exactly for gradient replay.
    _attrs = ("merge_repeated", _op._get_attr_bool("merge_repeated"),
              "blank_index", _op._get_attr_int("blank_index"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CTCGreedyDecoder", _inputs_flat, _attrs, _result)
  _result = _CTCGreedyDecoderOutput._make(_result)
  return _result
# Exported raw-op endpoint: tf.raw_ops.CTCGreedyDecoder.
CTCGreedyDecoder = tf_export("raw_ops.CTCGreedyDecoder")(_ops.to_raw_op(ctc_greedy_decoder))
def ctc_greedy_decoder_eager_fallback(inputs, sequence_length, merge_repeated, blank_index, name, ctx):
  """Python eager-path fallback for CTCGreedyDecoder.

  Used when the C fast path raises _FallbackException; converts inputs to
  tensors explicitly and executes the op via _execute.execute.
  """
  if merge_repeated is None:
    merge_repeated = False
  merge_repeated = _execute.make_bool(merge_repeated, "merge_repeated")
  if blank_index is None:
    blank_index = -1
  blank_index = _execute.make_int(blank_index, "blank_index")
  # Resolve T (float32/float64) from `inputs`, defaulting to float32.
  _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32)
  sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
  _inputs_flat = [inputs, sequence_length]
  _attrs = ("merge_repeated", merge_repeated, "blank_index", blank_index, "T",
  _attr_T)
  # 4 outputs: decoded_indices, decoded_values, decoded_shape,
  # log_probability.
  _result = _execute.execute(b"CTCGreedyDecoder", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CTCGreedyDecoder", _inputs_flat, _attrs, _result)
  _result = _CTCGreedyDecoderOutput._make(_result)
  return _result
230_CTCLossOutput = collections.namedtuple(
231 "CTCLoss",
232 ["loss", "gradient"])
def ctc_loss(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, name=None):
  r"""Calculates the CTC Loss (log probability) for each batch entry. Also calculates

  the gradient. This class performs the softmax operation for you, so inputs
  should be e.g. linear projections of outputs by an LSTM.

  Args:
    inputs: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
    labels_indices: A `Tensor` of type `int64`.
      The indices of a `SparseTensor<int32, 2>`.
      `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
      `(batch b, time t)`.
    labels_values: A `Tensor` of type `int32`.
      The values (labels) associated with the given batch and time.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths (batch).
    preprocess_collapse_repeated: An optional `bool`. Defaults to `False`.
      Scalar, if true then repeated labels are
      collapsed prior to the CTC calculation.
    ctc_merge_repeated: An optional `bool`. Defaults to `True`.
      Scalar. If set to false, *during* CTC calculation
      repeated non-blank labels will not be merged and are interpreted as
      individual labels. This is a simplified version of CTC.
    ignore_longer_outputs_than_inputs: An optional `bool`. Defaults to `False`.
      Scalar. If set to true, during CTC
      calculation, items that have longer output sequences than input sequences
      are skipped: they don't contribute to the loss term and have zero-gradient.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, gradient).

    loss: A `Tensor`. Has the same type as `inputs`.
    gradient: A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; on _FallbackException fall
    # through to the slower Python eager path below.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CTCLoss", name, inputs, labels_indices, labels_values,
        sequence_length, "preprocess_collapse_repeated",
        preprocess_collapse_repeated, "ctc_merge_repeated",
        ctc_merge_repeated, "ignore_longer_outputs_than_inputs",
        ignore_longer_outputs_than_inputs)
      _result = _CTCLossOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as the appropriate Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ctc_loss_eager_fallback(
          inputs, labels_indices, labels_values, sequence_length,
          preprocess_collapse_repeated=preprocess_collapse_repeated,
          ctc_merge_repeated=ctc_merge_repeated,
          ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize attrs, then add an op node to the graph.
  if preprocess_collapse_repeated is None:
    preprocess_collapse_repeated = False
  preprocess_collapse_repeated = _execute.make_bool(preprocess_collapse_repeated, "preprocess_collapse_repeated")
  if ctc_merge_repeated is None:
    ctc_merge_repeated = True
  ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated, "ctc_merge_repeated")
  if ignore_longer_outputs_than_inputs is None:
    ignore_longer_outputs_than_inputs = False
  ignore_longer_outputs_than_inputs = _execute.make_bool(ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CTCLoss", inputs=inputs, labels_indices=labels_indices,
                   labels_values=labels_values,
                   sequence_length=sequence_length,
                   preprocess_collapse_repeated=preprocess_collapse_repeated,
                   ctc_merge_repeated=ctc_merge_repeated,
                   ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
                   name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attr tuple must mirror the op definition exactly for gradient replay.
    _attrs = ("preprocess_collapse_repeated",
              _op._get_attr_bool("preprocess_collapse_repeated"),
              "ctc_merge_repeated", _op._get_attr_bool("ctc_merge_repeated"),
              "ignore_longer_outputs_than_inputs",
              _op._get_attr_bool("ignore_longer_outputs_than_inputs"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CTCLoss", _inputs_flat, _attrs, _result)
  _result = _CTCLossOutput._make(_result)
  return _result
# Exported raw-op endpoint: tf.raw_ops.CTCLoss.
CTCLoss = tf_export("raw_ops.CTCLoss")(_ops.to_raw_op(ctc_loss))
def ctc_loss_eager_fallback(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated, ctc_merge_repeated, ignore_longer_outputs_than_inputs, name, ctx):
  """Python eager-path fallback for CTCLoss.

  Used when the C fast path raises _FallbackException; converts inputs to
  tensors explicitly and executes the op via _execute.execute.
  """
  if preprocess_collapse_repeated is None:
    preprocess_collapse_repeated = False
  preprocess_collapse_repeated = _execute.make_bool(preprocess_collapse_repeated, "preprocess_collapse_repeated")
  if ctc_merge_repeated is None:
    ctc_merge_repeated = True
  ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated, "ctc_merge_repeated")
  if ignore_longer_outputs_than_inputs is None:
    ignore_longer_outputs_than_inputs = False
  ignore_longer_outputs_than_inputs = _execute.make_bool(ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
  # Resolve T (float32/float64) from `inputs`, defaulting to float32.
  _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, [_dtypes.float32, _dtypes.float64, ], _dtypes.float32)
  labels_indices = _ops.convert_to_tensor(labels_indices, _dtypes.int64)
  labels_values = _ops.convert_to_tensor(labels_values, _dtypes.int32)
  sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
  _inputs_flat = [inputs, labels_indices, labels_values, sequence_length]
  _attrs = ("preprocess_collapse_repeated", preprocess_collapse_repeated,
  "ctc_merge_repeated", ctc_merge_repeated,
  "ignore_longer_outputs_than_inputs", ignore_longer_outputs_than_inputs, "T",
  _attr_T)
  # 2 outputs: loss and gradient.
  _result = _execute.execute(b"CTCLoss", 2, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CTCLoss", _inputs_flat, _attrs, _result)
  _result = _CTCLossOutput._make(_result)
  return _result
358_CTCLossV2Output = collections.namedtuple(
359 "CTCLossV2",
360 ["loss", "gradient"])
def ctc_loss_v2(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, name=None):
  r"""Calculates the CTC Loss (log probability) for each batch entry. Also calculates

  the gradient. This class performs the softmax operation for you, so inputs
  should be e.g. linear projections of outputs by an LSTM.

  Args:
    inputs: A `Tensor` of type `float32`.
      3-D, shape: `(max_time x batch_size x num_classes)`, the logits. Default blank
      label is 0 rather num_classes - 1.
    labels_indices: A `Tensor` of type `int64`.
      The indices of a `SparseTensor<int32, 2>`.
      `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
      `(batch b, time t)`.
    labels_values: A `Tensor` of type `int32`.
      The values (labels) associated with the given batch and time.
    sequence_length: A `Tensor` of type `int32`.
      A vector containing sequence lengths (batch).
    preprocess_collapse_repeated: An optional `bool`. Defaults to `False`.
      Scalar, if true then repeated labels are
      collapsed prior to the CTC calculation.
    ctc_merge_repeated: An optional `bool`. Defaults to `True`.
      Scalar. If set to false, *during* CTC calculation
      repeated non-blank labels will not be merged and are interpreted as
      individual labels. This is a simplified version of CTC.
    ignore_longer_outputs_than_inputs: An optional `bool`. Defaults to `False`.
      Scalar. If set to true, during CTC
      calculation, items that have longer output sequences than input sequences
      are skipped: they don't contribute to the loss term and have zero-gradient.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (loss, gradient).

    loss: A `Tensor` of type `float32`.
    gradient: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C fast path first; on _FallbackException fall
    # through to the slower Python eager path below.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CTCLossV2", name, inputs, labels_indices, labels_values,
        sequence_length, "preprocess_collapse_repeated",
        preprocess_collapse_repeated, "ctc_merge_repeated",
        ctc_merge_repeated, "ignore_longer_outputs_than_inputs",
        ignore_longer_outputs_than_inputs)
      _result = _CTCLossV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Re-raise the C-level status as the appropriate Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return ctc_loss_v2_eager_fallback(
          inputs, labels_indices, labels_values, sequence_length,
          preprocess_collapse_repeated=preprocess_collapse_repeated,
          ctc_merge_repeated=ctc_merge_repeated,
          ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate/normalize attrs, then add an op node to the graph.
  if preprocess_collapse_repeated is None:
    preprocess_collapse_repeated = False
  preprocess_collapse_repeated = _execute.make_bool(preprocess_collapse_repeated, "preprocess_collapse_repeated")
  if ctc_merge_repeated is None:
    ctc_merge_repeated = True
  ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated, "ctc_merge_repeated")
  if ignore_longer_outputs_than_inputs is None:
    ignore_longer_outputs_than_inputs = False
  ignore_longer_outputs_than_inputs = _execute.make_bool(ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CTCLossV2", inputs=inputs, labels_indices=labels_indices,
                     labels_values=labels_values,
                     sequence_length=sequence_length,
                     preprocess_collapse_repeated=preprocess_collapse_repeated,
                     ctc_merge_repeated=ctc_merge_repeated,
                     ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs,
                     name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Unlike CTCLoss, V2 has no "T" attr: the input dtype is fixed float32.
    _attrs = ("preprocess_collapse_repeated",
              _op._get_attr_bool("preprocess_collapse_repeated"),
              "ctc_merge_repeated", _op._get_attr_bool("ctc_merge_repeated"),
              "ignore_longer_outputs_than_inputs",
              _op._get_attr_bool("ignore_longer_outputs_than_inputs"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CTCLossV2", _inputs_flat, _attrs, _result)
  _result = _CTCLossV2Output._make(_result)
  return _result
# Exported raw-op endpoint: tf.raw_ops.CTCLossV2.
CTCLossV2 = tf_export("raw_ops.CTCLossV2")(_ops.to_raw_op(ctc_loss_v2))
def ctc_loss_v2_eager_fallback(inputs, labels_indices, labels_values, sequence_length, preprocess_collapse_repeated, ctc_merge_repeated, ignore_longer_outputs_than_inputs, name, ctx):
  """Python eager-path fallback for CTCLossV2.

  Used when the C fast path raises _FallbackException; converts inputs to
  tensors explicitly and executes the op via _execute.execute.
  """
  if preprocess_collapse_repeated is None:
    preprocess_collapse_repeated = False
  preprocess_collapse_repeated = _execute.make_bool(preprocess_collapse_repeated, "preprocess_collapse_repeated")
  if ctc_merge_repeated is None:
    ctc_merge_repeated = True
  ctc_merge_repeated = _execute.make_bool(ctc_merge_repeated, "ctc_merge_repeated")
  if ignore_longer_outputs_than_inputs is None:
    ignore_longer_outputs_than_inputs = False
  ignore_longer_outputs_than_inputs = _execute.make_bool(ignore_longer_outputs_than_inputs, "ignore_longer_outputs_than_inputs")
  # V2 has no "T" attr: the logits are always float32.
  inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)
  labels_indices = _ops.convert_to_tensor(labels_indices, _dtypes.int64)
  labels_values = _ops.convert_to_tensor(labels_values, _dtypes.int32)
  sequence_length = _ops.convert_to_tensor(sequence_length, _dtypes.int32)
  _inputs_flat = [inputs, labels_indices, labels_values, sequence_length]
  _attrs = ("preprocess_collapse_repeated", preprocess_collapse_repeated,
  "ctc_merge_repeated", ctc_merge_repeated,
  "ignore_longer_outputs_than_inputs", ignore_longer_outputs_than_inputs)
  # 2 outputs: loss and gradient.
  _result = _execute.execute(b"CTCLossV2", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CTCLossV2", _inputs_flat, _attrs, _result)
  _result = _CTCLossV2Output._make(_result)
  return _result