Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_stateful_random_ops.py: 11%
399 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
"""Python wrappers around TensorFlow ops.

This file is MACHINE GENERATED! Do not edit.
"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
def non_deterministic_ints(shape, dtype=_dtypes.int64, name=None):
  r"""Non-deterministically generates some integers.

  This op may use some OS-provided source of non-determinism (e.g. an RNG),
  so each execution will give different results.

  Args:
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.int64`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "NonDeterministicInts", name, shape, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return non_deterministic_ints_eager_fallback(
          shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.int64
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "NonDeterministicInts", shape=shape, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "NonDeterministicInts", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

NonDeterministicInts = tf_export("raw_ops.NonDeterministicInts")(_ops.to_raw_op(non_deterministic_ints))
def non_deterministic_ints_eager_fallback(shape, dtype, name, ctx):
  """Slow-path eager execution of NonDeterministicInts.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.int64
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  _inputs_flat = [shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"NonDeterministicInts", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "NonDeterministicInts", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def rng_read_and_skip(resource, alg, delta, name=None):
  r"""Advance the counter of a counter-based RNG.

  The state of the RNG after
  `rng_read_and_skip(n)` will be the same as that after `uniform([n])`
  (or any other distribution). The actual increment added to the
  counter is an unspecified implementation choice.

  In the case that the input algorithm is RNG_ALG_AUTO_SELECT, the counter in
  the state needs to be of size int64[2], the current maximal counter size
  among algorithms. In this case, this op will manage the counter as if it is
  an 128-bit integer with layout [lower_64bits, higher_64bits]. If an
  algorithm needs less than 128 bits for the counter, it should use the left
  portion of the int64[2]. In this way, the int64[2] is compatible with all
  current RNG algorithms (Philox, ThreeFry and
  xla::RandomAlgorithm::RNG_DEFAULT). Downstream RNG ops can thus use this
  counter with any RNG algorithm.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
      The state consists of the counter followed by the key.
    alg: A `Tensor` of type `int32`. The RNG algorithm.
    delta: A `Tensor` of type `uint64`. The amount of advancement.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RngReadAndSkip", name, resource, alg, delta)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return rng_read_and_skip_eager_fallback(
          resource, alg, delta, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RngReadAndSkip", resource=resource, alg=alg, delta=delta, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # This op has no attrs relevant to gradients.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RngReadAndSkip", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

RngReadAndSkip = tf_export("raw_ops.RngReadAndSkip")(_ops.to_raw_op(rng_read_and_skip))
def rng_read_and_skip_eager_fallback(resource, alg, delta, name, ctx):
  """Slow-path eager execution of RngReadAndSkip.

  Used when the C++ fast path raises `_FallbackException`.
  """
  # Inputs have fixed dtypes; convert each explicitly.
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  alg = _ops.convert_to_tensor(alg, _dtypes.int32)
  delta = _ops.convert_to_tensor(delta, _dtypes.uint64)
  _inputs_flat = [resource, alg, delta]
  _attrs = None
  _result = _execute.execute(b"RngReadAndSkip", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "RngReadAndSkip", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def rng_skip(resource, algorithm, delta, name=None):
  r"""Advance the counter of a counter-based RNG.

  The state of the RNG after
  `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
  (or any other distribution). The actual increment added to the
  counter is an unspecified implementation detail.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    delta: A `Tensor` of type `int64`. The amount of advancement.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RngSkip", name, resource, algorithm, delta)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return rng_skip_eager_fallback(
          resource, algorithm, delta, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RngSkip", resource=resource, algorithm=algorithm, delta=delta,
                   name=name)
  # RngSkip has no tensor outputs; return the Operation itself.
  return _op

RngSkip = tf_export("raw_ops.RngSkip")(_ops.to_raw_op(rng_skip))
def rng_skip_eager_fallback(resource, algorithm, delta, name, ctx):
  """Slow-path eager execution of RngSkip.

  Used when the C++ fast path raises `_FallbackException`.
  """
  # Inputs have fixed dtypes; convert each explicitly.
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  delta = _ops.convert_to_tensor(delta, _dtypes.int64)
  _inputs_flat = [resource, algorithm, delta]
  _attrs = None
  # Zero outputs: the op only mutates the RNG state held by `resource`.
  _result = _execute.execute(b"RngSkip", 0, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  _result = None
  return _result
def stateful_random_binomial(resource, algorithm, shape, counts, probs, dtype=_dtypes.int64, name=None):
  r"""Outputs random values from a binomial distribution using a stateful RNG.

  Args:
    resource: A `Tensor` of type `resource`.
    algorithm: A `Tensor` of type `int64`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    counts: A `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`.
    probs: A `Tensor`. Must have the same type as `counts`.
    dtype: An optional `tf.DType` from: `tf.half, tf.float32, tf.float64,
      tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulRandomBinomial", name, resource, algorithm, shape,
        counts, probs, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_random_binomial_eager_fallback(
          resource, algorithm, shape, counts, probs, dtype=dtype, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.int64
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulRandomBinomial", resource=resource, algorithm=algorithm,
                                  shape=shape, counts=counts, probs=probs,
                                  dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("S", _op._get_attr_type("S"), "T", _op._get_attr_type("T"),
              "dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulRandomBinomial", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulRandomBinomial = tf_export("raw_ops.StatefulRandomBinomial")(_ops.to_raw_op(stateful_random_binomial))
def stateful_random_binomial_eager_fallback(resource, algorithm, shape, counts, probs, dtype, name, ctx):
  """Slow-path eager execution of StatefulRandomBinomial.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.int64
  dtype = _execute.make_type(dtype, "dtype")
  # Resolve the polymorphic attrs: S from `shape`, T from `counts`/`probs`
  # (which must share one type, defaulting to float64).
  _attr_S, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ])
  _attr_T, _inputs_T = _execute.args_to_matching_eager([counts, probs], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ], _dtypes.float64)
  (counts, probs) = _inputs_T
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape, counts, probs]
  _attrs = ("S", _attr_S, "T", _attr_T, "dtype", dtype)
  _result = _execute.execute(b"StatefulRandomBinomial", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulRandomBinomial", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_standard_normal(resource, shape, dtype=_dtypes.float32, name=None):
  r"""Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2'

  The generated values will have mean 0 and standard deviation 1.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulStandardNormal", name, resource, shape, "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_standard_normal_eager_fallback(
          resource, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulStandardNormal", resource=resource, shape=shape, dtype=dtype,
                                  name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulStandardNormal", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulStandardNormal = tf_export("raw_ops.StatefulStandardNormal")(_ops.to_raw_op(stateful_standard_normal))
def stateful_standard_normal_eager_fallback(resource, shape, dtype, name, ctx):
  """Slow-path eager execution of StatefulStandardNormal.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  _inputs_flat = [resource, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulStandardNormal", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulStandardNormal", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_standard_normal_v2(resource, algorithm, shape, dtype=_dtypes.float32, name=None):
  r"""Outputs random values from a normal distribution.

  The generated values will have mean 0 and standard deviation 1.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulStandardNormalV2", name, resource, algorithm, shape,
        "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_standard_normal_v2_eager_fallback(
          resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulStandardNormalV2", resource=resource, algorithm=algorithm,
                                    shape=shape, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulStandardNormalV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulStandardNormalV2 = tf_export("raw_ops.StatefulStandardNormalV2")(_ops.to_raw_op(stateful_standard_normal_v2))
def stateful_standard_normal_v2_eager_fallback(resource, algorithm, shape, dtype, name, ctx):
  """Slow-path eager execution of StatefulStandardNormalV2.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulStandardNormalV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulStandardNormalV2", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_truncated_normal(resource, algorithm, shape, dtype=_dtypes.float32, name=None):
  r"""Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with mean 0 and standard
  deviation 1, except that values whose magnitude is more than 2 standard
  deviations from the mean are dropped and re-picked.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulTruncatedNormal", name, resource, algorithm, shape,
        "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_truncated_normal_eager_fallback(
          resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulTruncatedNormal", resource=resource, algorithm=algorithm,
                                   shape=shape, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulTruncatedNormal", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulTruncatedNormal = tf_export("raw_ops.StatefulTruncatedNormal")(_ops.to_raw_op(stateful_truncated_normal))
def stateful_truncated_normal_eager_fallback(resource, algorithm, shape, dtype, name, ctx):
  """Slow-path eager execution of StatefulTruncatedNormal.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulTruncatedNormal", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulTruncatedNormal", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_uniform(resource, algorithm, shape, dtype=_dtypes.float32, name=None):
  r"""Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range `[0, 1)`. The
  lower bound 0 is included in the range, while the upper bound 1 is excluded.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.float32`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulUniform", name, resource, algorithm, shape, "dtype",
        dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_uniform_eager_fallback(
          resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulUniform", resource=resource, algorithm=algorithm,
                           shape=shape, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulUniform", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulUniform = tf_export("raw_ops.StatefulUniform")(_ops.to_raw_op(stateful_uniform))
def stateful_uniform_eager_fallback(resource, algorithm, shape, dtype, name, ctx):
  """Slow-path eager execution of StatefulUniform.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulUniform", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulUniform", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_uniform_full_int(resource, algorithm, shape, dtype=_dtypes.uint64, name=None):
  r"""Outputs random integers from a uniform distribution.

  The generated values are uniform integers covering the whole range of `dtype`.

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    shape: A `Tensor`. The shape of the output tensor.
    dtype: An optional `tf.DType`. Defaults to `tf.uint64`.
      The type of the output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulUniformFullInt", name, resource, algorithm, shape,
        "dtype", dtype)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_uniform_full_int_eager_fallback(
          resource, algorithm, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.uint64
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulUniformFullInt", resource=resource, algorithm=algorithm,
                                  shape=shape, dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulUniformFullInt", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulUniformFullInt = tf_export("raw_ops.StatefulUniformFullInt")(_ops.to_raw_op(stateful_uniform_full_int))
def stateful_uniform_full_int_eager_fallback(resource, algorithm, shape, dtype, name, ctx):
  """Slow-path eager execution of StatefulUniformFullInt.

  Used when the C++ fast path raises `_FallbackException`.
  """
  if dtype is None:
    dtype = _dtypes.uint64
  dtype = _execute.make_type(dtype, "dtype")
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape]
  _attrs = ("dtype", dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulUniformFullInt", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulUniformFullInt", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def stateful_uniform_int(resource, algorithm, shape, minval, maxval, name=None):
  r"""Outputs random integers from a uniform distribution.

  The generated values are uniform integers in the range `[minval, maxval)`.
  The lower bound `minval` is included in the range, while the upper bound
  `maxval` is excluded.

  The random integers are slightly biased unless `maxval - minval` is an exact
  power of two. The bias is small for values of `maxval - minval` significantly
  smaller than the range of the output (either `2^32` or `2^64`).

  Args:
    resource: A `Tensor` of type `resource`.
      The handle of the resource variable that stores the state of the RNG.
    algorithm: A `Tensor` of type `int64`. The RNG algorithm.
    shape: A `Tensor`. The shape of the output tensor.
    minval: A `Tensor`. Minimum value (inclusive, scalar).
    maxval: A `Tensor`. Must have the same type as `minval`.
      Maximum value (exclusive, scalar).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `minval`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast-path dispatch first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "StatefulUniformInt", name, resource, algorithm, shape, minval,
        maxval)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the Python eager path.
      pass
    try:
      return stateful_uniform_int_eager_fallback(
          resource, algorithm, shape, minval, maxval, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "StatefulUniformInt", resource=resource, algorithm=algorithm,
                              shape=shape, minval=minval, maxval=maxval,
                              name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so gradient recording can replay this op.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "shape_dtype",
              _op._get_attr_type("shape_dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "StatefulUniformInt", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

StatefulUniformInt = tf_export("raw_ops.StatefulUniformInt")(_ops.to_raw_op(stateful_uniform_int))
def stateful_uniform_int_eager_fallback(resource, algorithm, shape, minval, maxval, name, ctx):
  """Slow-path eager execution of StatefulUniformInt.

  Used when the C++ fast path raises `_FallbackException`.
  """
  # `minval`/`maxval` must resolve to one common dtype (default int64),
  # which also becomes the op's output dtype.
  _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([minval, maxval], ctx, [], _dtypes.int64)
  (minval, maxval) = _inputs_dtype
  # Infer the shape tensor's dtype (defaulting to int64) while converting it.
  _attr_shape_dtype, (shape,) = _execute.args_to_matching_eager([shape], ctx, [], _dtypes.int64)
  resource = _ops.convert_to_tensor(resource, _dtypes.resource)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int64)
  _inputs_flat = [resource, algorithm, shape, minval, maxval]
  _attrs = ("dtype", _attr_dtype, "shape_dtype", _attr_shape_dtype)
  _result = _execute.execute(b"StatefulUniformInt", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "StatefulUniformInt", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result