Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_bitwise_ops.py: 16%

376 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23 

24@_dispatch.add_fallback_dispatch_list 

25@_dispatch.add_type_based_api_dispatcher 

26@tf_export('bitwise.bitwise_and') 

27def bitwise_and(x, y, name=None): 

28 r"""Elementwise computes the bitwise AND of `x` and `y`. 

29 

30 The result will have those bits set that are set in both `x` and `y`. The 

31 computation is performed on the underlying representations of `x` and `y`. 

32 

33 For example: 

34 

35 ```python 

36 import tensorflow as tf 

37 from tensorflow.python.ops import bitwise_ops 

38 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, 

39 tf.uint8, tf.uint16, tf.uint32, tf.uint64] 

40 

41 for dtype in dtype_list: 

42 lhs = tf.constant([0, 5, 3, 14], dtype=dtype) 

43 rhs = tf.constant([5, 0, 7, 11], dtype=dtype) 

44 exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) 

45 

46 res = bitwise_ops.bitwise_and(lhs, rhs) 

47 tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE 

48 ``` 

49 

50 Args: 

51 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

52 y: A `Tensor`. Must have the same type as `x`. 

53 name: A name for the operation (optional). 

54 

55 Returns: 

56 A `Tensor`. Has the same type as `x`. 

57 """ 

58 _ctx = _context._context or _context.context() 

59 tld = _ctx._thread_local_data 

60 if tld.is_eager: 

61 try: 

62 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

63 _ctx, "BitwiseAnd", name, x, y) 

64 return _result 

65 except _core._NotOkStatusException as e: 

66 _ops.raise_from_not_ok_status(e, name) 

67 except _core._FallbackException: 

68 pass 

69 try: 

70 _result = _dispatcher_for_bitwise_and( 

71 (x, y, name,), None) 

72 if _result is not NotImplemented: 

73 return _result 

74 return bitwise_and_eager_fallback( 

75 x, y, name=name, ctx=_ctx) 

76 except _core._SymbolicException: 

77 pass # Add nodes to the TensorFlow graph. 

78 except (TypeError, ValueError): 

79 _result = _dispatch.dispatch( 

80 bitwise_and, (), dict(x=x, y=y, name=name) 

81 ) 

82 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

83 return _result 

84 raise 

85 else: 

86 _result = _dispatcher_for_bitwise_and( 

87 (x, y, name,), None) 

88 if _result is not NotImplemented: 

89 return _result 

90 # Add nodes to the TensorFlow graph. 

91 try: 

92 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

93 "BitwiseAnd", x=x, y=y, name=name) 

94 except (TypeError, ValueError): 

95 _result = _dispatch.dispatch( 

96 bitwise_and, (), dict(x=x, y=y, name=name) 

97 ) 

98 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

99 return _result 

100 raise 

101 _result = _outputs[:] 

102 if _execute.must_record_gradient(): 

103 _attrs = ("T", _op._get_attr_type("T")) 

104 _inputs_flat = _op.inputs 

105 _execute.record_gradient( 

106 "BitwiseAnd", _inputs_flat, _attrs, _result) 

107 _result, = _result 

108 return _result 

109 

110BitwiseAnd = tf_export("raw_ops.BitwiseAnd")(_ops.to_raw_op(bitwise_and)) 

111_dispatcher_for_bitwise_and = bitwise_and._tf_type_based_dispatcher.Dispatch 

112 

113 

114def bitwise_and_eager_fallback(x, y, name, ctx): 

115 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

116 (x, y) = _inputs_T 

117 _inputs_flat = [x, y] 

118 _attrs = ("T", _attr_T) 

119 _result = _execute.execute(b"BitwiseAnd", 1, inputs=_inputs_flat, 

120 attrs=_attrs, ctx=ctx, name=name) 

121 if _execute.must_record_gradient(): 

122 _execute.record_gradient( 

123 "BitwiseAnd", _inputs_flat, _attrs, _result) 

124 _result, = _result 

125 return _result 
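
A minimal usage sketch for the `BitwiseAnd` group above (not part of the generated file): the exported `tf.bitwise.bitwise_and` symbol and the `tf.raw_ops.BitwiseAnd` alias registered above both reach the same kernel. Example values assume the small int32 operands shown.

```python
import tensorflow as tf

a = tf.constant([0, 5, 3, 14], dtype=tf.int32)
b = tf.constant([5, 0, 7, 11], dtype=tf.int32)

# Public API: dispatches through the generated bitwise_and wrapper above.
print(tf.bitwise.bitwise_and(a, b))     # tf.Tensor([ 0  0  3 10], shape=(4,), dtype=int32)

# Raw-op alias created by _ops.to_raw_op; raw ops take keyword arguments only.
print(tf.raw_ops.BitwiseAnd(x=a, y=b))  # tf.Tensor([ 0  0  3 10], shape=(4,), dtype=int32)
```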

126 

127 

128@_dispatch.add_fallback_dispatch_list 

129@_dispatch.add_type_based_api_dispatcher 

130@tf_export('bitwise.bitwise_or') 

131def bitwise_or(x, y, name=None): 

132 r"""Elementwise computes the bitwise OR of `x` and `y`. 

133 

134 The result will have those bits set that are set in `x`, `y`, or both. The 

135 computation is performed on the underlying representations of `x` and `y`. 

136 

137 For example: 

138 

139 ```python 

140 import tensorflow as tf 

141 from tensorflow.python.ops import bitwise_ops 

142 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, 

143 tf.uint8, tf.uint16, tf.uint32, tf.uint64] 

144 

145 for dtype in dtype_list: 

146 lhs = tf.constant([0, 5, 3, 14], dtype=dtype) 

147 rhs = tf.constant([5, 0, 7, 11], dtype=dtype) 

148 exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) 

149 

150 res = bitwise_ops.bitwise_or(lhs, rhs) 

151 tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE 

152 ``` 

153 

154 Args: 

155 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

156 y: A `Tensor`. Must have the same type as `x`. 

157 name: A name for the operation (optional). 

158 

159 Returns: 

160 A `Tensor`. Has the same type as `x`. 

161 """ 

162 _ctx = _context._context or _context.context() 

163 tld = _ctx._thread_local_data 

164 if tld.is_eager: 

165 try: 

166 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

167 _ctx, "BitwiseOr", name, x, y) 

168 return _result 

169 except _core._NotOkStatusException as e: 

170 _ops.raise_from_not_ok_status(e, name) 

171 except _core._FallbackException: 

172 pass 

173 try: 

174 _result = _dispatcher_for_bitwise_or( 

175 (x, y, name,), None) 

176 if _result is not NotImplemented: 

177 return _result 

178 return bitwise_or_eager_fallback( 

179 x, y, name=name, ctx=_ctx) 

180 except _core._SymbolicException: 

181 pass # Add nodes to the TensorFlow graph. 

182 except (TypeError, ValueError): 

183 _result = _dispatch.dispatch( 

184 bitwise_or, (), dict(x=x, y=y, name=name) 

185 ) 

186 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

187 return _result 

188 raise 

189 else: 

190 _result = _dispatcher_for_bitwise_or( 

191 (x, y, name,), None) 

192 if _result is not NotImplemented: 

193 return _result 

194 # Add nodes to the TensorFlow graph. 

195 try: 

196 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

197 "BitwiseOr", x=x, y=y, name=name) 

198 except (TypeError, ValueError): 

199 _result = _dispatch.dispatch( 

200 bitwise_or, (), dict(x=x, y=y, name=name) 

201 ) 

202 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

203 return _result 

204 raise 

205 _result = _outputs[:] 

206 if _execute.must_record_gradient(): 

207 _attrs = ("T", _op._get_attr_type("T")) 

208 _inputs_flat = _op.inputs 

209 _execute.record_gradient( 

210 "BitwiseOr", _inputs_flat, _attrs, _result) 

211 _result, = _result 

212 return _result 

213 

214BitwiseOr = tf_export("raw_ops.BitwiseOr")(_ops.to_raw_op(bitwise_or)) 

215_dispatcher_for_bitwise_or = bitwise_or._tf_type_based_dispatcher.Dispatch 

216 

217 

218def bitwise_or_eager_fallback(x, y, name, ctx): 

219 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

220 (x, y) = _inputs_T 

221 _inputs_flat = [x, y] 

222 _attrs = ("T", _attr_T) 

223 _result = _execute.execute(b"BitwiseOr", 1, inputs=_inputs_flat, 

224 attrs=_attrs, ctx=ctx, name=name) 

225 if _execute.must_record_gradient(): 

226 _execute.record_gradient( 

227 "BitwiseOr", _inputs_flat, _attrs, _result) 

228 _result, = _result 

229 return _result 
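
A small, hedged illustration of the OR op above used to set a flag bit; the `perms` and `write_bit` values are invented for the example, not anything defined in this file.

```python
import tensorflow as tf

perms = tf.constant([0, 1, 4], dtype=tf.uint8)   # hypothetical permission flags
write_bit = tf.constant(2, dtype=tf.uint8)

# OR-ing a broadcast scalar mask sets bit 1 in every element and leaves the rest alone.
print(tf.bitwise.bitwise_or(perms, write_bit))   # tf.Tensor([2 3 6], shape=(3,), dtype=uint8)
```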

230 

231 

232@_dispatch.add_fallback_dispatch_list 

233@_dispatch.add_type_based_api_dispatcher 

234@tf_export('bitwise.bitwise_xor') 

235def bitwise_xor(x, y, name=None): 

236 r"""Elementwise computes the bitwise XOR of `x` and `y`. 

237 

238 The result will have those bits set that differ between `x` and `y`. The 

239 computation is performed on the underlying representations of `x` and `y`. 

240 

241 For example: 

242 

243 ```python 

244 import tensorflow as tf 

245 from tensorflow.python.ops import bitwise_ops 

246 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, 

247 tf.uint8, tf.uint16, tf.uint32, tf.uint64] 

248 

249 for dtype in dtype_list: 

250 lhs = tf.constant([0, 5, 3, 14], dtype=dtype) 

251 rhs = tf.constant([5, 0, 7, 11], dtype=dtype) 

252 exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) 

253 

254 res = bitwise_ops.bitwise_xor(lhs, rhs) 

255 tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE 

256 ``` 

257 

258 Args: 

259 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

260 y: A `Tensor`. Must have the same type as `x`. 

261 name: A name for the operation (optional). 

262 

263 Returns: 

264 A `Tensor`. Has the same type as `x`. 

265 """ 

266 _ctx = _context._context or _context.context() 

267 tld = _ctx._thread_local_data 

268 if tld.is_eager: 

269 try: 

270 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

271 _ctx, "BitwiseXor", name, x, y) 

272 return _result 

273 except _core._NotOkStatusException as e: 

274 _ops.raise_from_not_ok_status(e, name) 

275 except _core._FallbackException: 

276 pass 

277 try: 

278 _result = _dispatcher_for_bitwise_xor( 

279 (x, y, name,), None) 

280 if _result is not NotImplemented: 

281 return _result 

282 return bitwise_xor_eager_fallback( 

283 x, y, name=name, ctx=_ctx) 

284 except _core._SymbolicException: 

285 pass # Add nodes to the TensorFlow graph. 

286 except (TypeError, ValueError): 

287 _result = _dispatch.dispatch( 

288 bitwise_xor, (), dict(x=x, y=y, name=name) 

289 ) 

290 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

291 return _result 

292 raise 

293 else: 

294 _result = _dispatcher_for_bitwise_xor( 

295 (x, y, name,), None) 

296 if _result is not NotImplemented: 

297 return _result 

298 # Add nodes to the TensorFlow graph. 

299 try: 

300 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

301 "BitwiseXor", x=x, y=y, name=name) 

302 except (TypeError, ValueError): 

303 _result = _dispatch.dispatch( 

304 bitwise_xor, (), dict(x=x, y=y, name=name) 

305 ) 

306 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

307 return _result 

308 raise 

309 _result = _outputs[:] 

310 if _execute.must_record_gradient(): 

311 _attrs = ("T", _op._get_attr_type("T")) 

312 _inputs_flat = _op.inputs 

313 _execute.record_gradient( 

314 "BitwiseXor", _inputs_flat, _attrs, _result) 

315 _result, = _result 

316 return _result 

317 

318BitwiseXor = tf_export("raw_ops.BitwiseXor")(_ops.to_raw_op(bitwise_xor)) 

319_dispatcher_for_bitwise_xor = bitwise_xor._tf_type_based_dispatcher.Dispatch 

320 

321 

322def bitwise_xor_eager_fallback(x, y, name, ctx): 

323 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

324 (x, y) = _inputs_T 

325 _inputs_flat = [x, y] 

326 _attrs = ("T", _attr_T) 

327 _result = _execute.execute(b"BitwiseXor", 1, inputs=_inputs_flat, 

328 attrs=_attrs, ctx=ctx, name=name) 

329 if _execute.must_record_gradient(): 

330 _execute.record_gradient( 

331 "BitwiseXor", _inputs_flat, _attrs, _result) 

332 _result, = _result 

333 return _result 
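
One property of the XOR op above worth sketching, using the same operand values as the docstring example (again, a sketch rather than part of the generated file): XOR-ing the result with either operand recovers the other.

```python
import tensorflow as tf

x = tf.constant([0, 5, 3, 14], dtype=tf.uint8)
y = tf.constant([5, 0, 7, 11], dtype=tf.uint8)

d = tf.bitwise.bitwise_xor(x, y)
print(d)                              # tf.Tensor([5 5 4 5], shape=(4,), dtype=uint8)

# XOR is its own inverse: d ^ y gives back x.
print(tf.bitwise.bitwise_xor(d, y))   # tf.Tensor([ 0  5  3 14], shape=(4,), dtype=uint8)
```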

334 

335 

336@_dispatch.add_fallback_dispatch_list 

337@_dispatch.add_type_based_api_dispatcher 

338@tf_export('bitwise.invert') 

339def invert(x, name=None): 

340 r"""Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010. 

341 

342 Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101. 

343 This operation is performed on each element of the tensor argument `x`. 

344 

345 Example: 

346 ```python 

347 import tensorflow as tf 

348 from tensorflow.python.ops import bitwise_ops 

349 

350 # flip 2 (00000010) to -3 (11111101) 

351 tf.assert_equal(-3, bitwise_ops.invert(2)) 

352 

353 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, 

354 tf.uint8, tf.uint16, tf.uint32, tf.uint64] 

355 

356 inputs = [0, 5, 3, 14] 

357 for dtype in dtype_list: 

358 # Because of issues with negative numbers, let's test this indirectly. 

359 # 1. invert(a) and a = 0 

360 # 2. invert(a) or a = invert(0) 

361 input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) 

362 not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( 

363 input_tensor, bitwise_ops.invert(input_tensor)), 

364 bitwise_ops.bitwise_or( 

365 input_tensor, bitwise_ops.invert(input_tensor)), 

366 bitwise_ops.invert( 

367 tf.constant(0, dtype=dtype))] 

368 

369 expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) 

370 tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) 

371 

372 expected = tf.cast([not_0] * 4, tf.float32) 

373 tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) 

374 

375 # For unsigned dtypes let's also check the result directly. 

376 if dtype.is_unsigned: 

377 inverted = bitwise_ops.invert(input_tensor) 

378 expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) 

379 tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) 

380 ``` 

381 

382 Args: 

383 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

384 name: A name for the operation (optional). 

385 

386 Returns: 

387 A `Tensor`. Has the same type as `x`. 

388 """ 

389 _ctx = _context._context or _context.context() 

390 tld = _ctx._thread_local_data 

391 if tld.is_eager: 

392 try: 

393 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

394 _ctx, "Invert", name, x) 

395 return _result 

396 except _core._NotOkStatusException as e: 

397 _ops.raise_from_not_ok_status(e, name) 

398 except _core._FallbackException: 

399 pass 

400 try: 

401 _result = _dispatcher_for_invert( 

402 (x, name,), None) 

403 if _result is not NotImplemented: 

404 return _result 

405 return invert_eager_fallback( 

406 x, name=name, ctx=_ctx) 

407 except _core._SymbolicException: 

408 pass # Add nodes to the TensorFlow graph. 

409 except (TypeError, ValueError): 

410 _result = _dispatch.dispatch( 

411 invert, (), dict(x=x, name=name) 

412 ) 

413 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

414 return _result 

415 raise 

416 else: 

417 _result = _dispatcher_for_invert( 

418 (x, name,), None) 

419 if _result is not NotImplemented: 

420 return _result 

421 # Add nodes to the TensorFlow graph. 

422 try: 

423 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

424 "Invert", x=x, name=name) 

425 except (TypeError, ValueError): 

426 _result = _dispatch.dispatch( 

427 invert, (), dict(x=x, name=name) 

428 ) 

429 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

430 return _result 

431 raise 

432 _result = _outputs[:] 

433 if _execute.must_record_gradient(): 

434 _attrs = ("T", _op._get_attr_type("T")) 

435 _inputs_flat = _op.inputs 

436 _execute.record_gradient( 

437 "Invert", _inputs_flat, _attrs, _result) 

438 _result, = _result 

439 return _result 

440 

441Invert = tf_export("raw_ops.Invert")(_ops.to_raw_op(invert)) 

442_dispatcher_for_invert = invert._tf_type_based_dispatcher.Dispatch 

443 

444 

445def invert_eager_fallback(x, name, ctx): 

446 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

447 _inputs_flat = [x] 

448 _attrs = ("T", _attr_T) 

449 _result = _execute.execute(b"Invert", 1, inputs=_inputs_flat, attrs=_attrs, 

450 ctx=ctx, name=name) 

451 if _execute.must_record_gradient(): 

452 _execute.record_gradient( 

453 "Invert", _inputs_flat, _attrs, _result) 

454 _result, = _result 

455 return _result 
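
For signed inputs, the bit flip performed by the `Invert` op above is the two's-complement identity `~x == -x - 1`; a minimal check (not part of the generated file):

```python
import tensorflow as tf

x = tf.constant([0, 5, 3, 14], dtype=tf.int32)

# Flipping every bit of a signed integer equals negating it and subtracting one.
print(tf.bitwise.invert(x))   # tf.Tensor([ -1  -6  -4 -15], shape=(4,), dtype=int32)
print(-x - 1)                 # tf.Tensor([ -1  -6  -4 -15], shape=(4,), dtype=int32)
```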

456 

457 

458@_dispatch.add_fallback_dispatch_list 

459@_dispatch.add_type_based_api_dispatcher 

460@tf_export('bitwise.left_shift') 

461def left_shift(x, y, name=None): 

462 r"""Elementwise computes the bitwise left-shift of `x` and `y`. 

463 

464 If `y` is negative, or greater than or equal to the width of `x` in bits, the 

465 result is implementation defined. 

466 

467 Example: 

468 

469 ```python 

470 import tensorflow as tf 

471 from tensorflow.python.ops import bitwise_ops 

472 import numpy as np 

473 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] 

474 

475 for dtype in dtype_list: 

476 lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) 

477 rhs = tf.constant([5, 0, 7, 11], dtype=dtype) 

478 

479 left_shift_result = bitwise_ops.left_shift(lhs, rhs) 

480 

481 print(left_shift_result) 

482 

483 # This will print: 

484 # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) 

485 # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) 

486 # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) 

487 # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) 

488 

489 lhs = np.array([-2, 64, 101, 32], dtype=np.int8) 

490 rhs = np.array([-1, -5, -3, -14], dtype=np.int8) 

491 bitwise_ops.left_shift(lhs, rhs) 

492 # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> 

493 ``` 

494 

495 Args: 

496 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

497 y: A `Tensor`. Must have the same type as `x`. 

498 name: A name for the operation (optional). 

499 

500 Returns: 

501 A `Tensor`. Has the same type as `x`. 

502 """ 

503 _ctx = _context._context or _context.context() 

504 tld = _ctx._thread_local_data 

505 if tld.is_eager: 

506 try: 

507 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

508 _ctx, "LeftShift", name, x, y) 

509 return _result 

510 except _core._NotOkStatusException as e: 

511 _ops.raise_from_not_ok_status(e, name) 

512 except _core._FallbackException: 

513 pass 

514 try: 

515 _result = _dispatcher_for_left_shift( 

516 (x, y, name,), None) 

517 if _result is not NotImplemented: 

518 return _result 

519 return left_shift_eager_fallback( 

520 x, y, name=name, ctx=_ctx) 

521 except _core._SymbolicException: 

522 pass # Add nodes to the TensorFlow graph. 

523 except (TypeError, ValueError): 

524 _result = _dispatch.dispatch( 

525 left_shift, (), dict(x=x, y=y, name=name) 

526 ) 

527 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

528 return _result 

529 raise 

530 else: 

531 _result = _dispatcher_for_left_shift( 

532 (x, y, name,), None) 

533 if _result is not NotImplemented: 

534 return _result 

535 # Add nodes to the TensorFlow graph. 

536 try: 

537 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

538 "LeftShift", x=x, y=y, name=name) 

539 except (TypeError, ValueError): 

540 _result = _dispatch.dispatch( 

541 left_shift, (), dict(x=x, y=y, name=name) 

542 ) 

543 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

544 return _result 

545 raise 

546 _result = _outputs[:] 

547 if _execute.must_record_gradient(): 

548 _attrs = ("T", _op._get_attr_type("T")) 

549 _inputs_flat = _op.inputs 

550 _execute.record_gradient( 

551 "LeftShift", _inputs_flat, _attrs, _result) 

552 _result, = _result 

553 return _result 

554 

555LeftShift = tf_export("raw_ops.LeftShift")(_ops.to_raw_op(left_shift)) 

556_dispatcher_for_left_shift = left_shift._tf_type_based_dispatcher.Dispatch 

557 

558 

559def left_shift_eager_fallback(x, y, name, ctx): 

560 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

561 (x, y) = _inputs_T 

562 _inputs_flat = [x, y] 

563 _attrs = ("T", _attr_T) 

564 _result = _execute.execute(b"LeftShift", 1, inputs=_inputs_flat, 

565 attrs=_attrs, ctx=ctx, name=name) 

566 if _execute.must_record_gradient(): 

567 _execute.record_gradient( 

568 "LeftShift", _inputs_flat, _attrs, _result) 

569 _result, = _result 

570 return _result 
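
A short sketch of the in-range behaviour of the `LeftShift` op above (values chosen so nothing overflows): shifting left by `s` is multiplication by `2**s`.

```python
import tensorflow as tf

x = tf.constant([1, 2, 3, 4], dtype=tf.int32)
s = tf.constant([0, 1, 2, 3], dtype=tf.int32)

# No overflow here, so the shift matches plain multiplication by powers of two.
print(tf.bitwise.left_shift(x, s))   # tf.Tensor([ 1  4 12 32], shape=(4,), dtype=int32)
print(x * 2 ** s)                    # tf.Tensor([ 1  4 12 32], shape=(4,), dtype=int32)
```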

571 

572 

573def population_count(x, name=None): 

574 r"""Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). 

575 

576 For each entry in `x`, calculates the number of `1` (on) bits in the binary 

577 representation of that entry. 

578 

579 **NOTE**: It is more efficient to first `tf.bitcast` your tensors into 

580 `int32` or `int64` and perform the bitcount on the result, than to feed in 

581 8- or 16-bit inputs and then aggregate the resulting counts. 

582 

583 Args: 

584 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

585 name: A name for the operation (optional). 

586 

587 Returns: 

588 A `Tensor` of type `uint8`. 

589 """ 

590 _ctx = _context._context or _context.context() 

591 tld = _ctx._thread_local_data 

592 if tld.is_eager: 

593 try: 

594 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

595 _ctx, "PopulationCount", name, x) 

596 return _result 

597 except _core._NotOkStatusException as e: 

598 _ops.raise_from_not_ok_status(e, name) 

599 except _core._FallbackException: 

600 pass 

601 try: 

602 return population_count_eager_fallback( 

603 x, name=name, ctx=_ctx) 

604 except _core._SymbolicException: 

605 pass # Add nodes to the TensorFlow graph. 

606 # Add nodes to the TensorFlow graph. 

607 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

608 "PopulationCount", x=x, name=name) 

609 _result = _outputs[:] 

610 if _execute.must_record_gradient(): 

611 _attrs = ("T", _op._get_attr_type("T")) 

612 _inputs_flat = _op.inputs 

613 _execute.record_gradient( 

614 "PopulationCount", _inputs_flat, _attrs, _result) 

615 _result, = _result 

616 return _result 

617 

618PopulationCount = tf_export("raw_ops.PopulationCount")(_ops.to_raw_op(population_count)) 

619 

620 

621def population_count_eager_fallback(x, name, ctx): 

622 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

623 _inputs_flat = [x] 

624 _attrs = ("T", _attr_T) 

625 _result = _execute.execute(b"PopulationCount", 1, inputs=_inputs_flat, 

626 attrs=_attrs, ctx=ctx, name=name) 

627 if _execute.must_record_gradient(): 

628 _execute.record_gradient( 

629 "PopulationCount", _inputs_flat, _attrs, _result) 

630 _result, = _result 

631 return _result 
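
The `population_count` docstring above has no example, so here is a minimal sketch (values worked out by hand); the wrapper is reachable through `tensorflow.python.ops.bitwise_ops` or via the raw op registered above.

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops

x = tf.constant([0, 1, 7, 255], dtype=tf.uint8)

# Number of set bits per element; the output dtype is always uint8.
print(bitwise_ops.population_count(x))   # tf.Tensor([0 1 3 8], shape=(4,), dtype=uint8)
print(tf.raw_ops.PopulationCount(x=x))   # tf.Tensor([0 1 3 8], shape=(4,), dtype=uint8)
```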

632 

633 

634@_dispatch.add_fallback_dispatch_list 

635@_dispatch.add_type_based_api_dispatcher 

636@tf_export('bitwise.right_shift') 

637def right_shift(x, y, name=None): 

638 r"""Elementwise computes the bitwise right-shift of `x` and `y`. 

639 

640 Performs a logical shift for unsigned integer types, and an arithmetic shift 

641 for signed integer types. 

642 

643 If `y` is negative, or greater than or equal to the width of `x` in bits, 

644 the result is implementation defined. 

645 

646 Example: 

647 

648 ```python 

649 import tensorflow as tf 

650 from tensorflow.python.ops import bitwise_ops 

651 import numpy as np 

652 dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] 

653 

654 for dtype in dtype_list: 

655 lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) 

656 rhs = tf.constant([5, 0, 7, 11], dtype=dtype) 

657 

658 right_shift_result = bitwise_ops.right_shift(lhs, rhs) 

659 

660 print(right_shift_result) 

661 

662 # This will print: 

663 # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) 

664 # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) 

665 # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) 

666 # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) 

667 

668 lhs = np.array([-2, 64, 101, 32], dtype=np.int8) 

669 rhs = np.array([-1, -5, -3, -14], dtype=np.int8) 

670 bitwise_ops.right_shift(lhs, rhs) 

671 # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)> 

672 ``` 

673 

674 Args: 

675 x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`. 

676 y: A `Tensor`. Must have the same type as `x`. 

677 name: A name for the operation (optional). 

678 

679 Returns: 

680 A `Tensor`. Has the same type as `x`. 

681 """ 

682 _ctx = _context._context or _context.context() 

683 tld = _ctx._thread_local_data 

684 if tld.is_eager: 

685 try: 

686 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

687 _ctx, "RightShift", name, x, y) 

688 return _result 

689 except _core._NotOkStatusException as e: 

690 _ops.raise_from_not_ok_status(e, name) 

691 except _core._FallbackException: 

692 pass 

693 try: 

694 _result = _dispatcher_for_right_shift( 

695 (x, y, name,), None) 

696 if _result is not NotImplemented: 

697 return _result 

698 return right_shift_eager_fallback( 

699 x, y, name=name, ctx=_ctx) 

700 except _core._SymbolicException: 

701 pass # Add nodes to the TensorFlow graph. 

702 except (TypeError, ValueError): 

703 _result = _dispatch.dispatch( 

704 right_shift, (), dict(x=x, y=y, name=name) 

705 ) 

706 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

707 return _result 

708 raise 

709 else: 

710 _result = _dispatcher_for_right_shift( 

711 (x, y, name,), None) 

712 if _result is not NotImplemented: 

713 return _result 

714 # Add nodes to the TensorFlow graph. 

715 try: 

716 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

717 "RightShift", x=x, y=y, name=name) 

718 except (TypeError, ValueError): 

719 _result = _dispatch.dispatch( 

720 right_shift, (), dict(x=x, y=y, name=name) 

721 ) 

722 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

723 return _result 

724 raise 

725 _result = _outputs[:] 

726 if _execute.must_record_gradient(): 

727 _attrs = ("T", _op._get_attr_type("T")) 

728 _inputs_flat = _op.inputs 

729 _execute.record_gradient( 

730 "RightShift", _inputs_flat, _attrs, _result) 

731 _result, = _result 

732 return _result 

733 

734RightShift = tf_export("raw_ops.RightShift")(_ops.to_raw_op(right_shift)) 

735_dispatcher_for_right_shift = right_shift._tf_type_based_dispatcher.Dispatch 

736 

737 

738def right_shift_eager_fallback(x, y, name, ctx): 

739 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx, [_dtypes.int8, _dtypes.int16, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.uint16, _dtypes.uint32, _dtypes.uint64, ]) 

740 (x, y) = _inputs_T 

741 _inputs_flat = [x, y] 

742 _attrs = ("T", _attr_T) 

743 _result = _execute.execute(b"RightShift", 1, inputs=_inputs_flat, 

744 attrs=_attrs, ctx=ctx, name=name) 

745 if _execute.must_record_gradient(): 

746 _execute.record_gradient( 

747 "RightShift", _inputs_flat, _attrs, _result) 

748 _result, = _result 

749 return _result 
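
To make the arithmetic-versus-logical distinction in the `RightShift` docstring concrete, a small sketch (not part of the generated file) using the same bit patterns interpreted as signed and unsigned:

```python
import tensorflow as tf

signed = tf.constant([-8, -1, 16], dtype=tf.int8)       # bits 11111000, 11111111, 00010000
unsigned = tf.constant([248, 255, 16], dtype=tf.uint8)  # identical bit patterns
shift_i = tf.constant([2, 2, 2], dtype=tf.int8)
shift_u = tf.constant([2, 2, 2], dtype=tf.uint8)

# Arithmetic shift replicates the sign bit for signed types...
print(tf.bitwise.right_shift(signed, shift_i))    # tf.Tensor([-2 -1  4], shape=(3,), dtype=int8)
# ...while unsigned types get a logical shift that fills with zeros.
print(tf.bitwise.right_shift(unsigned, shift_u))  # tf.Tensor([62 63  4], shape=(3,), dtype=uint8)
```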

750