Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_nn_ops.py: 7%

5943 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23_ApproxTopKOutput = collections.namedtuple( 

24 "ApproxTopK", 

25 ["values", "indices"]) 

26 

27 

28@_dispatch.add_fallback_dispatch_list 

29@_dispatch.add_type_based_api_dispatcher 

30@tf_export('approx_top_k') 

31def approx_top_k(input, k, reduction_dimension=-1, recall_target=0.95, is_max_k=True, reduction_input_size_override=-1, aggregate_to_topk=True, name=None): 

32 r"""Returns min/max k values and their indices of the input operand in an approximate manner. 

33 

34 See https://arxiv.org/abs/2206.14286 for the algorithm details. 

35 This op is only optimized on TPU currently. 

36 

37 Args: 

38 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

39 Array to search. Must be at least 1-D of the floating type 

40 k: An `int` that is `>= 0`. Specifies the number of min/max-k. 

41 reduction_dimension: An optional `int`. Defaults to `-1`. 

42 Integer dimension along which to search. Default: -1. 

43 recall_target: An optional `float`. Defaults to `0.95`. 

44 Recall target for the approximation. Range in (0,1] 

45 is_max_k: An optional `bool`. Defaults to `True`. 

46 When true, computes max-k; otherwise computes min-k. 

47 reduction_input_size_override: An optional `int`. Defaults to `-1`. 

48 When set to a positive value, it overrides the size determined by 

49 `input[reduction_dim]` for evaluating the recall. This option is useful when 

50 the given `input` is only a subset of the overall computation in SPMD or 

51 distributed pipelines, where the true input size cannot be deferred by the 

52 `input` shape. 

53 aggregate_to_topk: An optional `bool`. Defaults to `True`. 

54 When true, aggregates approximate results to top-k. When false, returns the 

55 approximate results. The number of the approximate results is implementation 

56 defined and is greater equals to the specified `k`. 

57 name: A name for the operation (optional). 

58 

59 Returns: 

60 A tuple of `Tensor` objects (values, indices). 

61 

62 values: A `Tensor`. Has the same type as `input`. 

63 indices: A `Tensor` of type `int32`. 

64 """ 

65 _ctx = _context._context or _context.context() 

66 tld = _ctx._thread_local_data 

67 if tld.is_eager: 

68 try: 

69 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

70 _ctx, "ApproxTopK", name, input, "k", k, "reduction_dimension", 

71 reduction_dimension, "recall_target", recall_target, "is_max_k", 

72 is_max_k, "reduction_input_size_override", 

73 reduction_input_size_override, "aggregate_to_topk", aggregate_to_topk) 

74 _result = _ApproxTopKOutput._make(_result) 

75 return _result 

76 except _core._NotOkStatusException as e: 

77 _ops.raise_from_not_ok_status(e, name) 

78 except _core._FallbackException: 

79 pass 

80 try: 

81 _result = _dispatcher_for_approx_top_k( 

82 (input, k, reduction_dimension, recall_target, is_max_k, 

83 reduction_input_size_override, aggregate_to_topk, name,), None) 

84 if _result is not NotImplemented: 

85 return _result 

86 return approx_top_k_eager_fallback( 

87 input, k=k, reduction_dimension=reduction_dimension, 

88 recall_target=recall_target, is_max_k=is_max_k, 

89 reduction_input_size_override=reduction_input_size_override, 

90 aggregate_to_topk=aggregate_to_topk, name=name, ctx=_ctx) 

91 except _core._SymbolicException: 

92 pass # Add nodes to the TensorFlow graph. 

93 except (TypeError, ValueError): 

94 _result = _dispatch.dispatch( 

95 approx_top_k, (), dict(input=input, k=k, 

96 reduction_dimension=reduction_dimension, 

97 recall_target=recall_target, 

98 is_max_k=is_max_k, 

99 reduction_input_size_override=reduction_input_size_override, 

100 aggregate_to_topk=aggregate_to_topk, 

101 name=name) 

102 ) 

103 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

104 return _result 

105 raise 

106 else: 

107 _result = _dispatcher_for_approx_top_k( 

108 (input, k, reduction_dimension, recall_target, is_max_k, 

109 reduction_input_size_override, aggregate_to_topk, name,), None) 

110 if _result is not NotImplemented: 

111 return _result 

112 # Add nodes to the TensorFlow graph. 

113 k = _execute.make_int(k, "k") 

114 if reduction_dimension is None: 

115 reduction_dimension = -1 

116 reduction_dimension = _execute.make_int(reduction_dimension, "reduction_dimension") 

117 if recall_target is None: 

118 recall_target = 0.95 

119 recall_target = _execute.make_float(recall_target, "recall_target") 

120 if is_max_k is None: 

121 is_max_k = True 

122 is_max_k = _execute.make_bool(is_max_k, "is_max_k") 

123 if reduction_input_size_override is None: 

124 reduction_input_size_override = -1 

125 reduction_input_size_override = _execute.make_int(reduction_input_size_override, "reduction_input_size_override") 

126 if aggregate_to_topk is None: 

127 aggregate_to_topk = True 

128 aggregate_to_topk = _execute.make_bool(aggregate_to_topk, "aggregate_to_topk") 

129 try: 

130 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

131 "ApproxTopK", input=input, k=k, 

132 reduction_dimension=reduction_dimension, 

133 recall_target=recall_target, is_max_k=is_max_k, 

134 reduction_input_size_override=reduction_input_size_override, 

135 aggregate_to_topk=aggregate_to_topk, name=name) 

136 except (TypeError, ValueError): 

137 _result = _dispatch.dispatch( 

138 approx_top_k, (), dict(input=input, k=k, 

139 reduction_dimension=reduction_dimension, 

140 recall_target=recall_target, 

141 is_max_k=is_max_k, 

142 reduction_input_size_override=reduction_input_size_override, 

143 aggregate_to_topk=aggregate_to_topk, 

144 name=name) 

145 ) 

146 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

147 return _result 

148 raise 

149 _result = _outputs[:] 

150 if _execute.must_record_gradient(): 

151 _attrs = ("k", _op._get_attr_int("k"), "reduction_dimension", 

152 _op._get_attr_int("reduction_dimension"), "recall_target", 

153 _op.get_attr("recall_target"), "is_max_k", 

154 _op._get_attr_bool("is_max_k"), "reduction_input_size_override", 

155 _op._get_attr_int("reduction_input_size_override"), 

156 "aggregate_to_topk", _op._get_attr_bool("aggregate_to_topk"), 

157 "T", _op._get_attr_type("T")) 

158 _inputs_flat = _op.inputs 

159 _execute.record_gradient( 

160 "ApproxTopK", _inputs_flat, _attrs, _result) 

161 _result = _ApproxTopKOutput._make(_result) 

162 return _result 

163 

164ApproxTopK = tf_export("raw_ops.ApproxTopK")(_ops.to_raw_op(approx_top_k)) 

165_dispatcher_for_approx_top_k = approx_top_k._tf_type_based_dispatcher.Dispatch 

166 

167 

168def approx_top_k_eager_fallback(input, k, reduction_dimension, recall_target, is_max_k, reduction_input_size_override, aggregate_to_topk, name, ctx): 

169 k = _execute.make_int(k, "k") 

170 if reduction_dimension is None: 

171 reduction_dimension = -1 

172 reduction_dimension = _execute.make_int(reduction_dimension, "reduction_dimension") 

173 if recall_target is None: 

174 recall_target = 0.95 

175 recall_target = _execute.make_float(recall_target, "recall_target") 

176 if is_max_k is None: 

177 is_max_k = True 

178 is_max_k = _execute.make_bool(is_max_k, "is_max_k") 

179 if reduction_input_size_override is None: 

180 reduction_input_size_override = -1 

181 reduction_input_size_override = _execute.make_int(reduction_input_size_override, "reduction_input_size_override") 

182 if aggregate_to_topk is None: 

183 aggregate_to_topk = True 

184 aggregate_to_topk = _execute.make_bool(aggregate_to_topk, "aggregate_to_topk") 

185 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

186 _inputs_flat = [input] 

187 _attrs = ("k", k, "reduction_dimension", reduction_dimension, 

188 "recall_target", recall_target, "is_max_k", is_max_k, 

189 "reduction_input_size_override", reduction_input_size_override, 

190 "aggregate_to_topk", aggregate_to_topk, "T", _attr_T) 

191 _result = _execute.execute(b"ApproxTopK", 2, inputs=_inputs_flat, 

192 attrs=_attrs, ctx=ctx, name=name) 

193 if _execute.must_record_gradient(): 

194 _execute.record_gradient( 

195 "ApproxTopK", _inputs_flat, _attrs, _result) 

196 _result = _ApproxTopKOutput._make(_result) 

197 return _result 

198 

199 

200def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None): 

201 r"""Performs average pooling on the input. 

202 

203 Each entry in `output` is the mean of the corresponding size `ksize` 

204 window in `value`. 

205 

206 Args: 

207 value: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

208 4-D with shape `[batch, height, width, channels]`. 

209 ksize: A list of `ints` that has length `>= 4`. 

210 The size of the sliding window for each dimension of `value`. 

211 strides: A list of `ints` that has length `>= 4`. 

212 The stride of the sliding window for each dimension of `value`. 

213 padding: A `string` from: `"SAME", "VALID"`. 

214 The type of padding algorithm to use. 

215 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

216 Specify the data format of the input and output data. With the 

217 default format "NHWC", the data is stored in the order of: 

218 [batch, in_height, in_width, in_channels]. 

219 Alternatively, the format could be "NCHW", the data storage order of: 

220 [batch, in_channels, in_height, in_width]. 

221 name: A name for the operation (optional). 

222 

223 Returns: 

224 A `Tensor`. Has the same type as `value`. 

225 """ 

226 _ctx = _context._context or _context.context() 

227 tld = _ctx._thread_local_data 

228 if tld.is_eager: 

229 try: 

230 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

231 _ctx, "AvgPool", name, value, "ksize", ksize, "strides", strides, 

232 "padding", padding, "data_format", data_format) 

233 return _result 

234 except _core._NotOkStatusException as e: 

235 _ops.raise_from_not_ok_status(e, name) 

236 except _core._FallbackException: 

237 pass 

238 try: 

239 return avg_pool_eager_fallback( 

240 value, ksize=ksize, strides=strides, padding=padding, 

241 data_format=data_format, name=name, ctx=_ctx) 

242 except _core._SymbolicException: 

243 pass # Add nodes to the TensorFlow graph. 

244 # Add nodes to the TensorFlow graph. 

245 if not isinstance(ksize, (list, tuple)): 

246 raise TypeError( 

247 "Expected list for 'ksize' argument to " 

248 "'avg_pool' Op, not %r." % ksize) 

249 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

250 if not isinstance(strides, (list, tuple)): 

251 raise TypeError( 

252 "Expected list for 'strides' argument to " 

253 "'avg_pool' Op, not %r." % strides) 

254 strides = [_execute.make_int(_i, "strides") for _i in strides] 

255 padding = _execute.make_str(padding, "padding") 

256 if data_format is None: 

257 data_format = "NHWC" 

258 data_format = _execute.make_str(data_format, "data_format") 

259 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

260 "AvgPool", value=value, ksize=ksize, strides=strides, padding=padding, 

261 data_format=data_format, name=name) 

262 _result = _outputs[:] 

263 if _execute.must_record_gradient(): 

264 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

265 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

266 "data_format", _op.get_attr("data_format"), "T", 

267 _op._get_attr_type("T")) 

268 _inputs_flat = _op.inputs 

269 _execute.record_gradient( 

270 "AvgPool", _inputs_flat, _attrs, _result) 

271 _result, = _result 

272 return _result 

273 

274AvgPool = tf_export("raw_ops.AvgPool")(_ops.to_raw_op(avg_pool)) 

275 

276 

277def avg_pool_eager_fallback(value, ksize, strides, padding, data_format, name, ctx): 

278 if not isinstance(ksize, (list, tuple)): 

279 raise TypeError( 

280 "Expected list for 'ksize' argument to " 

281 "'avg_pool' Op, not %r." % ksize) 

282 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

283 if not isinstance(strides, (list, tuple)): 

284 raise TypeError( 

285 "Expected list for 'strides' argument to " 

286 "'avg_pool' Op, not %r." % strides) 

287 strides = [_execute.make_int(_i, "strides") for _i in strides] 

288 padding = _execute.make_str(padding, "padding") 

289 if data_format is None: 

290 data_format = "NHWC" 

291 data_format = _execute.make_str(data_format, "data_format") 

292 _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

293 _inputs_flat = [value] 

294 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

295 "data_format", data_format, "T", _attr_T) 

296 _result = _execute.execute(b"AvgPool", 1, inputs=_inputs_flat, attrs=_attrs, 

297 ctx=ctx, name=name) 

298 if _execute.must_record_gradient(): 

299 _execute.record_gradient( 

300 "AvgPool", _inputs_flat, _attrs, _result) 

301 _result, = _result 

302 return _result 

303 

304 

305def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): 

306 r"""Performs 3D average pooling on the input. 

307 

308 Each entry in `output` is the mean of the corresponding size `ksize` window in 

309 `value`. 

310 

311 Args: 

312 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

313 Shape `[batch, depth, rows, cols, channels]` tensor to pool over. 

314 ksize: A list of `ints` that has length `>= 5`. 

315 1-D tensor of length 5. The size of the window for each dimension of 

316 the input tensor. Must have `ksize[0] = ksize[4] = 1`. 

317 strides: A list of `ints` that has length `>= 5`. 

318 1-D tensor of length 5. The stride of the sliding window for each 

319 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

320 padding: A `string` from: `"SAME", "VALID"`. 

321 The type of padding algorithm to use. 

322 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

323 The data format of the input and output data. With the 

324 default format "NDHWC", the data is stored in the order of: 

325 [batch, in_depth, in_height, in_width, in_channels]. 

326 Alternatively, the format could be "NCDHW", the data storage order is: 

327 [batch, in_channels, in_depth, in_height, in_width]. 

328 name: A name for the operation (optional). 

329 

330 Returns: 

331 A `Tensor`. Has the same type as `input`. 

332 """ 

333 _ctx = _context._context or _context.context() 

334 tld = _ctx._thread_local_data 

335 if tld.is_eager: 

336 try: 

337 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

338 _ctx, "AvgPool3D", name, input, "ksize", ksize, "strides", strides, 

339 "padding", padding, "data_format", data_format) 

340 return _result 

341 except _core._NotOkStatusException as e: 

342 _ops.raise_from_not_ok_status(e, name) 

343 except _core._FallbackException: 

344 pass 

345 try: 

346 return avg_pool3d_eager_fallback( 

347 input, ksize=ksize, strides=strides, padding=padding, 

348 data_format=data_format, name=name, ctx=_ctx) 

349 except _core._SymbolicException: 

350 pass # Add nodes to the TensorFlow graph. 

351 # Add nodes to the TensorFlow graph. 

352 if not isinstance(ksize, (list, tuple)): 

353 raise TypeError( 

354 "Expected list for 'ksize' argument to " 

355 "'avg_pool3d' Op, not %r." % ksize) 

356 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

357 if not isinstance(strides, (list, tuple)): 

358 raise TypeError( 

359 "Expected list for 'strides' argument to " 

360 "'avg_pool3d' Op, not %r." % strides) 

361 strides = [_execute.make_int(_i, "strides") for _i in strides] 

362 padding = _execute.make_str(padding, "padding") 

363 if data_format is None: 

364 data_format = "NDHWC" 

365 data_format = _execute.make_str(data_format, "data_format") 

366 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

367 "AvgPool3D", input=input, ksize=ksize, strides=strides, 

368 padding=padding, data_format=data_format, name=name) 

369 _result = _outputs[:] 

370 if _execute.must_record_gradient(): 

371 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

372 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

373 "data_format", _op.get_attr("data_format"), "T", 

374 _op._get_attr_type("T")) 

375 _inputs_flat = _op.inputs 

376 _execute.record_gradient( 

377 "AvgPool3D", _inputs_flat, _attrs, _result) 

378 _result, = _result 

379 return _result 

380 

381AvgPool3D = tf_export("raw_ops.AvgPool3D")(_ops.to_raw_op(avg_pool3d)) 

382 

383 

384def avg_pool3d_eager_fallback(input, ksize, strides, padding, data_format, name, ctx): 

385 if not isinstance(ksize, (list, tuple)): 

386 raise TypeError( 

387 "Expected list for 'ksize' argument to " 

388 "'avg_pool3d' Op, not %r." % ksize) 

389 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

390 if not isinstance(strides, (list, tuple)): 

391 raise TypeError( 

392 "Expected list for 'strides' argument to " 

393 "'avg_pool3d' Op, not %r." % strides) 

394 strides = [_execute.make_int(_i, "strides") for _i in strides] 

395 padding = _execute.make_str(padding, "padding") 

396 if data_format is None: 

397 data_format = "NDHWC" 

398 data_format = _execute.make_str(data_format, "data_format") 

399 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

400 _inputs_flat = [input] 

401 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

402 "data_format", data_format, "T", _attr_T) 

403 _result = _execute.execute(b"AvgPool3D", 1, inputs=_inputs_flat, 

404 attrs=_attrs, ctx=ctx, name=name) 

405 if _execute.must_record_gradient(): 

406 _execute.record_gradient( 

407 "AvgPool3D", _inputs_flat, _attrs, _result) 

408 _result, = _result 

409 return _result 

410 

411 

412def avg_pool3d_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NDHWC", name=None): 

413 r"""Computes gradients of average pooling function. 

414 

415 Args: 

416 orig_input_shape: A `Tensor` of type `int32`. 

417 The original input dimensions. 

418 grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

419 Output backprop of shape `[batch, depth, rows, cols, channels]`. 

420 ksize: A list of `ints` that has length `>= 5`. 

421 1-D tensor of length 5. The size of the window for each dimension of 

422 the input tensor. Must have `ksize[0] = ksize[4] = 1`. 

423 strides: A list of `ints` that has length `>= 5`. 

424 1-D tensor of length 5. The stride of the sliding window for each 

425 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

426 padding: A `string` from: `"SAME", "VALID"`. 

427 The type of padding algorithm to use. 

428 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

429 The data format of the input and output data. With the 

430 default format "NDHWC", the data is stored in the order of: 

431 [batch, in_depth, in_height, in_width, in_channels]. 

432 Alternatively, the format could be "NCDHW", the data storage order is: 

433 [batch, in_channels, in_depth, in_height, in_width]. 

434 name: A name for the operation (optional). 

435 

436 Returns: 

437 A `Tensor`. Has the same type as `grad`. 

438 """ 

439 _ctx = _context._context or _context.context() 

440 tld = _ctx._thread_local_data 

441 if tld.is_eager: 

442 try: 

443 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

444 _ctx, "AvgPool3DGrad", name, orig_input_shape, grad, "ksize", ksize, 

445 "strides", strides, "padding", padding, "data_format", data_format) 

446 return _result 

447 except _core._NotOkStatusException as e: 

448 _ops.raise_from_not_ok_status(e, name) 

449 except _core._FallbackException: 

450 pass 

451 try: 

452 return avg_pool3d_grad_eager_fallback( 

453 orig_input_shape, grad, ksize=ksize, strides=strides, 

454 padding=padding, data_format=data_format, name=name, ctx=_ctx) 

455 except _core._SymbolicException: 

456 pass # Add nodes to the TensorFlow graph. 

457 # Add nodes to the TensorFlow graph. 

458 if not isinstance(ksize, (list, tuple)): 

459 raise TypeError( 

460 "Expected list for 'ksize' argument to " 

461 "'avg_pool3d_grad' Op, not %r." % ksize) 

462 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

463 if not isinstance(strides, (list, tuple)): 

464 raise TypeError( 

465 "Expected list for 'strides' argument to " 

466 "'avg_pool3d_grad' Op, not %r." % strides) 

467 strides = [_execute.make_int(_i, "strides") for _i in strides] 

468 padding = _execute.make_str(padding, "padding") 

469 if data_format is None: 

470 data_format = "NDHWC" 

471 data_format = _execute.make_str(data_format, "data_format") 

472 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

473 "AvgPool3DGrad", orig_input_shape=orig_input_shape, grad=grad, 

474 ksize=ksize, strides=strides, padding=padding, 

475 data_format=data_format, name=name) 

476 _result = _outputs[:] 

477 if _execute.must_record_gradient(): 

478 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

479 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

480 "data_format", _op.get_attr("data_format"), "T", 

481 _op._get_attr_type("T")) 

482 _inputs_flat = _op.inputs 

483 _execute.record_gradient( 

484 "AvgPool3DGrad", _inputs_flat, _attrs, _result) 

485 _result, = _result 

486 return _result 

487 

488AvgPool3DGrad = tf_export("raw_ops.AvgPool3DGrad")(_ops.to_raw_op(avg_pool3d_grad)) 

489 

490 

491def avg_pool3d_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format, name, ctx): 

492 if not isinstance(ksize, (list, tuple)): 

493 raise TypeError( 

494 "Expected list for 'ksize' argument to " 

495 "'avg_pool3d_grad' Op, not %r." % ksize) 

496 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

497 if not isinstance(strides, (list, tuple)): 

498 raise TypeError( 

499 "Expected list for 'strides' argument to " 

500 "'avg_pool3d_grad' Op, not %r." % strides) 

501 strides = [_execute.make_int(_i, "strides") for _i in strides] 

502 padding = _execute.make_str(padding, "padding") 

503 if data_format is None: 

504 data_format = "NDHWC" 

505 data_format = _execute.make_str(data_format, "data_format") 

506 _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

507 orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32) 

508 _inputs_flat = [orig_input_shape, grad] 

509 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

510 "data_format", data_format, "T", _attr_T) 

511 _result = _execute.execute(b"AvgPool3DGrad", 1, inputs=_inputs_flat, 

512 attrs=_attrs, ctx=ctx, name=name) 

513 if _execute.must_record_gradient(): 

514 _execute.record_gradient( 

515 "AvgPool3DGrad", _inputs_flat, _attrs, _result) 

516 _result, = _result 

517 return _result 

518 

519 

520def avg_pool_grad(orig_input_shape, grad, ksize, strides, padding, data_format="NHWC", name=None): 

521 r"""Computes gradients of the average pooling function. 

522 

523 Args: 

524 orig_input_shape: A `Tensor` of type `int32`. 

525 1-D. Shape of the original input to `avg_pool`. 

526 grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

527 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. 

528 the output of `avg_pool`. 

529 ksize: A list of `ints` that has length `>= 4`. 

530 The size of the sliding window for each dimension of the input. 

531 strides: A list of `ints` that has length `>= 4`. 

532 The stride of the sliding window for each dimension of the input. 

533 padding: A `string` from: `"SAME", "VALID"`. 

534 The type of padding algorithm to use. 

535 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

536 Specify the data format of the input and output data. With the 

537 default format "NHWC", the data is stored in the order of: 

538 [batch, in_height, in_width, in_channels]. 

539 Alternatively, the format could be "NCHW", the data storage order of: 

540 [batch, in_channels, in_height, in_width]. 

541 name: A name for the operation (optional). 

542 

543 Returns: 

544 A `Tensor`. Has the same type as `grad`. 

545 """ 

546 _ctx = _context._context or _context.context() 

547 tld = _ctx._thread_local_data 

548 if tld.is_eager: 

549 try: 

550 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

551 _ctx, "AvgPoolGrad", name, orig_input_shape, grad, "ksize", ksize, 

552 "strides", strides, "padding", padding, "data_format", data_format) 

553 return _result 

554 except _core._NotOkStatusException as e: 

555 _ops.raise_from_not_ok_status(e, name) 

556 except _core._FallbackException: 

557 pass 

558 try: 

559 return avg_pool_grad_eager_fallback( 

560 orig_input_shape, grad, ksize=ksize, strides=strides, 

561 padding=padding, data_format=data_format, name=name, ctx=_ctx) 

562 except _core._SymbolicException: 

563 pass # Add nodes to the TensorFlow graph. 

564 # Add nodes to the TensorFlow graph. 

565 if not isinstance(ksize, (list, tuple)): 

566 raise TypeError( 

567 "Expected list for 'ksize' argument to " 

568 "'avg_pool_grad' Op, not %r." % ksize) 

569 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

570 if not isinstance(strides, (list, tuple)): 

571 raise TypeError( 

572 "Expected list for 'strides' argument to " 

573 "'avg_pool_grad' Op, not %r." % strides) 

574 strides = [_execute.make_int(_i, "strides") for _i in strides] 

575 padding = _execute.make_str(padding, "padding") 

576 if data_format is None: 

577 data_format = "NHWC" 

578 data_format = _execute.make_str(data_format, "data_format") 

579 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

580 "AvgPoolGrad", orig_input_shape=orig_input_shape, grad=grad, 

581 ksize=ksize, strides=strides, padding=padding, 

582 data_format=data_format, name=name) 

583 _result = _outputs[:] 

584 if _execute.must_record_gradient(): 

585 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

586 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

587 "data_format", _op.get_attr("data_format"), "T", 

588 _op._get_attr_type("T")) 

589 _inputs_flat = _op.inputs 

590 _execute.record_gradient( 

591 "AvgPoolGrad", _inputs_flat, _attrs, _result) 

592 _result, = _result 

593 return _result 

594 

595AvgPoolGrad = tf_export("raw_ops.AvgPoolGrad")(_ops.to_raw_op(avg_pool_grad)) 

596 

597 

598def avg_pool_grad_eager_fallback(orig_input_shape, grad, ksize, strides, padding, data_format, name, ctx): 

599 if not isinstance(ksize, (list, tuple)): 

600 raise TypeError( 

601 "Expected list for 'ksize' argument to " 

602 "'avg_pool_grad' Op, not %r." % ksize) 

603 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

604 if not isinstance(strides, (list, tuple)): 

605 raise TypeError( 

606 "Expected list for 'strides' argument to " 

607 "'avg_pool_grad' Op, not %r." % strides) 

608 strides = [_execute.make_int(_i, "strides") for _i in strides] 

609 padding = _execute.make_str(padding, "padding") 

610 if data_format is None: 

611 data_format = "NHWC" 

612 data_format = _execute.make_str(data_format, "data_format") 

613 _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

614 orig_input_shape = _ops.convert_to_tensor(orig_input_shape, _dtypes.int32) 

615 _inputs_flat = [orig_input_shape, grad] 

616 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

617 "data_format", data_format, "T", _attr_T) 

618 _result = _execute.execute(b"AvgPoolGrad", 1, inputs=_inputs_flat, 

619 attrs=_attrs, ctx=ctx, name=name) 

620 if _execute.must_record_gradient(): 

621 _execute.record_gradient( 

622 "AvgPoolGrad", _inputs_flat, _attrs, _result) 

623 _result, = _result 

624 return _result 

625 

626 

627def _batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): 

628 r"""Batch normalization. 

629 

630 This op is deprecated. Prefer `tf.nn.batch_normalization`. 

631 

632 Args: 

633 t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

634 A 4D input Tensor. 

635 m: A `Tensor`. Must have the same type as `t`. 

636 A 1D mean Tensor with size matching the last dimension of t. 

637 This is the first output from tf.nn.moments, 

638 or a saved moving average thereof. 

639 v: A `Tensor`. Must have the same type as `t`. 

640 A 1D variance Tensor with size matching the last dimension of t. 

641 This is the second output from tf.nn.moments, 

642 or a saved moving average thereof. 

643 beta: A `Tensor`. Must have the same type as `t`. 

644 A 1D beta Tensor with size matching the last dimension of t. 

645 An offset to be added to the normalized tensor. 

646 gamma: A `Tensor`. Must have the same type as `t`. 

647 A 1D gamma Tensor with size matching the last dimension of t. 

648 If "scale_after_normalization" is true, this tensor will be multiplied 

649 with the normalized tensor. 

650 variance_epsilon: A `float`. A small float number to avoid dividing by 0. 

651 scale_after_normalization: A `bool`. 

652 A bool indicating whether the resulted tensor 

653 needs to be multiplied with gamma. 

654 name: A name for the operation (optional). 

655 

656 Returns: 

657 A `Tensor`. Has the same type as `t`. 

658 """ 

659 _ctx = _context._context or _context.context() 

660 tld = _ctx._thread_local_data 

661 if tld.is_eager: 

662 try: 

663 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

664 _ctx, "BatchNormWithGlobalNormalization", name, t, m, v, beta, gamma, 

665 "variance_epsilon", variance_epsilon, "scale_after_normalization", 

666 scale_after_normalization) 

667 return _result 

668 except _core._NotOkStatusException as e: 

669 _ops.raise_from_not_ok_status(e, name) 

670 except _core._FallbackException: 

671 pass 

672 try: 

673 return _batch_norm_with_global_normalization_eager_fallback( 

674 t, m, v, beta, gamma, variance_epsilon=variance_epsilon, 

675 scale_after_normalization=scale_after_normalization, name=name, 

676 ctx=_ctx) 

677 except _core._SymbolicException: 

678 pass # Add nodes to the TensorFlow graph. 

679 # Add nodes to the TensorFlow graph. 

680 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

681 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

682 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

683 "BatchNormWithGlobalNormalization", t=t, m=m, v=v, beta=beta, 

684 gamma=gamma, 

685 variance_epsilon=variance_epsilon, 

686 scale_after_normalization=scale_after_normalization, 

687 name=name) 

688 _result = _outputs[:] 

689 if _execute.must_record_gradient(): 

690 _attrs = ("T", _op._get_attr_type("T"), "variance_epsilon", 

691 _op.get_attr("variance_epsilon"), "scale_after_normalization", 

692 _op._get_attr_bool("scale_after_normalization")) 

693 _inputs_flat = _op.inputs 

694 _execute.record_gradient( 

695 "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result) 

696 _result, = _result 

697 return _result 

698 

699BatchNormWithGlobalNormalization = tf_export("raw_ops.BatchNormWithGlobalNormalization")(_ops.to_raw_op(_batch_norm_with_global_normalization)) 

700 

701 

702def _batch_norm_with_global_normalization_eager_fallback(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name, ctx): 

703 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

704 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

705 _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, beta, gamma], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

706 (t, m, v, beta, gamma) = _inputs_T 

707 _inputs_flat = [t, m, v, beta, gamma] 

708 _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon, 

709 "scale_after_normalization", scale_after_normalization) 

710 _result = _execute.execute(b"BatchNormWithGlobalNormalization", 1, 

711 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

712 name=name) 

713 if _execute.must_record_gradient(): 

714 _execute.record_gradient( 

715 "BatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result) 

716 _result, = _result 

717 return _result 

718 

719_BatchNormWithGlobalNormalizationGradOutput = collections.namedtuple( 

720 "BatchNormWithGlobalNormalizationGrad", 

721 ["dx", "dm", "dv", "db", "dg"]) 

722 

723 

724def batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name=None): 

725 r"""Gradients for batch normalization. 

726 

727 This op is deprecated. See `tf.nn.batch_normalization`. 

728 

729 Args: 

730 t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

731 A 4D input Tensor. 

732 m: A `Tensor`. Must have the same type as `t`. 

733 A 1D mean Tensor with size matching the last dimension of t. 

734 This is the first output from tf.nn.moments, 

735 or a saved moving average thereof. 

736 v: A `Tensor`. Must have the same type as `t`. 

737 A 1D variance Tensor with size matching the last dimension of t. 

738 This is the second output from tf.nn.moments, 

739 or a saved moving average thereof. 

740 gamma: A `Tensor`. Must have the same type as `t`. 

741 A 1D gamma Tensor with size matching the last dimension of t. 

742 If "scale_after_normalization" is true, this Tensor will be multiplied 

743 with the normalized Tensor. 

744 backprop: A `Tensor`. Must have the same type as `t`. 4D backprop Tensor. 

745 variance_epsilon: A `float`. A small float number to avoid dividing by 0. 

746 scale_after_normalization: A `bool`. 

747 A bool indicating whether the resulted tensor 

748 needs to be multiplied with gamma. 

749 name: A name for the operation (optional). 

750 

751 Returns: 

752 A tuple of `Tensor` objects (dx, dm, dv, db, dg). 

753 

754 dx: A `Tensor`. Has the same type as `t`. 

755 dm: A `Tensor`. Has the same type as `t`. 

756 dv: A `Tensor`. Has the same type as `t`. 

757 db: A `Tensor`. Has the same type as `t`. 

758 dg: A `Tensor`. Has the same type as `t`. 

759 """ 

760 _ctx = _context._context or _context.context() 

761 tld = _ctx._thread_local_data 

762 if tld.is_eager: 

763 try: 

764 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

765 _ctx, "BatchNormWithGlobalNormalizationGrad", name, t, m, v, gamma, 

766 backprop, "variance_epsilon", variance_epsilon, 

767 "scale_after_normalization", scale_after_normalization) 

768 _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result) 

769 return _result 

770 except _core._NotOkStatusException as e: 

771 _ops.raise_from_not_ok_status(e, name) 

772 except _core._FallbackException: 

773 pass 

774 try: 

775 return batch_norm_with_global_normalization_grad_eager_fallback( 

776 t, m, v, gamma, backprop, variance_epsilon=variance_epsilon, 

777 scale_after_normalization=scale_after_normalization, name=name, 

778 ctx=_ctx) 

779 except _core._SymbolicException: 

780 pass # Add nodes to the TensorFlow graph. 

781 # Add nodes to the TensorFlow graph. 

782 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

783 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

784 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

785 "BatchNormWithGlobalNormalizationGrad", t=t, m=m, v=v, gamma=gamma, 

786 backprop=backprop, 

787 variance_epsilon=variance_epsilon, 

788 scale_after_normalization=scale_after_normalization, 

789 name=name) 

790 _result = _outputs[:] 

791 if _execute.must_record_gradient(): 

792 _attrs = ("T", _op._get_attr_type("T"), "variance_epsilon", 

793 _op.get_attr("variance_epsilon"), "scale_after_normalization", 

794 _op._get_attr_bool("scale_after_normalization")) 

795 _inputs_flat = _op.inputs 

796 _execute.record_gradient( 

797 "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result) 

798 _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result) 

799 return _result 

800 

801BatchNormWithGlobalNormalizationGrad = tf_export("raw_ops.BatchNormWithGlobalNormalizationGrad")(_ops.to_raw_op(batch_norm_with_global_normalization_grad)) 

802 

803 

804def batch_norm_with_global_normalization_grad_eager_fallback(t, m, v, gamma, backprop, variance_epsilon, scale_after_normalization, name, ctx): 

805 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

806 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

807 _attr_T, _inputs_T = _execute.args_to_matching_eager([t, m, v, gamma, backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

808 (t, m, v, gamma, backprop) = _inputs_T 

809 _inputs_flat = [t, m, v, gamma, backprop] 

810 _attrs = ("T", _attr_T, "variance_epsilon", variance_epsilon, 

811 "scale_after_normalization", scale_after_normalization) 

812 _result = _execute.execute(b"BatchNormWithGlobalNormalizationGrad", 5, 

813 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

814 name=name) 

815 if _execute.must_record_gradient(): 

816 _execute.record_gradient( 

817 "BatchNormWithGlobalNormalizationGrad", _inputs_flat, _attrs, _result) 

818 _result = _BatchNormWithGlobalNormalizationGradOutput._make(_result) 

819 return _result 

820 

821 

822def bias_add(value, bias, data_format="NHWC", name=None): 

823 r"""Adds `bias` to `value`. 

824 

825 This is a special case of `tf.add` where `bias` is restricted to be 1-D. 

826 Broadcasting is supported, so `value` may have any number of dimensions. 

827 

828 Args: 

829 value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

830 Any number of dimensions. 

831 bias: A `Tensor`. Must have the same type as `value`. 

832 1-D with size the last dimension of `value`. 

833 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

834 Specify the data format of the input and output data. With the 

835 default format "NHWC", the bias tensor will be added to the last dimension 

836 of the value tensor. 

837 Alternatively, the format could be "NCHW", the data storage order of: 

838 [batch, in_channels, in_height, in_width]. 

839 The tensor will be added to "in_channels", the third-to-the-last 

840 dimension. 

841 name: A name for the operation (optional). 

842 

843 Returns: 

844 A `Tensor`. Has the same type as `value`. 

845 """ 

846 _ctx = _context._context or _context.context() 

847 tld = _ctx._thread_local_data 

848 if tld.is_eager: 

849 try: 

850 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

851 _ctx, "BiasAdd", name, value, bias, "data_format", data_format) 

852 return _result 

853 except _core._NotOkStatusException as e: 

854 _ops.raise_from_not_ok_status(e, name) 

855 except _core._FallbackException: 

856 pass 

857 try: 

858 return bias_add_eager_fallback( 

859 value, bias, data_format=data_format, name=name, ctx=_ctx) 

860 except _core._SymbolicException: 

861 pass # Add nodes to the TensorFlow graph. 

862 # Add nodes to the TensorFlow graph. 

863 if data_format is None: 

864 data_format = "NHWC" 

865 data_format = _execute.make_str(data_format, "data_format") 

866 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

867 "BiasAdd", value=value, bias=bias, data_format=data_format, name=name) 

868 _result = _outputs[:] 

869 if _execute.must_record_gradient(): 

870 _attrs = ("T", _op._get_attr_type("T"), "data_format", 

871 _op.get_attr("data_format")) 

872 _inputs_flat = _op.inputs 

873 _execute.record_gradient( 

874 "BiasAdd", _inputs_flat, _attrs, _result) 

875 _result, = _result 

876 return _result 

877 

878BiasAdd = tf_export("raw_ops.BiasAdd")(_ops.to_raw_op(bias_add)) 

879 

880 

881def bias_add_eager_fallback(value, bias, data_format, name, ctx): 

882 if data_format is None: 

883 data_format = "NHWC" 

884 data_format = _execute.make_str(data_format, "data_format") 

885 _attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

886 (value, bias) = _inputs_T 

887 _inputs_flat = [value, bias] 

888 _attrs = ("T", _attr_T, "data_format", data_format) 

889 _result = _execute.execute(b"BiasAdd", 1, inputs=_inputs_flat, attrs=_attrs, 

890 ctx=ctx, name=name) 

891 if _execute.must_record_gradient(): 

892 _execute.record_gradient( 

893 "BiasAdd", _inputs_flat, _attrs, _result) 

894 _result, = _result 

895 return _result 

896 

897 

898def bias_add_grad(out_backprop, data_format="NHWC", name=None): 

899 r"""The backward operation for "BiasAdd" on the "bias" tensor. 

900 

901 It accumulates all the values from out_backprop into the feature dimension. 

902 For NHWC data format, the feature dimension is the last. For NCHW data format, 

903 the feature dimension is the third-to-last. 

904 

905 Args: 

906 out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

907 Any number of dimensions. 

908 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

909 Specify the data format of the input and output data. With the 

910 default format "NHWC", the bias tensor will be added to the last dimension 

911 of the value tensor. 

912 Alternatively, the format could be "NCHW", the data storage order of: 

913 [batch, in_channels, in_height, in_width]. 

914 The tensor will be added to "in_channels", the third-to-the-last 

915 dimension. 

916 name: A name for the operation (optional). 

917 

918 Returns: 

919 A `Tensor`. Has the same type as `out_backprop`. 

920 """ 

921 _ctx = _context._context or _context.context() 

922 tld = _ctx._thread_local_data 

923 if tld.is_eager: 

924 try: 

925 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

926 _ctx, "BiasAddGrad", name, out_backprop, "data_format", data_format) 

927 return _result 

928 except _core._NotOkStatusException as e: 

929 _ops.raise_from_not_ok_status(e, name) 

930 except _core._FallbackException: 

931 pass 

932 try: 

933 return bias_add_grad_eager_fallback( 

934 out_backprop, data_format=data_format, name=name, ctx=_ctx) 

935 except _core._SymbolicException: 

936 pass # Add nodes to the TensorFlow graph. 

937 # Add nodes to the TensorFlow graph. 

938 if data_format is None: 

939 data_format = "NHWC" 

940 data_format = _execute.make_str(data_format, "data_format") 

941 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

942 "BiasAddGrad", out_backprop=out_backprop, data_format=data_format, 

943 name=name) 

944 _result = _outputs[:] 

945 if _execute.must_record_gradient(): 

946 _attrs = ("T", _op._get_attr_type("T"), "data_format", 

947 _op.get_attr("data_format")) 

948 _inputs_flat = _op.inputs 

949 _execute.record_gradient( 

950 "BiasAddGrad", _inputs_flat, _attrs, _result) 

951 _result, = _result 

952 return _result 

953 

954BiasAddGrad = tf_export("raw_ops.BiasAddGrad")(_ops.to_raw_op(bias_add_grad)) 

955 

956 

957def bias_add_grad_eager_fallback(out_backprop, data_format, name, ctx): 

958 if data_format is None: 

959 data_format = "NHWC" 

960 data_format = _execute.make_str(data_format, "data_format") 

961 _attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

962 _inputs_flat = [out_backprop] 

963 _attrs = ("T", _attr_T, "data_format", data_format) 

964 _result = _execute.execute(b"BiasAddGrad", 1, inputs=_inputs_flat, 

965 attrs=_attrs, ctx=ctx, name=name) 

966 if _execute.must_record_gradient(): 

967 _execute.record_gradient( 

968 "BiasAddGrad", _inputs_flat, _attrs, _result) 

969 _result, = _result 

970 return _result 

971 

972 

973def bias_add_v1(value, bias, name=None): 

974 r"""Adds `bias` to `value`. 

975 

976 This is a deprecated version of BiasAdd and will be soon removed. 

977 

978 This is a special case of `tf.add` where `bias` is restricted to be 1-D. 

979 Broadcasting is supported, so `value` may have any number of dimensions. 

980 

981 Args: 

982 value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. 

983 Any number of dimensions. 

984 bias: A `Tensor`. Must have the same type as `value`. 

985 1-D with size the last dimension of `value`. 

986 name: A name for the operation (optional). 

987 

988 Returns: 

989 A `Tensor`. Has the same type as `value`. 

990 """ 

991 _ctx = _context._context or _context.context() 

992 tld = _ctx._thread_local_data 

993 if tld.is_eager: 

994 try: 

995 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

996 _ctx, "BiasAddV1", name, value, bias) 

997 return _result 

998 except _core._NotOkStatusException as e: 

999 _ops.raise_from_not_ok_status(e, name) 

1000 except _core._FallbackException: 

1001 pass 

1002 try: 

1003 return bias_add_v1_eager_fallback( 

1004 value, bias, name=name, ctx=_ctx) 

1005 except _core._SymbolicException: 

1006 pass # Add nodes to the TensorFlow graph. 

1007 # Add nodes to the TensorFlow graph. 

1008 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1009 "BiasAddV1", value=value, bias=bias, name=name) 

1010 _result = _outputs[:] 

1011 if _execute.must_record_gradient(): 

1012 _attrs = ("T", _op._get_attr_type("T")) 

1013 _inputs_flat = _op.inputs 

1014 _execute.record_gradient( 

1015 "BiasAddV1", _inputs_flat, _attrs, _result) 

1016 _result, = _result 

1017 return _result 

1018 

1019BiasAddV1 = tf_export("raw_ops.BiasAddV1")(_ops.to_raw_op(bias_add_v1)) 

1020 

1021 

1022def bias_add_v1_eager_fallback(value, bias, name, ctx): 

1023 _attr_T, _inputs_T = _execute.args_to_matching_eager([value, bias], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

1024 (value, bias) = _inputs_T 

1025 _inputs_flat = [value, bias] 

1026 _attrs = ("T", _attr_T) 

1027 _result = _execute.execute(b"BiasAddV1", 1, inputs=_inputs_flat, 

1028 attrs=_attrs, ctx=ctx, name=name) 

1029 if _execute.must_record_gradient(): 

1030 _execute.record_gradient( 

1031 "BiasAddV1", _inputs_flat, _attrs, _result) 

1032 _result, = _result 

1033 return _result 

1034 

1035 

1036def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

1037 r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors. 

1038 

1039 Given an input tensor of shape `[batch, in_height, in_width, in_channels]` 

1040 and a filter / kernel tensor of shape 

1041 `[filter_height, filter_width, in_channels, out_channels]`, this op 

1042 performs the following: 

1043 

1044 1. Flattens the filter to a 2-D matrix with shape 

1045 `[filter_height * filter_width * in_channels, output_channels]`. 

1046 2. Extracts image patches from the input tensor to form a *virtual* 

1047 tensor of shape `[batch, out_height, out_width, 

1048 filter_height * filter_width * in_channels]`. 

1049 3. For each patch, right-multiplies the filter matrix and the image patch 

1050 vector. 

1051 

1052 In detail, with the default NHWC format, 

1053 

1054 output[b, i, j, k] = 

1055 sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * 

1056 filter[di, dj, q, k] 

1057 

1058 Must have `strides[0] = strides[3] = 1`. For the most common case of the same 

1059 horizontal and vertices strides, `strides = [1, stride, stride, 1]`. 

1060 

1061 Args: 

1062 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`. 

1063 A 4-D tensor. The dimension order is interpreted according to the value 

1064 of `data_format`, see below for details. 

1065 filter: A `Tensor`. Must have the same type as `input`. 

1066 A 4-D tensor of shape 

1067 `[filter_height, filter_width, in_channels, out_channels]` 

1068 strides: A list of `ints`. 

1069 1-D tensor of length 4. The stride of the sliding window for each 

1070 dimension of `input`. The dimension order is determined by the value of 

1071 `data_format`, see below for details. 

1072 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

1073 The type of padding algorithm to use. 

1074 use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. 

1075 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

1076 If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith 

1077 dimension, the amount of padding inserted before and after the dimension is 

1078 `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If 

1079 `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 

1080 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

1081 Specify the data format of the input and output data. With the 

1082 default format "NHWC", the data is stored in the order of: 

1083 [batch, height, width, channels]. 

1084 Alternatively, the format could be "NCHW", the data storage order of: 

1085 [batch, channels, height, width]. 

1086 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

1087 1-D tensor of length 4. The dilation factor for each dimension of 

1088 `input`. If set to k > 1, there will be k-1 skipped cells between each 

1089 filter element on that dimension. The dimension order is determined by the 

1090 value of `data_format`, see above for details. Dilations in the batch and 

1091 depth dimensions must be 1. 

1092 name: A name for the operation (optional). 

1093 

1094 Returns: 

1095 A `Tensor`. Has the same type as `input`. 

1096 """ 

1097 _ctx = _context._context or _context.context() 

1098 tld = _ctx._thread_local_data 

1099 if tld.is_eager: 

1100 try: 

1101 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1102 _ctx, "Conv2D", name, input, filter, "strides", strides, 

1103 "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", padding, 

1104 "explicit_paddings", explicit_paddings, "data_format", data_format, 

1105 "dilations", dilations) 

1106 return _result 

1107 except _core._NotOkStatusException as e: 

1108 _ops.raise_from_not_ok_status(e, name) 

1109 except _core._FallbackException: 

1110 pass 

1111 try: 

1112 return conv2d_eager_fallback( 

1113 input, filter, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, 

1114 padding=padding, explicit_paddings=explicit_paddings, 

1115 data_format=data_format, dilations=dilations, name=name, ctx=_ctx) 

1116 except _core._SymbolicException: 

1117 pass # Add nodes to the TensorFlow graph. 

1118 # Add nodes to the TensorFlow graph. 

1119 if not isinstance(strides, (list, tuple)): 

1120 raise TypeError( 

1121 "Expected list for 'strides' argument to " 

1122 "'conv2d' Op, not %r." % strides) 

1123 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1124 padding = _execute.make_str(padding, "padding") 

1125 if use_cudnn_on_gpu is None: 

1126 use_cudnn_on_gpu = True 

1127 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1128 if explicit_paddings is None: 

1129 explicit_paddings = [] 

1130 if not isinstance(explicit_paddings, (list, tuple)): 

1131 raise TypeError( 

1132 "Expected list for 'explicit_paddings' argument to " 

1133 "'conv2d' Op, not %r." % explicit_paddings) 

1134 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1135 if data_format is None: 

1136 data_format = "NHWC" 

1137 data_format = _execute.make_str(data_format, "data_format") 

1138 if dilations is None: 

1139 dilations = [1, 1, 1, 1] 

1140 if not isinstance(dilations, (list, tuple)): 

1141 raise TypeError( 

1142 "Expected list for 'dilations' argument to " 

1143 "'conv2d' Op, not %r." % dilations) 

1144 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1145 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1146 "Conv2D", input=input, filter=filter, strides=strides, 

1147 padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, 

1148 explicit_paddings=explicit_paddings, 

1149 data_format=data_format, dilations=dilations, name=name) 

1150 _result = _outputs[:] 

1151 if _execute.must_record_gradient(): 

1152 _attrs = ("T", _op._get_attr_type("T"), "strides", 

1153 _op.get_attr("strides"), "use_cudnn_on_gpu", 

1154 _op._get_attr_bool("use_cudnn_on_gpu"), "padding", 

1155 _op.get_attr("padding"), "explicit_paddings", 

1156 _op.get_attr("explicit_paddings"), "data_format", 

1157 _op.get_attr("data_format"), "dilations", 

1158 _op.get_attr("dilations")) 

1159 _inputs_flat = _op.inputs 

1160 _execute.record_gradient( 

1161 "Conv2D", _inputs_flat, _attrs, _result) 

1162 _result, = _result 

1163 return _result 

1164 

1165Conv2D = tf_export("raw_ops.Conv2D")(_ops.to_raw_op(conv2d)) 

1166 

1167 

1168def conv2d_eager_fallback(input, filter, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx): 

1169 if not isinstance(strides, (list, tuple)): 

1170 raise TypeError( 

1171 "Expected list for 'strides' argument to " 

1172 "'conv2d' Op, not %r." % strides) 

1173 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1174 padding = _execute.make_str(padding, "padding") 

1175 if use_cudnn_on_gpu is None: 

1176 use_cudnn_on_gpu = True 

1177 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1178 if explicit_paddings is None: 

1179 explicit_paddings = [] 

1180 if not isinstance(explicit_paddings, (list, tuple)): 

1181 raise TypeError( 

1182 "Expected list for 'explicit_paddings' argument to " 

1183 "'conv2d' Op, not %r." % explicit_paddings) 

1184 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1185 if data_format is None: 

1186 data_format = "NHWC" 

1187 data_format = _execute.make_str(data_format, "data_format") 

1188 if dilations is None: 

1189 dilations = [1, 1, 1, 1] 

1190 if not isinstance(dilations, (list, tuple)): 

1191 raise TypeError( 

1192 "Expected list for 'dilations' argument to " 

1193 "'conv2d' Op, not %r." % dilations) 

1194 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1195 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, ]) 

1196 (input, filter) = _inputs_T 

1197 _inputs_flat = [input, filter] 

1198 _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu", 

1199 use_cudnn_on_gpu, "padding", padding, "explicit_paddings", 

1200 explicit_paddings, "data_format", data_format, "dilations", dilations) 

1201 _result = _execute.execute(b"Conv2D", 1, inputs=_inputs_flat, attrs=_attrs, 

1202 ctx=ctx, name=name) 

1203 if _execute.must_record_gradient(): 

1204 _execute.record_gradient( 

1205 "Conv2D", _inputs_flat, _attrs, _result) 

1206 _result, = _result 

1207 return _result 

1208 

1209 

1210def conv2d_backprop_filter(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

1211 r"""Computes the gradients of convolution with respect to the filter. 

1212 

1213 Args: 

1214 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

1215 4-D with shape `[batch, in_height, in_width, in_channels]`. 

1216 filter_sizes: A `Tensor` of type `int32`. 

1217 An integer vector representing the tensor shape of `filter`, 

1218 where `filter` is a 4-D 

1219 `[filter_height, filter_width, in_channels, out_channels]` tensor. 

1220 out_backprop: A `Tensor`. Must have the same type as `input`. 

1221 4-D with shape `[batch, out_height, out_width, out_channels]`. 

1222 Gradients w.r.t. the output of the convolution. 

1223 strides: A list of `ints`. 

1224 The stride of the sliding window for each dimension of the input 

1225 of the convolution. Must be in the same order as the dimension specified with 

1226 format. 

1227 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

1228 The type of padding algorithm to use. 

1229 use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. 

1230 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

1231 If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith 

1232 dimension, the amount of padding inserted before and after the dimension is 

1233 `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If 

1234 `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 

1235 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

1236 Specify the data format of the input and output data. With the 

1237 default format "NHWC", the data is stored in the order of: 

1238 [batch, in_height, in_width, in_channels]. 

1239 Alternatively, the format could be "NCHW", the data storage order of: 

1240 [batch, in_channels, in_height, in_width]. 

1241 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

1242 1-D tensor of length 4. The dilation factor for each dimension of 

1243 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

1244 element on that dimension. The dimension order is determined by the value of 

1245 `data_format`, see above for details. Dilations in the batch and depth 

1246 dimensions must be 1. 

1247 name: A name for the operation (optional). 

1248 

1249 Returns: 

1250 A `Tensor`. Has the same type as `input`. 

1251 """ 

1252 _ctx = _context._context or _context.context() 

1253 tld = _ctx._thread_local_data 

1254 if tld.is_eager: 

1255 try: 

1256 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1257 _ctx, "Conv2DBackpropFilter", name, input, filter_sizes, out_backprop, 

1258 "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", 

1259 padding, "explicit_paddings", explicit_paddings, "data_format", 

1260 data_format, "dilations", dilations) 

1261 return _result 

1262 except _core._NotOkStatusException as e: 

1263 _ops.raise_from_not_ok_status(e, name) 

1264 except _core._FallbackException: 

1265 pass 

1266 try: 

1267 return conv2d_backprop_filter_eager_fallback( 

1268 input, filter_sizes, out_backprop, strides=strides, 

1269 use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, 

1270 explicit_paddings=explicit_paddings, data_format=data_format, 

1271 dilations=dilations, name=name, ctx=_ctx) 

1272 except _core._SymbolicException: 

1273 pass # Add nodes to the TensorFlow graph. 

1274 # Add nodes to the TensorFlow graph. 

1275 if not isinstance(strides, (list, tuple)): 

1276 raise TypeError( 

1277 "Expected list for 'strides' argument to " 

1278 "'conv2d_backprop_filter' Op, not %r." % strides) 

1279 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1280 padding = _execute.make_str(padding, "padding") 

1281 if use_cudnn_on_gpu is None: 

1282 use_cudnn_on_gpu = True 

1283 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1284 if explicit_paddings is None: 

1285 explicit_paddings = [] 

1286 if not isinstance(explicit_paddings, (list, tuple)): 

1287 raise TypeError( 

1288 "Expected list for 'explicit_paddings' argument to " 

1289 "'conv2d_backprop_filter' Op, not %r." % explicit_paddings) 

1290 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1291 if data_format is None: 

1292 data_format = "NHWC" 

1293 data_format = _execute.make_str(data_format, "data_format") 

1294 if dilations is None: 

1295 dilations = [1, 1, 1, 1] 

1296 if not isinstance(dilations, (list, tuple)): 

1297 raise TypeError( 

1298 "Expected list for 'dilations' argument to " 

1299 "'conv2d_backprop_filter' Op, not %r." % dilations) 

1300 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1301 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1302 "Conv2DBackpropFilter", input=input, filter_sizes=filter_sizes, 

1303 out_backprop=out_backprop, strides=strides, 

1304 padding=padding, 

1305 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1306 explicit_paddings=explicit_paddings, 

1307 data_format=data_format, dilations=dilations, 

1308 name=name) 

1309 _result = _outputs[:] 

1310 if _execute.must_record_gradient(): 

1311 _attrs = ("T", _op._get_attr_type("T"), "strides", 

1312 _op.get_attr("strides"), "use_cudnn_on_gpu", 

1313 _op._get_attr_bool("use_cudnn_on_gpu"), "padding", 

1314 _op.get_attr("padding"), "explicit_paddings", 

1315 _op.get_attr("explicit_paddings"), "data_format", 

1316 _op.get_attr("data_format"), "dilations", 

1317 _op.get_attr("dilations")) 

1318 _inputs_flat = _op.inputs 

1319 _execute.record_gradient( 

1320 "Conv2DBackpropFilter", _inputs_flat, _attrs, _result) 

1321 _result, = _result 

1322 return _result 

1323 

1324Conv2DBackpropFilter = tf_export("raw_ops.Conv2DBackpropFilter")(_ops.to_raw_op(conv2d_backprop_filter)) 

1325 

1326 

1327def conv2d_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx): 

1328 if not isinstance(strides, (list, tuple)): 

1329 raise TypeError( 

1330 "Expected list for 'strides' argument to " 

1331 "'conv2d_backprop_filter' Op, not %r." % strides) 

1332 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1333 padding = _execute.make_str(padding, "padding") 

1334 if use_cudnn_on_gpu is None: 

1335 use_cudnn_on_gpu = True 

1336 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1337 if explicit_paddings is None: 

1338 explicit_paddings = [] 

1339 if not isinstance(explicit_paddings, (list, tuple)): 

1340 raise TypeError( 

1341 "Expected list for 'explicit_paddings' argument to " 

1342 "'conv2d_backprop_filter' Op, not %r." % explicit_paddings) 

1343 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1344 if data_format is None: 

1345 data_format = "NHWC" 

1346 data_format = _execute.make_str(data_format, "data_format") 

1347 if dilations is None: 

1348 dilations = [1, 1, 1, 1] 

1349 if not isinstance(dilations, (list, tuple)): 

1350 raise TypeError( 

1351 "Expected list for 'dilations' argument to " 

1352 "'conv2d_backprop_filter' Op, not %r." % dilations) 

1353 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1354 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

1355 (input, out_backprop) = _inputs_T 

1356 filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32) 

1357 _inputs_flat = [input, filter_sizes, out_backprop] 

1358 _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu", 

1359 use_cudnn_on_gpu, "padding", padding, "explicit_paddings", 

1360 explicit_paddings, "data_format", data_format, "dilations", dilations) 

1361 _result = _execute.execute(b"Conv2DBackpropFilter", 1, inputs=_inputs_flat, 

1362 attrs=_attrs, ctx=ctx, name=name) 

1363 if _execute.must_record_gradient(): 

1364 _execute.record_gradient( 

1365 "Conv2DBackpropFilter", _inputs_flat, _attrs, _result) 

1366 _result, = _result 

1367 return _result 

1368 

1369 

1370@_dispatch.add_fallback_dispatch_list 

1371@_dispatch.add_type_based_api_dispatcher 

1372@tf_export('conv2d_backprop_filter_v2') 

1373def conv2d_backprop_filter_v2(input, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

1374 r"""Computes the gradients of convolution with respect to the filter. 

1375 

1376 Args: 

1377 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

1378 4-D with shape `[batch, in_height, in_width, in_channels]`. 

1379 filter: A `Tensor`. Must have the same type as `input`. 

1380 4-D with shape `[filter_height, filter_width, in_channels, out_channels]`. 

1381 Only shape of tensor is used. 

1382 out_backprop: A `Tensor`. Must have the same type as `input`. 

1383 4-D with shape `[batch, out_height, out_width, out_channels]`. 

1384 Gradients w.r.t. the output of the convolution. 

1385 strides: A list of `ints`. 

1386 The stride of the sliding window for each dimension of the input 

1387 of the convolution. Must be in the same order as the dimension specified with 

1388 format. 

1389 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

1390 The type of padding algorithm to use. 

1391 use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. 

1392 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

1393 If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith 

1394 dimension, the amount of padding inserted before and after the dimension is 

1395 `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If 

1396 `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 

1397 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

1398 Specify the data format of the input and output data. With the 

1399 default format "NHWC", the data is stored in the order of: 

1400 [batch, in_height, in_width, in_channels]. 

1401 Alternatively, the format could be "NCHW", the data storage order of: 

1402 [batch, in_channels, in_height, in_width]. 

1403 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

1404 1-D tensor of length 4. The dilation factor for each dimension of 

1405 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

1406 element on that dimension. The dimension order is determined by the value of 

1407 `data_format`, see above for details. Dilations in the batch and depth 

1408 dimensions must be 1. 

1409 name: A name for the operation (optional). 

1410 

1411 Returns: 

1412 A `Tensor`. Has the same type as `input`. 

1413 """ 

1414 _ctx = _context._context or _context.context() 

1415 tld = _ctx._thread_local_data 

1416 if tld.is_eager: 

1417 try: 

1418 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1419 _ctx, "Conv2DBackpropFilterV2", name, input, filter, out_backprop, 

1420 "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", 

1421 padding, "explicit_paddings", explicit_paddings, "data_format", 

1422 data_format, "dilations", dilations) 

1423 return _result 

1424 except _core._NotOkStatusException as e: 

1425 _ops.raise_from_not_ok_status(e, name) 

1426 except _core._FallbackException: 

1427 pass 

1428 try: 

1429 _result = _dispatcher_for_conv2d_backprop_filter_v2( 

1430 (input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, 

1431 explicit_paddings, data_format, dilations, name,), None) 

1432 if _result is not NotImplemented: 

1433 return _result 

1434 return conv2d_backprop_filter_v2_eager_fallback( 

1435 input, filter, out_backprop, strides=strides, 

1436 use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, 

1437 explicit_paddings=explicit_paddings, data_format=data_format, 

1438 dilations=dilations, name=name, ctx=_ctx) 

1439 except _core._SymbolicException: 

1440 pass # Add nodes to the TensorFlow graph. 

1441 except (TypeError, ValueError): 

1442 _result = _dispatch.dispatch( 

1443 conv2d_backprop_filter_v2, (), dict(input=input, filter=filter, 

1444 out_backprop=out_backprop, 

1445 strides=strides, 

1446 padding=padding, 

1447 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1448 explicit_paddings=explicit_paddings, 

1449 data_format=data_format, 

1450 dilations=dilations, 

1451 name=name) 

1452 ) 

1453 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

1454 return _result 

1455 raise 

1456 else: 

1457 _result = _dispatcher_for_conv2d_backprop_filter_v2( 

1458 (input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, 

1459 explicit_paddings, data_format, dilations, name,), None) 

1460 if _result is not NotImplemented: 

1461 return _result 

1462 # Add nodes to the TensorFlow graph. 

1463 if not isinstance(strides, (list, tuple)): 

1464 raise TypeError( 

1465 "Expected list for 'strides' argument to " 

1466 "'conv2d_backprop_filter_v2' Op, not %r." % strides) 

1467 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1468 padding = _execute.make_str(padding, "padding") 

1469 if use_cudnn_on_gpu is None: 

1470 use_cudnn_on_gpu = True 

1471 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1472 if explicit_paddings is None: 

1473 explicit_paddings = [] 

1474 if not isinstance(explicit_paddings, (list, tuple)): 

1475 raise TypeError( 

1476 "Expected list for 'explicit_paddings' argument to " 

1477 "'conv2d_backprop_filter_v2' Op, not %r." % explicit_paddings) 

1478 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1479 if data_format is None: 

1480 data_format = "NHWC" 

1481 data_format = _execute.make_str(data_format, "data_format") 

1482 if dilations is None: 

1483 dilations = [1, 1, 1, 1] 

1484 if not isinstance(dilations, (list, tuple)): 

1485 raise TypeError( 

1486 "Expected list for 'dilations' argument to " 

1487 "'conv2d_backprop_filter_v2' Op, not %r." % dilations) 

1488 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1489 try: 

1490 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1491 "Conv2DBackpropFilterV2", input=input, filter=filter, 

1492 out_backprop=out_backprop, strides=strides, 

1493 padding=padding, 

1494 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1495 explicit_paddings=explicit_paddings, 

1496 data_format=data_format, 

1497 dilations=dilations, name=name) 

1498 except (TypeError, ValueError): 

1499 _result = _dispatch.dispatch( 

1500 conv2d_backprop_filter_v2, (), dict(input=input, filter=filter, 

1501 out_backprop=out_backprop, 

1502 strides=strides, 

1503 padding=padding, 

1504 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1505 explicit_paddings=explicit_paddings, 

1506 data_format=data_format, 

1507 dilations=dilations, name=name) 

1508 ) 

1509 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

1510 return _result 

1511 raise 

1512 _result = _outputs[:] 

1513 if _execute.must_record_gradient(): 

1514 _attrs = ("T", _op._get_attr_type("T"), "strides", 

1515 _op.get_attr("strides"), "use_cudnn_on_gpu", 

1516 _op._get_attr_bool("use_cudnn_on_gpu"), "padding", 

1517 _op.get_attr("padding"), "explicit_paddings", 

1518 _op.get_attr("explicit_paddings"), "data_format", 

1519 _op.get_attr("data_format"), "dilations", 

1520 _op.get_attr("dilations")) 

1521 _inputs_flat = _op.inputs 

1522 _execute.record_gradient( 

1523 "Conv2DBackpropFilterV2", _inputs_flat, _attrs, _result) 

1524 _result, = _result 

1525 return _result 

1526 

1527Conv2DBackpropFilterV2 = tf_export("raw_ops.Conv2DBackpropFilterV2")(_ops.to_raw_op(conv2d_backprop_filter_v2)) 

1528_dispatcher_for_conv2d_backprop_filter_v2 = conv2d_backprop_filter_v2._tf_type_based_dispatcher.Dispatch 

1529 

1530 

1531def conv2d_backprop_filter_v2_eager_fallback(input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx): 

1532 if not isinstance(strides, (list, tuple)): 

1533 raise TypeError( 

1534 "Expected list for 'strides' argument to " 

1535 "'conv2d_backprop_filter_v2' Op, not %r." % strides) 

1536 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1537 padding = _execute.make_str(padding, "padding") 

1538 if use_cudnn_on_gpu is None: 

1539 use_cudnn_on_gpu = True 

1540 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1541 if explicit_paddings is None: 

1542 explicit_paddings = [] 

1543 if not isinstance(explicit_paddings, (list, tuple)): 

1544 raise TypeError( 

1545 "Expected list for 'explicit_paddings' argument to " 

1546 "'conv2d_backprop_filter_v2' Op, not %r." % explicit_paddings) 

1547 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1548 if data_format is None: 

1549 data_format = "NHWC" 

1550 data_format = _execute.make_str(data_format, "data_format") 

1551 if dilations is None: 

1552 dilations = [1, 1, 1, 1] 

1553 if not isinstance(dilations, (list, tuple)): 

1554 raise TypeError( 

1555 "Expected list for 'dilations' argument to " 

1556 "'conv2d_backprop_filter_v2' Op, not %r." % dilations) 

1557 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1558 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

1559 (input, filter, out_backprop) = _inputs_T 

1560 _inputs_flat = [input, filter, out_backprop] 

1561 _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu", 

1562 use_cudnn_on_gpu, "padding", padding, "explicit_paddings", 

1563 explicit_paddings, "data_format", data_format, "dilations", dilations) 

1564 _result = _execute.execute(b"Conv2DBackpropFilterV2", 1, 

1565 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

1566 name=name) 

1567 if _execute.must_record_gradient(): 

1568 _execute.record_gradient( 

1569 "Conv2DBackpropFilterV2", _inputs_flat, _attrs, _result) 

1570 _result, = _result 

1571 return _result 

1572 

1573 

1574def conv2d_backprop_input(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

1575 r"""Computes the gradients of convolution with respect to the input. 

1576 

1577 Args: 

1578 input_sizes: A `Tensor` of type `int32`. 

1579 An integer vector representing the shape of `input`, 

1580 where `input` is a 4-D `[batch, height, width, channels]` tensor. 

1581 filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`. 

1582 4-D with shape 

1583 `[filter_height, filter_width, in_channels, out_channels]`. 

1584 out_backprop: A `Tensor`. Must have the same type as `filter`. 

1585 4-D with shape `[batch, out_height, out_width, out_channels]`. 

1586 Gradients w.r.t. the output of the convolution. 

1587 strides: A list of `ints`. 

1588 The stride of the sliding window for each dimension of the input 

1589 of the convolution. Must be in the same order as the dimension specified with 

1590 format. 

1591 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

1592 The type of padding algorithm to use. 

1593 use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. 

1594 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

1595 If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith 

1596 dimension, the amount of padding inserted before and after the dimension is 

1597 `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If 

1598 `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 

1599 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

1600 Specify the data format of the input and output data. With the 

1601 default format "NHWC", the data is stored in the order of: 

1602 [batch, in_height, in_width, in_channels]. 

1603 Alternatively, the format could be "NCHW", the data storage order of: 

1604 [batch, in_channels, in_height, in_width]. 

1605 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

1606 1-D tensor of length 4. The dilation factor for each dimension of 

1607 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

1608 element on that dimension. The dimension order is determined by the value of 

1609 `data_format`, see above for details. Dilations in the batch and depth 

1610 dimensions must be 1. 

1611 name: A name for the operation (optional). 

1612 

1613 Returns: 

1614 A `Tensor`. Has the same type as `filter`. 

1615 """ 

1616 _ctx = _context._context or _context.context() 

1617 tld = _ctx._thread_local_data 

1618 if tld.is_eager: 

1619 try: 

1620 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1621 _ctx, "Conv2DBackpropInput", name, input_sizes, filter, out_backprop, 

1622 "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", 

1623 padding, "explicit_paddings", explicit_paddings, "data_format", 

1624 data_format, "dilations", dilations) 

1625 return _result 

1626 except _core._NotOkStatusException as e: 

1627 _ops.raise_from_not_ok_status(e, name) 

1628 except _core._FallbackException: 

1629 pass 

1630 try: 

1631 return conv2d_backprop_input_eager_fallback( 

1632 input_sizes, filter, out_backprop, strides=strides, 

1633 use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, 

1634 explicit_paddings=explicit_paddings, data_format=data_format, 

1635 dilations=dilations, name=name, ctx=_ctx) 

1636 except _core._SymbolicException: 

1637 pass # Add nodes to the TensorFlow graph. 

1638 # Add nodes to the TensorFlow graph. 

1639 if not isinstance(strides, (list, tuple)): 

1640 raise TypeError( 

1641 "Expected list for 'strides' argument to " 

1642 "'conv2d_backprop_input' Op, not %r." % strides) 

1643 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1644 padding = _execute.make_str(padding, "padding") 

1645 if use_cudnn_on_gpu is None: 

1646 use_cudnn_on_gpu = True 

1647 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1648 if explicit_paddings is None: 

1649 explicit_paddings = [] 

1650 if not isinstance(explicit_paddings, (list, tuple)): 

1651 raise TypeError( 

1652 "Expected list for 'explicit_paddings' argument to " 

1653 "'conv2d_backprop_input' Op, not %r." % explicit_paddings) 

1654 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1655 if data_format is None: 

1656 data_format = "NHWC" 

1657 data_format = _execute.make_str(data_format, "data_format") 

1658 if dilations is None: 

1659 dilations = [1, 1, 1, 1] 

1660 if not isinstance(dilations, (list, tuple)): 

1661 raise TypeError( 

1662 "Expected list for 'dilations' argument to " 

1663 "'conv2d_backprop_input' Op, not %r." % dilations) 

1664 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1665 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1666 "Conv2DBackpropInput", input_sizes=input_sizes, filter=filter, 

1667 out_backprop=out_backprop, strides=strides, 

1668 padding=padding, 

1669 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1670 explicit_paddings=explicit_paddings, 

1671 data_format=data_format, dilations=dilations, 

1672 name=name) 

1673 _result = _outputs[:] 

1674 if _execute.must_record_gradient(): 

1675 _attrs = ("T", _op._get_attr_type("T"), "strides", 

1676 _op.get_attr("strides"), "use_cudnn_on_gpu", 

1677 _op._get_attr_bool("use_cudnn_on_gpu"), "padding", 

1678 _op.get_attr("padding"), "explicit_paddings", 

1679 _op.get_attr("explicit_paddings"), "data_format", 

1680 _op.get_attr("data_format"), "dilations", 

1681 _op.get_attr("dilations")) 

1682 _inputs_flat = _op.inputs 

1683 _execute.record_gradient( 

1684 "Conv2DBackpropInput", _inputs_flat, _attrs, _result) 

1685 _result, = _result 

1686 return _result 

1687 

1688Conv2DBackpropInput = tf_export("raw_ops.Conv2DBackpropInput")(_ops.to_raw_op(conv2d_backprop_input)) 

1689 

1690 

1691def conv2d_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx): 

1692 if not isinstance(strides, (list, tuple)): 

1693 raise TypeError( 

1694 "Expected list for 'strides' argument to " 

1695 "'conv2d_backprop_input' Op, not %r." % strides) 

1696 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1697 padding = _execute.make_str(padding, "padding") 

1698 if use_cudnn_on_gpu is None: 

1699 use_cudnn_on_gpu = True 

1700 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1701 if explicit_paddings is None: 

1702 explicit_paddings = [] 

1703 if not isinstance(explicit_paddings, (list, tuple)): 

1704 raise TypeError( 

1705 "Expected list for 'explicit_paddings' argument to " 

1706 "'conv2d_backprop_input' Op, not %r." % explicit_paddings) 

1707 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1708 if data_format is None: 

1709 data_format = "NHWC" 

1710 data_format = _execute.make_str(data_format, "data_format") 

1711 if dilations is None: 

1712 dilations = [1, 1, 1, 1] 

1713 if not isinstance(dilations, (list, tuple)): 

1714 raise TypeError( 

1715 "Expected list for 'dilations' argument to " 

1716 "'conv2d_backprop_input' Op, not %r." % dilations) 

1717 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1718 _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, ]) 

1719 (filter, out_backprop) = _inputs_T 

1720 input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32) 

1721 _inputs_flat = [input_sizes, filter, out_backprop] 

1722 _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu", 

1723 use_cudnn_on_gpu, "padding", padding, "explicit_paddings", 

1724 explicit_paddings, "data_format", data_format, "dilations", dilations) 

1725 _result = _execute.execute(b"Conv2DBackpropInput", 1, inputs=_inputs_flat, 

1726 attrs=_attrs, ctx=ctx, name=name) 

1727 if _execute.must_record_gradient(): 

1728 _execute.record_gradient( 

1729 "Conv2DBackpropInput", _inputs_flat, _attrs, _result) 

1730 _result, = _result 

1731 return _result 

1732 

1733 

1734@_dispatch.add_fallback_dispatch_list 

1735@_dispatch.add_type_based_api_dispatcher 

1736@tf_export('conv2d_backprop_input_v2') 

1737def conv2d_backprop_input_v2(input, filter, out_backprop, strides, padding, use_cudnn_on_gpu=True, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

1738 r"""Computes the gradients of convolution with respect to the input. 

1739 

1740 Args: 

1741 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`. 

1742 4-D with shape `[batch, in_height, in_width, in_channels]`. 

1743 Only shape of tensor is used. 

1744 filter: A `Tensor`. Must have the same type as `input`. 4-D with shape 

1745 `[filter_height, filter_width, in_channels, out_channels]`. 

1746 out_backprop: A `Tensor`. Must have the same type as `input`. 

1747 4-D with shape `[batch, out_height, out_width, out_channels]`. 

1748 Gradients w.r.t. the output of the convolution. 

1749 strides: A list of `ints`. 

1750 The stride of the sliding window for each dimension of the input 

1751 of the convolution. Must be in the same order as the dimension specified with 

1752 format. 

1753 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

1754 The type of padding algorithm to use. 

1755 use_cudnn_on_gpu: An optional `bool`. Defaults to `True`. 

1756 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

1757 If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith 

1758 dimension, the amount of padding inserted before and after the dimension is 

1759 `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If 

1760 `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 

1761 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

1762 Specify the data format of the input and output data. With the 

1763 default format "NHWC", the data is stored in the order of: 

1764 [batch, in_height, in_width, in_channels]. 

1765 Alternatively, the format could be "NCHW", the data storage order of: 

1766 [batch, in_channels, in_height, in_width]. 

1767 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

1768 1-D tensor of length 4. The dilation factor for each dimension of 

1769 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

1770 element on that dimension. The dimension order is determined by the value of 

1771 `data_format`, see above for details. Dilations in the batch and depth 

1772 dimensions must be 1. 

1773 name: A name for the operation (optional). 

1774 

1775 Returns: 

1776 A `Tensor`. Has the same type as `input`. 

1777 """ 

1778 _ctx = _context._context or _context.context() 

1779 tld = _ctx._thread_local_data 

1780 if tld.is_eager: 

1781 try: 

1782 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1783 _ctx, "Conv2DBackpropInputV2", name, input, filter, out_backprop, 

1784 "strides", strides, "use_cudnn_on_gpu", use_cudnn_on_gpu, "padding", 

1785 padding, "explicit_paddings", explicit_paddings, "data_format", 

1786 data_format, "dilations", dilations) 

1787 return _result 

1788 except _core._NotOkStatusException as e: 

1789 _ops.raise_from_not_ok_status(e, name) 

1790 except _core._FallbackException: 

1791 pass 

1792 try: 

1793 _result = _dispatcher_for_conv2d_backprop_input_v2( 

1794 (input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, 

1795 explicit_paddings, data_format, dilations, name,), None) 

1796 if _result is not NotImplemented: 

1797 return _result 

1798 return conv2d_backprop_input_v2_eager_fallback( 

1799 input, filter, out_backprop, strides=strides, 

1800 use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, 

1801 explicit_paddings=explicit_paddings, data_format=data_format, 

1802 dilations=dilations, name=name, ctx=_ctx) 

1803 except _core._SymbolicException: 

1804 pass # Add nodes to the TensorFlow graph. 

1805 except (TypeError, ValueError): 

1806 _result = _dispatch.dispatch( 

1807 conv2d_backprop_input_v2, (), dict(input=input, filter=filter, 

1808 out_backprop=out_backprop, 

1809 strides=strides, 

1810 padding=padding, 

1811 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1812 explicit_paddings=explicit_paddings, 

1813 data_format=data_format, 

1814 dilations=dilations, name=name) 

1815 ) 

1816 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

1817 return _result 

1818 raise 

1819 else: 

1820 _result = _dispatcher_for_conv2d_backprop_input_v2( 

1821 (input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, 

1822 explicit_paddings, data_format, dilations, name,), None) 

1823 if _result is not NotImplemented: 

1824 return _result 

1825 # Add nodes to the TensorFlow graph. 

1826 if not isinstance(strides, (list, tuple)): 

1827 raise TypeError( 

1828 "Expected list for 'strides' argument to " 

1829 "'conv2d_backprop_input_v2' Op, not %r." % strides) 

1830 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1831 padding = _execute.make_str(padding, "padding") 

1832 if use_cudnn_on_gpu is None: 

1833 use_cudnn_on_gpu = True 

1834 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1835 if explicit_paddings is None: 

1836 explicit_paddings = [] 

1837 if not isinstance(explicit_paddings, (list, tuple)): 

1838 raise TypeError( 

1839 "Expected list for 'explicit_paddings' argument to " 

1840 "'conv2d_backprop_input_v2' Op, not %r." % explicit_paddings) 

1841 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1842 if data_format is None: 

1843 data_format = "NHWC" 

1844 data_format = _execute.make_str(data_format, "data_format") 

1845 if dilations is None: 

1846 dilations = [1, 1, 1, 1] 

1847 if not isinstance(dilations, (list, tuple)): 

1848 raise TypeError( 

1849 "Expected list for 'dilations' argument to " 

1850 "'conv2d_backprop_input_v2' Op, not %r." % dilations) 

1851 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1852 try: 

1853 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

1854 "Conv2DBackpropInputV2", input=input, filter=filter, 

1855 out_backprop=out_backprop, strides=strides, 

1856 padding=padding, 

1857 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1858 explicit_paddings=explicit_paddings, 

1859 data_format=data_format, dilations=dilations, 

1860 name=name) 

1861 except (TypeError, ValueError): 

1862 _result = _dispatch.dispatch( 

1863 conv2d_backprop_input_v2, (), dict(input=input, filter=filter, 

1864 out_backprop=out_backprop, 

1865 strides=strides, padding=padding, 

1866 use_cudnn_on_gpu=use_cudnn_on_gpu, 

1867 explicit_paddings=explicit_paddings, 

1868 data_format=data_format, 

1869 dilations=dilations, name=name) 

1870 ) 

1871 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

1872 return _result 

1873 raise 

1874 _result = _outputs[:] 

1875 if _execute.must_record_gradient(): 

1876 _attrs = ("T", _op._get_attr_type("T"), "strides", 

1877 _op.get_attr("strides"), "use_cudnn_on_gpu", 

1878 _op._get_attr_bool("use_cudnn_on_gpu"), "padding", 

1879 _op.get_attr("padding"), "explicit_paddings", 

1880 _op.get_attr("explicit_paddings"), "data_format", 

1881 _op.get_attr("data_format"), "dilations", 

1882 _op.get_attr("dilations")) 

1883 _inputs_flat = _op.inputs 

1884 _execute.record_gradient( 

1885 "Conv2DBackpropInputV2", _inputs_flat, _attrs, _result) 

1886 _result, = _result 

1887 return _result 

1888 

1889Conv2DBackpropInputV2 = tf_export("raw_ops.Conv2DBackpropInputV2")(_ops.to_raw_op(conv2d_backprop_input_v2)) 

1890_dispatcher_for_conv2d_backprop_input_v2 = conv2d_backprop_input_v2._tf_type_based_dispatcher.Dispatch 

1891 

1892 

1893def conv2d_backprop_input_v2_eager_fallback(input, filter, out_backprop, strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, dilations, name, ctx): 

1894 if not isinstance(strides, (list, tuple)): 

1895 raise TypeError( 

1896 "Expected list for 'strides' argument to " 

1897 "'conv2d_backprop_input_v2' Op, not %r." % strides) 

1898 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1899 padding = _execute.make_str(padding, "padding") 

1900 if use_cudnn_on_gpu is None: 

1901 use_cudnn_on_gpu = True 

1902 use_cudnn_on_gpu = _execute.make_bool(use_cudnn_on_gpu, "use_cudnn_on_gpu") 

1903 if explicit_paddings is None: 

1904 explicit_paddings = [] 

1905 if not isinstance(explicit_paddings, (list, tuple)): 

1906 raise TypeError( 

1907 "Expected list for 'explicit_paddings' argument to " 

1908 "'conv2d_backprop_input_v2' Op, not %r." % explicit_paddings) 

1909 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

1910 if data_format is None: 

1911 data_format = "NHWC" 

1912 data_format = _execute.make_str(data_format, "data_format") 

1913 if dilations is None: 

1914 dilations = [1, 1, 1, 1] 

1915 if not isinstance(dilations, (list, tuple)): 

1916 raise TypeError( 

1917 "Expected list for 'dilations' argument to " 

1918 "'conv2d_backprop_input_v2' Op, not %r." % dilations) 

1919 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

1920 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, ]) 

1921 (input, filter, out_backprop) = _inputs_T 

1922 _inputs_flat = [input, filter, out_backprop] 

1923 _attrs = ("T", _attr_T, "strides", strides, "use_cudnn_on_gpu", 

1924 use_cudnn_on_gpu, "padding", padding, "explicit_paddings", 

1925 explicit_paddings, "data_format", data_format, "dilations", dilations) 

1926 _result = _execute.execute(b"Conv2DBackpropInputV2", 1, inputs=_inputs_flat, 

1927 attrs=_attrs, ctx=ctx, name=name) 

1928 if _execute.must_record_gradient(): 

1929 _execute.record_gradient( 

1930 "Conv2DBackpropInputV2", _inputs_flat, _attrs, _result) 

1931 _result, = _result 

1932 return _result 

1933 

1934 

1935def conv3d(input, filter, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None): 

1936 r"""Computes a 3-D convolution given 5-D `input` and `filter` tensors. 

1937 

1938 In signal processing, cross-correlation is a measure of similarity of 

1939 two waveforms as a function of a time-lag applied to one of them. This 

1940 is also known as a sliding dot product or sliding inner-product. 

1941 

1942 Our Conv3D implements a form of cross-correlation. 

1943 

1944 Args: 

1945 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

1946 Shape `[batch, in_depth, in_height, in_width, in_channels]`. 

1947 filter: A `Tensor`. Must have the same type as `input`. 

1948 Shape `[filter_depth, filter_height, filter_width, in_channels, 

1949 out_channels]`. `in_channels` must match between `input` and `filter`. 

1950 strides: A list of `ints` that has length `>= 5`. 

1951 1-D tensor of length 5. The stride of the sliding window for each 

1952 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

1953 padding: A `string` from: `"SAME", "VALID"`. 

1954 The type of padding algorithm to use. 

1955 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

1956 The data format of the input and output data. With the 

1957 default format "NDHWC", the data is stored in the order of: 

1958 [batch, in_depth, in_height, in_width, in_channels]. 

1959 Alternatively, the format could be "NCDHW", the data storage order is: 

1960 [batch, in_channels, in_depth, in_height, in_width]. 

1961 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`. 

1962 1-D tensor of length 5. The dilation factor for each dimension of 

1963 `input`. If set to k > 1, there will be k-1 skipped cells between each 

1964 filter element on that dimension. The dimension order is determined by the 

1965 value of `data_format`, see above for details. Dilations in the batch and 

1966 depth dimensions must be 1. 

1967 name: A name for the operation (optional). 

1968 

1969 Returns: 

1970 A `Tensor`. Has the same type as `input`. 

1971 """ 

1972 _ctx = _context._context or _context.context() 

1973 tld = _ctx._thread_local_data 

1974 if tld.is_eager: 

1975 try: 

1976 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

1977 _ctx, "Conv3D", name, input, filter, "strides", strides, "padding", 

1978 padding, "data_format", data_format, "dilations", dilations) 

1979 return _result 

1980 except _core._NotOkStatusException as e: 

1981 _ops.raise_from_not_ok_status(e, name) 

1982 except _core._FallbackException: 

1983 pass 

1984 try: 

1985 return conv3d_eager_fallback( 

1986 input, filter, strides=strides, padding=padding, 

1987 data_format=data_format, dilations=dilations, name=name, ctx=_ctx) 

1988 except _core._SymbolicException: 

1989 pass # Add nodes to the TensorFlow graph. 

1990 # Add nodes to the TensorFlow graph. 

1991 if not isinstance(strides, (list, tuple)): 

1992 raise TypeError( 

1993 "Expected list for 'strides' argument to " 

1994 "'conv3d' Op, not %r." % strides) 

1995 strides = [_execute.make_int(_i, "strides") for _i in strides] 

1996 padding = _execute.make_str(padding, "padding") 

1997 if data_format is None: 

1998 data_format = "NDHWC" 

1999 data_format = _execute.make_str(data_format, "data_format") 

2000 if dilations is None: 

2001 dilations = [1, 1, 1, 1, 1] 

2002 if not isinstance(dilations, (list, tuple)): 

2003 raise TypeError( 

2004 "Expected list for 'dilations' argument to " 

2005 "'conv3d' Op, not %r." % dilations) 

2006 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2007 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2008 "Conv3D", input=input, filter=filter, strides=strides, 

2009 padding=padding, data_format=data_format, 

2010 dilations=dilations, name=name) 

2011 _result = _outputs[:] 

2012 if _execute.must_record_gradient(): 

2013 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2014 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2015 "data_format", _op.get_attr("data_format"), "dilations", 

2016 _op.get_attr("dilations")) 

2017 _inputs_flat = _op.inputs 

2018 _execute.record_gradient( 

2019 "Conv3D", _inputs_flat, _attrs, _result) 

2020 _result, = _result 

2021 return _result 

2022 

2023Conv3D = tf_export("raw_ops.Conv3D")(_ops.to_raw_op(conv3d)) 

2024 

2025 

2026def conv3d_eager_fallback(input, filter, strides, padding, data_format, dilations, name, ctx): 

2027 if not isinstance(strides, (list, tuple)): 

2028 raise TypeError( 

2029 "Expected list for 'strides' argument to " 

2030 "'conv3d' Op, not %r." % strides) 

2031 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2032 padding = _execute.make_str(padding, "padding") 

2033 if data_format is None: 

2034 data_format = "NDHWC" 

2035 data_format = _execute.make_str(data_format, "data_format") 

2036 if dilations is None: 

2037 dilations = [1, 1, 1, 1, 1] 

2038 if not isinstance(dilations, (list, tuple)): 

2039 raise TypeError( 

2040 "Expected list for 'dilations' argument to " 

2041 "'conv3d' Op, not %r." % dilations) 

2042 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2043 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

2044 (input, filter) = _inputs_T 

2045 _inputs_flat = [input, filter] 

2046 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

2047 "data_format", data_format, "dilations", dilations) 

2048 _result = _execute.execute(b"Conv3D", 1, inputs=_inputs_flat, attrs=_attrs, 

2049 ctx=ctx, name=name) 

2050 if _execute.must_record_gradient(): 

2051 _execute.record_gradient( 

2052 "Conv3D", _inputs_flat, _attrs, _result) 

2053 _result, = _result 

2054 return _result 

2055 

2056 

2057def conv3d_backprop_filter(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None): 

2058 r"""Computes the gradients of 3-D convolution with respect to the filter. 

2059 

2060 Args: 

2061 input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. 

2062 Shape `[batch, depth, rows, cols, in_channels]`. 

2063 filter: A `Tensor`. Must have the same type as `input`. 

2064 Shape `[depth, rows, cols, in_channels, out_channels]`. 

2065 `in_channels` must match between `input` and `filter`. 

2066 out_backprop: A `Tensor`. Must have the same type as `input`. 

2067 Backprop signal of shape `[batch, out_depth, out_rows, out_cols, 

2068 out_channels]`. 

2069 strides: A list of `ints` that has length `>= 5`. 

2070 1-D tensor of length 5. The stride of the sliding window for each 

2071 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

2072 padding: A `string` from: `"SAME", "VALID"`. 

2073 The type of padding algorithm to use. 

2074 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`. 

2075 name: A name for the operation (optional). 

2076 

2077 Returns: 

2078 A `Tensor`. Has the same type as `input`. 

2079 """ 

2080 _ctx = _context._context or _context.context() 

2081 tld = _ctx._thread_local_data 

2082 if tld.is_eager: 

2083 try: 

2084 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2085 _ctx, "Conv3DBackpropFilter", name, input, filter, out_backprop, 

2086 "strides", strides, "padding", padding, "dilations", dilations) 

2087 return _result 

2088 except _core._NotOkStatusException as e: 

2089 _ops.raise_from_not_ok_status(e, name) 

2090 except _core._FallbackException: 

2091 pass 

2092 try: 

2093 return conv3d_backprop_filter_eager_fallback( 

2094 input, filter, out_backprop, strides=strides, padding=padding, 

2095 dilations=dilations, name=name, ctx=_ctx) 

2096 except _core._SymbolicException: 

2097 pass # Add nodes to the TensorFlow graph. 

2098 # Add nodes to the TensorFlow graph. 

2099 if not isinstance(strides, (list, tuple)): 

2100 raise TypeError( 

2101 "Expected list for 'strides' argument to " 

2102 "'conv3d_backprop_filter' Op, not %r." % strides) 

2103 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2104 padding = _execute.make_str(padding, "padding") 

2105 if dilations is None: 

2106 dilations = [1, 1, 1, 1, 1] 

2107 if not isinstance(dilations, (list, tuple)): 

2108 raise TypeError( 

2109 "Expected list for 'dilations' argument to " 

2110 "'conv3d_backprop_filter' Op, not %r." % dilations) 

2111 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2112 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2113 "Conv3DBackpropFilter", input=input, filter=filter, 

2114 out_backprop=out_backprop, strides=strides, 

2115 padding=padding, dilations=dilations, 

2116 name=name) 

2117 _result = _outputs[:] 

2118 if _execute.must_record_gradient(): 

2119 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2120 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2121 "dilations", _op.get_attr("dilations")) 

2122 _inputs_flat = _op.inputs 

2123 _execute.record_gradient( 

2124 "Conv3DBackpropFilter", _inputs_flat, _attrs, _result) 

2125 _result, = _result 

2126 return _result 

2127 

2128Conv3DBackpropFilter = tf_export("raw_ops.Conv3DBackpropFilter")(_ops.to_raw_op(conv3d_backprop_filter)) 

2129 

2130 

2131def conv3d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, padding, dilations, name, ctx): 

2132 if not isinstance(strides, (list, tuple)): 

2133 raise TypeError( 

2134 "Expected list for 'strides' argument to " 

2135 "'conv3d_backprop_filter' Op, not %r." % strides) 

2136 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2137 padding = _execute.make_str(padding, "padding") 

2138 if dilations is None: 

2139 dilations = [1, 1, 1, 1, 1] 

2140 if not isinstance(dilations, (list, tuple)): 

2141 raise TypeError( 

2142 "Expected list for 'dilations' argument to " 

2143 "'conv3d_backprop_filter' Op, not %r." % dilations) 

2144 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2145 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

2146 (input, filter, out_backprop) = _inputs_T 

2147 _inputs_flat = [input, filter, out_backprop] 

2148 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations", 

2149 dilations) 

2150 _result = _execute.execute(b"Conv3DBackpropFilter", 1, inputs=_inputs_flat, 

2151 attrs=_attrs, ctx=ctx, name=name) 

2152 if _execute.must_record_gradient(): 

2153 _execute.record_gradient( 

2154 "Conv3DBackpropFilter", _inputs_flat, _attrs, _result) 

2155 _result, = _result 

2156 return _result 

2157 

2158 

2159@_dispatch.add_fallback_dispatch_list 

2160@_dispatch.add_type_based_api_dispatcher 

2161@tf_export(v1=['nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2']) 

2162@deprecated_endpoints('nn.conv3d_backprop_filter', 'nn.conv3d_backprop_filter_v2') 

2163def conv3d_backprop_filter_v2(input, filter_sizes, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None): 

2164 r"""Computes the gradients of 3-D convolution with respect to the filter. 

2165 

2166 Args: 

2167 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

2168 Shape `[batch, depth, rows, cols, in_channels]`. 

2169 filter_sizes: A `Tensor` of type `int32`. 

2170 An integer vector representing the tensor shape of `filter`, 

2171 where `filter` is a 5-D 

2172 `[filter_depth, filter_height, filter_width, in_channels, out_channels]` 

2173 tensor. 

2174 out_backprop: A `Tensor`. Must have the same type as `input`. 

2175 Backprop signal of shape `[batch, out_depth, out_rows, out_cols, 

2176 out_channels]`. 

2177 strides: A list of `ints` that has length `>= 5`. 

2178 1-D tensor of length 5. The stride of the sliding window for each 

2179 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

2180 padding: A `string` from: `"SAME", "VALID"`. 

2181 The type of padding algorithm to use. 

2182 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

2183 The data format of the input and output data. With the 

2184 default format "NDHWC", the data is stored in the order of: 

2185 [batch, in_depth, in_height, in_width, in_channels]. 

2186 Alternatively, the format could be "NCDHW", the data storage order is: 

2187 [batch, in_channels, in_depth, in_height, in_width]. 

2188 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`. 

2189 1-D tensor of length 5. The dilation factor for each dimension of 

2190 `input`. If set to k > 1, there will be k-1 skipped cells between each 

2191 filter element on that dimension. The dimension order is determined by the 

2192 value of `data_format`, see above for details. Dilations in the batch and 

2193 depth dimensions must be 1. 

2194 name: A name for the operation (optional). 

2195 

2196 Returns: 

2197 A `Tensor`. Has the same type as `input`. 

2198 """ 

2199 _ctx = _context._context or _context.context() 

2200 tld = _ctx._thread_local_data 

2201 if tld.is_eager: 

2202 try: 

2203 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2204 _ctx, "Conv3DBackpropFilterV2", name, input, filter_sizes, 

2205 out_backprop, "strides", strides, "padding", padding, "data_format", 

2206 data_format, "dilations", dilations) 

2207 return _result 

2208 except _core._NotOkStatusException as e: 

2209 _ops.raise_from_not_ok_status(e, name) 

2210 except _core._FallbackException: 

2211 pass 

2212 try: 

2213 _result = _dispatcher_for_conv3d_backprop_filter_v2( 

2214 (input, filter_sizes, out_backprop, strides, padding, data_format, 

2215 dilations, name,), None) 

2216 if _result is not NotImplemented: 

2217 return _result 

2218 return conv3d_backprop_filter_v2_eager_fallback( 

2219 input, filter_sizes, out_backprop, strides=strides, padding=padding, 

2220 data_format=data_format, dilations=dilations, name=name, ctx=_ctx) 

2221 except _core._SymbolicException: 

2222 pass # Add nodes to the TensorFlow graph. 

2223 except (TypeError, ValueError): 

2224 _result = _dispatch.dispatch( 

2225 conv3d_backprop_filter_v2, (), dict(input=input, 

2226 filter_sizes=filter_sizes, 

2227 out_backprop=out_backprop, 

2228 strides=strides, 

2229 padding=padding, 

2230 data_format=data_format, 

2231 dilations=dilations, 

2232 name=name) 

2233 ) 

2234 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

2235 return _result 

2236 raise 

2237 else: 

2238 _result = _dispatcher_for_conv3d_backprop_filter_v2( 

2239 (input, filter_sizes, out_backprop, strides, padding, data_format, 

2240 dilations, name,), None) 

2241 if _result is not NotImplemented: 

2242 return _result 

2243 # Add nodes to the TensorFlow graph. 

2244 if not isinstance(strides, (list, tuple)): 

2245 raise TypeError( 

2246 "Expected list for 'strides' argument to " 

2247 "'conv3d_backprop_filter_v2' Op, not %r." % strides) 

2248 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2249 padding = _execute.make_str(padding, "padding") 

2250 if data_format is None: 

2251 data_format = "NDHWC" 

2252 data_format = _execute.make_str(data_format, "data_format") 

2253 if dilations is None: 

2254 dilations = [1, 1, 1, 1, 1] 

2255 if not isinstance(dilations, (list, tuple)): 

2256 raise TypeError( 

2257 "Expected list for 'dilations' argument to " 

2258 "'conv3d_backprop_filter_v2' Op, not %r." % dilations) 

2259 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2260 try: 

2261 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2262 "Conv3DBackpropFilterV2", input=input, filter_sizes=filter_sizes, 

2263 out_backprop=out_backprop, strides=strides, 

2264 padding=padding, data_format=data_format, 

2265 dilations=dilations, name=name) 

2266 except (TypeError, ValueError): 

2267 _result = _dispatch.dispatch( 

2268 conv3d_backprop_filter_v2, (), dict(input=input, 

2269 filter_sizes=filter_sizes, 

2270 out_backprop=out_backprop, 

2271 strides=strides, 

2272 padding=padding, 

2273 data_format=data_format, 

2274 dilations=dilations, name=name) 

2275 ) 

2276 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

2277 return _result 

2278 raise 

2279 _result = _outputs[:] 

2280 if _execute.must_record_gradient(): 

2281 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2282 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2283 "data_format", _op.get_attr("data_format"), "dilations", 

2284 _op.get_attr("dilations")) 

2285 _inputs_flat = _op.inputs 

2286 _execute.record_gradient( 

2287 "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result) 

2288 _result, = _result 

2289 return _result 

2290 

2291Conv3DBackpropFilterV2 = tf_export("raw_ops.Conv3DBackpropFilterV2")(_ops.to_raw_op(conv3d_backprop_filter_v2)) 

2292_dispatcher_for_conv3d_backprop_filter_v2 = conv3d_backprop_filter_v2._tf_type_based_dispatcher.Dispatch 

2293 

2294 

2295def conv3d_backprop_filter_v2_eager_fallback(input, filter_sizes, out_backprop, strides, padding, data_format, dilations, name, ctx): 

2296 if not isinstance(strides, (list, tuple)): 

2297 raise TypeError( 

2298 "Expected list for 'strides' argument to " 

2299 "'conv3d_backprop_filter_v2' Op, not %r." % strides) 

2300 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2301 padding = _execute.make_str(padding, "padding") 

2302 if data_format is None: 

2303 data_format = "NDHWC" 

2304 data_format = _execute.make_str(data_format, "data_format") 

2305 if dilations is None: 

2306 dilations = [1, 1, 1, 1, 1] 

2307 if not isinstance(dilations, (list, tuple)): 

2308 raise TypeError( 

2309 "Expected list for 'dilations' argument to " 

2310 "'conv3d_backprop_filter_v2' Op, not %r." % dilations) 

2311 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2312 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

2313 (input, out_backprop) = _inputs_T 

2314 filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32) 

2315 _inputs_flat = [input, filter_sizes, out_backprop] 

2316 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

2317 "data_format", data_format, "dilations", dilations) 

2318 _result = _execute.execute(b"Conv3DBackpropFilterV2", 1, 

2319 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

2320 name=name) 

2321 if _execute.must_record_gradient(): 

2322 _execute.record_gradient( 

2323 "Conv3DBackpropFilterV2", _inputs_flat, _attrs, _result) 

2324 _result, = _result 

2325 return _result 

2326 

2327 

2328def conv3d_backprop_input(input, filter, out_backprop, strides, padding, dilations=[1, 1, 1, 1, 1], name=None): 

2329 r"""Computes the gradients of 3-D convolution with respect to the input. 

2330 

2331 Args: 

2332 input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. 

2333 Shape `[batch, depth, rows, cols, in_channels]`. 

2334 filter: A `Tensor`. Must have the same type as `input`. 

2335 Shape `[depth, rows, cols, in_channels, out_channels]`. 

2336 `in_channels` must match between `input` and `filter`. 

2337 out_backprop: A `Tensor`. Must have the same type as `input`. 

2338 Backprop signal of shape `[batch, out_depth, out_rows, out_cols, 

2339 out_channels]`. 

2340 strides: A list of `ints` that has length `>= 5`. 

2341 1-D tensor of length 5. The stride of the sliding window for each 

2342 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

2343 padding: A `string` from: `"SAME", "VALID"`. 

2344 The type of padding algorithm to use. 

2345 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`. 

2346 name: A name for the operation (optional). 

2347 

2348 Returns: 

2349 A `Tensor`. Has the same type as `input`. 

2350 """ 

2351 _ctx = _context._context or _context.context() 

2352 tld = _ctx._thread_local_data 

2353 if tld.is_eager: 

2354 try: 

2355 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2356 _ctx, "Conv3DBackpropInput", name, input, filter, out_backprop, 

2357 "strides", strides, "padding", padding, "dilations", dilations) 

2358 return _result 

2359 except _core._NotOkStatusException as e: 

2360 _ops.raise_from_not_ok_status(e, name) 

2361 except _core._FallbackException: 

2362 pass 

2363 try: 

2364 return conv3d_backprop_input_eager_fallback( 

2365 input, filter, out_backprop, strides=strides, padding=padding, 

2366 dilations=dilations, name=name, ctx=_ctx) 

2367 except _core._SymbolicException: 

2368 pass # Add nodes to the TensorFlow graph. 

2369 # Add nodes to the TensorFlow graph. 

2370 if not isinstance(strides, (list, tuple)): 

2371 raise TypeError( 

2372 "Expected list for 'strides' argument to " 

2373 "'conv3d_backprop_input' Op, not %r." % strides) 

2374 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2375 padding = _execute.make_str(padding, "padding") 

2376 if dilations is None: 

2377 dilations = [1, 1, 1, 1, 1] 

2378 if not isinstance(dilations, (list, tuple)): 

2379 raise TypeError( 

2380 "Expected list for 'dilations' argument to " 

2381 "'conv3d_backprop_input' Op, not %r." % dilations) 

2382 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2383 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2384 "Conv3DBackpropInput", input=input, filter=filter, 

2385 out_backprop=out_backprop, strides=strides, 

2386 padding=padding, dilations=dilations, 

2387 name=name) 

2388 _result = _outputs[:] 

2389 if _execute.must_record_gradient(): 

2390 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2391 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2392 "dilations", _op.get_attr("dilations")) 

2393 _inputs_flat = _op.inputs 

2394 _execute.record_gradient( 

2395 "Conv3DBackpropInput", _inputs_flat, _attrs, _result) 

2396 _result, = _result 

2397 return _result 

2398 

2399Conv3DBackpropInput = tf_export("raw_ops.Conv3DBackpropInput")(_ops.to_raw_op(conv3d_backprop_input)) 

2400 

2401 

2402def conv3d_backprop_input_eager_fallback(input, filter, out_backprop, strides, padding, dilations, name, ctx): 

2403 if not isinstance(strides, (list, tuple)): 

2404 raise TypeError( 

2405 "Expected list for 'strides' argument to " 

2406 "'conv3d_backprop_input' Op, not %r." % strides) 

2407 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2408 padding = _execute.make_str(padding, "padding") 

2409 if dilations is None: 

2410 dilations = [1, 1, 1, 1, 1] 

2411 if not isinstance(dilations, (list, tuple)): 

2412 raise TypeError( 

2413 "Expected list for 'dilations' argument to " 

2414 "'conv3d_backprop_input' Op, not %r." % dilations) 

2415 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2416 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

2417 (input, filter, out_backprop) = _inputs_T 

2418 _inputs_flat = [input, filter, out_backprop] 

2419 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, "dilations", 

2420 dilations) 

2421 _result = _execute.execute(b"Conv3DBackpropInput", 1, inputs=_inputs_flat, 

2422 attrs=_attrs, ctx=ctx, name=name) 

2423 if _execute.must_record_gradient(): 

2424 _execute.record_gradient( 

2425 "Conv3DBackpropInput", _inputs_flat, _attrs, _result) 

2426 _result, = _result 

2427 return _result 

2428 

2429 

2430def conv3d_backprop_input_v2(input_sizes, filter, out_backprop, strides, padding, data_format="NDHWC", dilations=[1, 1, 1, 1, 1], name=None): 

2431 r"""Computes the gradients of 3-D convolution with respect to the input. 

2432 

2433 Args: 

2434 input_sizes: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2435 An integer vector representing the tensor shape of `input`, 

2436 where `input` is a 5-D 

2437 `[batch, depth, rows, cols, in_channels]` tensor. 

2438 filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

2439 Shape `[depth, rows, cols, in_channels, out_channels]`. 

2440 `in_channels` must match between `input` and `filter`. 

2441 out_backprop: A `Tensor`. Must have the same type as `filter`. 

2442 Backprop signal of shape `[batch, out_depth, out_rows, out_cols, 

2443 out_channels]`. 

2444 strides: A list of `ints` that has length `>= 5`. 

2445 1-D tensor of length 5. The stride of the sliding window for each 

2446 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

2447 padding: A `string` from: `"SAME", "VALID"`. 

2448 The type of padding algorithm to use. 

2449 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

2450 The data format of the input and output data. With the 

2451 default format "NDHWC", the data is stored in the order of: 

2452 [batch, in_depth, in_height, in_width, in_channels]. 

2453 Alternatively, the format could be "NCDHW", the data storage order is: 

2454 [batch, in_channels, in_depth, in_height, in_width]. 

2455 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1, 1]`. 

2456 1-D tensor of length 5. The dilation factor for each dimension of 

2457 `input`. If set to k > 1, there will be k-1 skipped cells between each 

2458 filter element on that dimension. The dimension order is determined by the 

2459 value of `data_format`, see above for details. Dilations in the batch and 

2460 depth dimensions must be 1. 

2461 name: A name for the operation (optional). 

2462 

2463 Returns: 

2464 A `Tensor`. Has the same type as `filter`. 

2465 """ 

2466 _ctx = _context._context or _context.context() 

2467 tld = _ctx._thread_local_data 

2468 if tld.is_eager: 

2469 try: 

2470 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2471 _ctx, "Conv3DBackpropInputV2", name, input_sizes, filter, 

2472 out_backprop, "strides", strides, "padding", padding, "data_format", 

2473 data_format, "dilations", dilations) 

2474 return _result 

2475 except _core._NotOkStatusException as e: 

2476 _ops.raise_from_not_ok_status(e, name) 

2477 except _core._FallbackException: 

2478 pass 

2479 try: 

2480 return conv3d_backprop_input_v2_eager_fallback( 

2481 input_sizes, filter, out_backprop, strides=strides, padding=padding, 

2482 data_format=data_format, dilations=dilations, name=name, ctx=_ctx) 

2483 except _core._SymbolicException: 

2484 pass # Add nodes to the TensorFlow graph. 

2485 # Add nodes to the TensorFlow graph. 

2486 if not isinstance(strides, (list, tuple)): 

2487 raise TypeError( 

2488 "Expected list for 'strides' argument to " 

2489 "'conv3d_backprop_input_v2' Op, not %r." % strides) 

2490 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2491 padding = _execute.make_str(padding, "padding") 

2492 if data_format is None: 

2493 data_format = "NDHWC" 

2494 data_format = _execute.make_str(data_format, "data_format") 

2495 if dilations is None: 

2496 dilations = [1, 1, 1, 1, 1] 

2497 if not isinstance(dilations, (list, tuple)): 

2498 raise TypeError( 

2499 "Expected list for 'dilations' argument to " 

2500 "'conv3d_backprop_input_v2' Op, not %r." % dilations) 

2501 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2502 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2503 "Conv3DBackpropInputV2", input_sizes=input_sizes, filter=filter, 

2504 out_backprop=out_backprop, strides=strides, 

2505 padding=padding, data_format=data_format, 

2506 dilations=dilations, name=name) 

2507 _result = _outputs[:] 

2508 if _execute.must_record_gradient(): 

2509 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2510 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2511 "data_format", _op.get_attr("data_format"), "dilations", 

2512 _op.get_attr("dilations"), "Tshape", 

2513 _op._get_attr_type("Tshape")) 

2514 _inputs_flat = _op.inputs 

2515 _execute.record_gradient( 

2516 "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result) 

2517 _result, = _result 

2518 return _result 

2519 

2520Conv3DBackpropInputV2 = tf_export("raw_ops.Conv3DBackpropInputV2")(_ops.to_raw_op(conv3d_backprop_input_v2)) 

2521 

2522 

2523def conv3d_backprop_input_v2_eager_fallback(input_sizes, filter, out_backprop, strides, padding, data_format, dilations, name, ctx): 

2524 if not isinstance(strides, (list, tuple)): 

2525 raise TypeError( 

2526 "Expected list for 'strides' argument to " 

2527 "'conv3d_backprop_input_v2' Op, not %r." % strides) 

2528 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2529 padding = _execute.make_str(padding, "padding") 

2530 if data_format is None: 

2531 data_format = "NDHWC" 

2532 data_format = _execute.make_str(data_format, "data_format") 

2533 if dilations is None: 

2534 dilations = [1, 1, 1, 1, 1] 

2535 if not isinstance(dilations, (list, tuple)): 

2536 raise TypeError( 

2537 "Expected list for 'dilations' argument to " 

2538 "'conv3d_backprop_input_v2' Op, not %r." % dilations) 

2539 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2540 _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

2541 (filter, out_backprop) = _inputs_T 

2542 _attr_Tshape, (input_sizes,) = _execute.args_to_matching_eager([input_sizes], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

2543 _inputs_flat = [input_sizes, filter, out_backprop] 

2544 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

2545 "data_format", data_format, "dilations", dilations, "Tshape", _attr_Tshape) 

2546 _result = _execute.execute(b"Conv3DBackpropInputV2", 1, inputs=_inputs_flat, 

2547 attrs=_attrs, ctx=ctx, name=name) 

2548 if _execute.must_record_gradient(): 

2549 _execute.record_gradient( 

2550 "Conv3DBackpropInputV2", _inputs_flat, _attrs, _result) 

2551 _result, = _result 

2552 return _result 

2553 

2554 

2555def data_format_dim_map(x, src_format="NHWC", dst_format="NCHW", name=None): 

2556 r"""Returns the dimension index in the destination data format given the one in 

2557 

2558 the source data format. 

2559 

2560 Args: 

2561 x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2562 A Tensor with each element as a dimension index in source data format. 

2563 Must be in the range [-4, 4). 

2564 src_format: An optional `string`. Defaults to `"NHWC"`. 

2565 source data format. 

2566 dst_format: An optional `string`. Defaults to `"NCHW"`. 

2567 destination data format. 

2568 name: A name for the operation (optional). 

2569 

2570 Returns: 

2571 A `Tensor`. Has the same type as `x`. 

2572 """ 

2573 _ctx = _context._context or _context.context() 

2574 tld = _ctx._thread_local_data 

2575 if tld.is_eager: 

2576 try: 

2577 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2578 _ctx, "DataFormatDimMap", name, x, "src_format", src_format, 

2579 "dst_format", dst_format) 

2580 return _result 

2581 except _core._NotOkStatusException as e: 

2582 _ops.raise_from_not_ok_status(e, name) 

2583 except _core._FallbackException: 

2584 pass 

2585 try: 

2586 return data_format_dim_map_eager_fallback( 

2587 x, src_format=src_format, dst_format=dst_format, name=name, 

2588 ctx=_ctx) 

2589 except _core._SymbolicException: 

2590 pass # Add nodes to the TensorFlow graph. 

2591 # Add nodes to the TensorFlow graph. 

2592 if src_format is None: 

2593 src_format = "NHWC" 

2594 src_format = _execute.make_str(src_format, "src_format") 

2595 if dst_format is None: 

2596 dst_format = "NCHW" 

2597 dst_format = _execute.make_str(dst_format, "dst_format") 

2598 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2599 "DataFormatDimMap", x=x, src_format=src_format, dst_format=dst_format, 

2600 name=name) 

2601 _result = _outputs[:] 

2602 if _execute.must_record_gradient(): 

2603 _attrs = ("T", _op._get_attr_type("T"), "src_format", 

2604 _op.get_attr("src_format"), "dst_format", 

2605 _op.get_attr("dst_format")) 

2606 _inputs_flat = _op.inputs 

2607 _execute.record_gradient( 

2608 "DataFormatDimMap", _inputs_flat, _attrs, _result) 

2609 _result, = _result 

2610 return _result 

2611 

2612DataFormatDimMap = tf_export("raw_ops.DataFormatDimMap")(_ops.to_raw_op(data_format_dim_map)) 

2613 

2614 

2615def data_format_dim_map_eager_fallback(x, src_format, dst_format, name, ctx): 

2616 if src_format is None: 

2617 src_format = "NHWC" 

2618 src_format = _execute.make_str(src_format, "src_format") 

2619 if dst_format is None: 

2620 dst_format = "NCHW" 

2621 dst_format = _execute.make_str(dst_format, "dst_format") 

2622 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

2623 _inputs_flat = [x] 

2624 _attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format) 

2625 _result = _execute.execute(b"DataFormatDimMap", 1, inputs=_inputs_flat, 

2626 attrs=_attrs, ctx=ctx, name=name) 

2627 if _execute.must_record_gradient(): 

2628 _execute.record_gradient( 

2629 "DataFormatDimMap", _inputs_flat, _attrs, _result) 

2630 _result, = _result 

2631 return _result 

2632 

2633 

2634def data_format_vec_permute(x, src_format="NHWC", dst_format="NCHW", name=None): 

2635 r"""Permute input tensor from `src_format` to `dst_format`. 

2636 

2637 Given source and destination format strings of length n=4 or 5, the input 

2638 tensor must be a vector of size n or n-2, or a 2D tensor of shape 

2639 (n, 2) or (n-2, 2). 

2640 

2641 If the first dimension of the input tensor is n-2, it is assumed that 

2642 non-spatial dimensions are omitted (i.e `N`, `C`). 

2643 

2644 For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: 

2645 ``` 

2646 [1, 2, 3, 4] 

2647 ``` 

2648 , the output will be: 

2649 ``` 

2650 [1, 4, 2, 3] 

2651 ``` 

2652 With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input: 

2653 ``` 

2654 [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]] 

2655 ``` 

2656 , the output will be: 

2657 ``` 

2658 [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]] 

2659 ``` 

2660 With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input: 

2661 ``` 

2662 [1, 2] 

2663 ``` 

2664 , the output will be: 

2665 ``` 

2666 [1, 2] 

2667 ``` 

2668 

2669 Args: 

2670 x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

2671 Tensor of rank 1 or 2 in source data format. 

2672 src_format: An optional `string`. Defaults to `"NHWC"`. 

2673 source data format. 

2674 dst_format: An optional `string`. Defaults to `"NCHW"`. 

2675 destination data format. 

2676 name: A name for the operation (optional). 

2677 

2678 Returns: 

2679 A `Tensor`. Has the same type as `x`. 

2680 """ 

2681 _ctx = _context._context or _context.context() 

2682 tld = _ctx._thread_local_data 

2683 if tld.is_eager: 

2684 try: 

2685 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2686 _ctx, "DataFormatVecPermute", name, x, "src_format", src_format, 

2687 "dst_format", dst_format) 

2688 return _result 

2689 except _core._NotOkStatusException as e: 

2690 _ops.raise_from_not_ok_status(e, name) 

2691 except _core._FallbackException: 

2692 pass 

2693 try: 

2694 return data_format_vec_permute_eager_fallback( 

2695 x, src_format=src_format, dst_format=dst_format, name=name, 

2696 ctx=_ctx) 

2697 except _core._SymbolicException: 

2698 pass # Add nodes to the TensorFlow graph. 

2699 # Add nodes to the TensorFlow graph. 

2700 if src_format is None: 

2701 src_format = "NHWC" 

2702 src_format = _execute.make_str(src_format, "src_format") 

2703 if dst_format is None: 

2704 dst_format = "NCHW" 

2705 dst_format = _execute.make_str(dst_format, "dst_format") 

2706 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2707 "DataFormatVecPermute", x=x, src_format=src_format, 

2708 dst_format=dst_format, name=name) 

2709 _result = _outputs[:] 

2710 if _execute.must_record_gradient(): 

2711 _attrs = ("T", _op._get_attr_type("T"), "src_format", 

2712 _op.get_attr("src_format"), "dst_format", 

2713 _op.get_attr("dst_format")) 

2714 _inputs_flat = _op.inputs 

2715 _execute.record_gradient( 

2716 "DataFormatVecPermute", _inputs_flat, _attrs, _result) 

2717 _result, = _result 

2718 return _result 

2719 

2720DataFormatVecPermute = tf_export("raw_ops.DataFormatVecPermute")(_ops.to_raw_op(data_format_vec_permute)) 

2721 

2722 

2723def data_format_vec_permute_eager_fallback(x, src_format, dst_format, name, ctx): 

2724 if src_format is None: 

2725 src_format = "NHWC" 

2726 src_format = _execute.make_str(src_format, "src_format") 

2727 if dst_format is None: 

2728 dst_format = "NCHW" 

2729 dst_format = _execute.make_str(dst_format, "dst_format") 

2730 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

2731 _inputs_flat = [x] 

2732 _attrs = ("T", _attr_T, "src_format", src_format, "dst_format", dst_format) 

2733 _result = _execute.execute(b"DataFormatVecPermute", 1, inputs=_inputs_flat, 

2734 attrs=_attrs, ctx=ctx, name=name) 

2735 if _execute.must_record_gradient(): 

2736 _execute.record_gradient( 

2737 "DataFormatVecPermute", _inputs_flat, _attrs, _result) 

2738 _result, = _result 

2739 return _result 

2740 

2741 

2742def depthwise_conv2d_native(input, filter, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

2743 r"""Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. 

2744 

2745 Given an input tensor of shape `[batch, in_height, in_width, in_channels]` 

2746 and a filter / kernel tensor of shape 

2747 `[filter_height, filter_width, in_channels, channel_multiplier]`, containing 

2748 `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies 

2749 a different filter to each input channel (expanding from 1 channel to 

2750 `channel_multiplier` channels for each), then concatenates the results 

2751 together. Thus, the output has `in_channels * channel_multiplier` channels. 

2752 

2753 ``` 

2754 for k in 0..in_channels-1 

2755 for q in 0..channel_multiplier-1 

2756 output[b, i, j, k * channel_multiplier + q] = 

2757 sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * 

2758 filter[di, dj, k, q] 

2759 ``` 

2760 

2761 Must have `strides[0] = strides[3] = 1`. For the most common case of the same 

2762 horizontal and vertices strides, `strides = [1, stride, stride, 1]`. 

2763 

2764 Args: 

2765 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

2766 filter: A `Tensor`. Must have the same type as `input`. 

2767 strides: A list of `ints`. 

2768 1-D of length 4. The stride of the sliding window for each dimension 

2769 of `input`. 

2770 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

2771 The type of padding algorithm to use. 

2772 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

2773 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

2774 Specify the data format of the input and output data. With the 

2775 default format "NHWC", the data is stored in the order of: 

2776 [batch, height, width, channels]. 

2777 Alternatively, the format could be "NCHW", the data storage order of: 

2778 [batch, channels, height, width]. 

2779 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

2780 1-D tensor of length 4. The dilation factor for each dimension of 

2781 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

2782 element on that dimension. The dimension order is determined by the value of 

2783 `data_format`, see above for details. Dilations in the batch and depth 

2784 dimensions must be 1. 

2785 name: A name for the operation (optional). 

2786 

2787 Returns: 

2788 A `Tensor`. Has the same type as `input`. 

2789 """ 

2790 _ctx = _context._context or _context.context() 

2791 tld = _ctx._thread_local_data 

2792 if tld.is_eager: 

2793 try: 

2794 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2795 _ctx, "DepthwiseConv2dNative", name, input, filter, "strides", 

2796 strides, "padding", padding, "explicit_paddings", explicit_paddings, 

2797 "data_format", data_format, "dilations", dilations) 

2798 return _result 

2799 except _core._NotOkStatusException as e: 

2800 _ops.raise_from_not_ok_status(e, name) 

2801 except _core._FallbackException: 

2802 pass 

2803 try: 

2804 return depthwise_conv2d_native_eager_fallback( 

2805 input, filter, strides=strides, padding=padding, 

2806 explicit_paddings=explicit_paddings, data_format=data_format, 

2807 dilations=dilations, name=name, ctx=_ctx) 

2808 except _core._SymbolicException: 

2809 pass # Add nodes to the TensorFlow graph. 

2810 # Add nodes to the TensorFlow graph. 

2811 if not isinstance(strides, (list, tuple)): 

2812 raise TypeError( 

2813 "Expected list for 'strides' argument to " 

2814 "'depthwise_conv2d_native' Op, not %r." % strides) 

2815 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2816 padding = _execute.make_str(padding, "padding") 

2817 if explicit_paddings is None: 

2818 explicit_paddings = [] 

2819 if not isinstance(explicit_paddings, (list, tuple)): 

2820 raise TypeError( 

2821 "Expected list for 'explicit_paddings' argument to " 

2822 "'depthwise_conv2d_native' Op, not %r." % explicit_paddings) 

2823 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

2824 if data_format is None: 

2825 data_format = "NHWC" 

2826 data_format = _execute.make_str(data_format, "data_format") 

2827 if dilations is None: 

2828 dilations = [1, 1, 1, 1] 

2829 if not isinstance(dilations, (list, tuple)): 

2830 raise TypeError( 

2831 "Expected list for 'dilations' argument to " 

2832 "'depthwise_conv2d_native' Op, not %r." % dilations) 

2833 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2834 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2835 "DepthwiseConv2dNative", input=input, filter=filter, strides=strides, 

2836 padding=padding, 

2837 explicit_paddings=explicit_paddings, 

2838 data_format=data_format, dilations=dilations, 

2839 name=name) 

2840 _result = _outputs[:] 

2841 if _execute.must_record_gradient(): 

2842 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2843 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2844 "explicit_paddings", _op.get_attr("explicit_paddings"), 

2845 "data_format", _op.get_attr("data_format"), "dilations", 

2846 _op.get_attr("dilations")) 

2847 _inputs_flat = _op.inputs 

2848 _execute.record_gradient( 

2849 "DepthwiseConv2dNative", _inputs_flat, _attrs, _result) 

2850 _result, = _result 

2851 return _result 

2852 

2853DepthwiseConv2dNative = tf_export("raw_ops.DepthwiseConv2dNative")(_ops.to_raw_op(depthwise_conv2d_native)) 

2854 

2855 

2856def depthwise_conv2d_native_eager_fallback(input, filter, strides, padding, explicit_paddings, data_format, dilations, name, ctx): 

2857 if not isinstance(strides, (list, tuple)): 

2858 raise TypeError( 

2859 "Expected list for 'strides' argument to " 

2860 "'depthwise_conv2d_native' Op, not %r." % strides) 

2861 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2862 padding = _execute.make_str(padding, "padding") 

2863 if explicit_paddings is None: 

2864 explicit_paddings = [] 

2865 if not isinstance(explicit_paddings, (list, tuple)): 

2866 raise TypeError( 

2867 "Expected list for 'explicit_paddings' argument to " 

2868 "'depthwise_conv2d_native' Op, not %r." % explicit_paddings) 

2869 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

2870 if data_format is None: 

2871 data_format = "NHWC" 

2872 data_format = _execute.make_str(data_format, "data_format") 

2873 if dilations is None: 

2874 dilations = [1, 1, 1, 1] 

2875 if not isinstance(dilations, (list, tuple)): 

2876 raise TypeError( 

2877 "Expected list for 'dilations' argument to " 

2878 "'depthwise_conv2d_native' Op, not %r." % dilations) 

2879 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2880 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

2881 (input, filter) = _inputs_T 

2882 _inputs_flat = [input, filter] 

2883 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

2884 "explicit_paddings", explicit_paddings, "data_format", data_format, 

2885 "dilations", dilations) 

2886 _result = _execute.execute(b"DepthwiseConv2dNative", 1, inputs=_inputs_flat, 

2887 attrs=_attrs, ctx=ctx, name=name) 

2888 if _execute.must_record_gradient(): 

2889 _execute.record_gradient( 

2890 "DepthwiseConv2dNative", _inputs_flat, _attrs, _result) 

2891 _result, = _result 

2892 return _result 

2893 

2894 

2895def depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

2896 r"""Computes the gradients of depthwise convolution with respect to the filter. 

2897 

2898 Args: 

2899 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

2900 4-D with shape based on `data_format`. For example, if 

2901 `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, 

2902 in_width, in_channels]` tensor. 

2903 filter_sizes: A `Tensor` of type `int32`. 

2904 An integer vector representing the tensor shape of `filter`, 

2905 where `filter` is a 4-D 

2906 `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. 

2907 out_backprop: A `Tensor`. Must have the same type as `input`. 

2908 4-D with shape based on `data_format`. 

2909 For example, if `data_format` is 'NHWC' then 

2910 out_backprop shape is `[batch, out_height, out_width, out_channels]`. 

2911 Gradients w.r.t. the output of the convolution. 

2912 strides: A list of `ints`. 

2913 The stride of the sliding window for each dimension of the input 

2914 of the convolution. 

2915 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

2916 The type of padding algorithm to use. 

2917 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

2918 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

2919 Specify the data format of the input and output data. With the 

2920 default format "NHWC", the data is stored in the order of: 

2921 [batch, height, width, channels]. 

2922 Alternatively, the format could be "NCHW", the data storage order of: 

2923 [batch, channels, height, width]. 

2924 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

2925 1-D tensor of length 4. The dilation factor for each dimension of 

2926 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

2927 element on that dimension. The dimension order is determined by the value of 

2928 `data_format`, see above for details. Dilations in the batch and depth 

2929 dimensions must be 1. 

2930 name: A name for the operation (optional). 

2931 

2932 Returns: 

2933 A `Tensor`. Has the same type as `input`. 

2934 """ 

2935 _ctx = _context._context or _context.context() 

2936 tld = _ctx._thread_local_data 

2937 if tld.is_eager: 

2938 try: 

2939 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

2940 _ctx, "DepthwiseConv2dNativeBackpropFilter", name, input, 

2941 filter_sizes, out_backprop, "strides", strides, "padding", padding, 

2942 "explicit_paddings", explicit_paddings, "data_format", data_format, 

2943 "dilations", dilations) 

2944 return _result 

2945 except _core._NotOkStatusException as e: 

2946 _ops.raise_from_not_ok_status(e, name) 

2947 except _core._FallbackException: 

2948 pass 

2949 try: 

2950 return depthwise_conv2d_native_backprop_filter_eager_fallback( 

2951 input, filter_sizes, out_backprop, strides=strides, padding=padding, 

2952 explicit_paddings=explicit_paddings, data_format=data_format, 

2953 dilations=dilations, name=name, ctx=_ctx) 

2954 except _core._SymbolicException: 

2955 pass # Add nodes to the TensorFlow graph. 

2956 # Add nodes to the TensorFlow graph. 

2957 if not isinstance(strides, (list, tuple)): 

2958 raise TypeError( 

2959 "Expected list for 'strides' argument to " 

2960 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides) 

2961 strides = [_execute.make_int(_i, "strides") for _i in strides] 

2962 padding = _execute.make_str(padding, "padding") 

2963 if explicit_paddings is None: 

2964 explicit_paddings = [] 

2965 if not isinstance(explicit_paddings, (list, tuple)): 

2966 raise TypeError( 

2967 "Expected list for 'explicit_paddings' argument to " 

2968 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % explicit_paddings) 

2969 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

2970 if data_format is None: 

2971 data_format = "NHWC" 

2972 data_format = _execute.make_str(data_format, "data_format") 

2973 if dilations is None: 

2974 dilations = [1, 1, 1, 1] 

2975 if not isinstance(dilations, (list, tuple)): 

2976 raise TypeError( 

2977 "Expected list for 'dilations' argument to " 

2978 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations) 

2979 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

2980 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

2981 "DepthwiseConv2dNativeBackpropFilter", input=input, 

2982 filter_sizes=filter_sizes, 

2983 out_backprop=out_backprop, 

2984 strides=strides, 

2985 padding=padding, 

2986 explicit_paddings=explicit_paddings, 

2987 data_format=data_format, 

2988 dilations=dilations, name=name) 

2989 _result = _outputs[:] 

2990 if _execute.must_record_gradient(): 

2991 _attrs = ("T", _op._get_attr_type("T"), "strides", 

2992 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

2993 "explicit_paddings", _op.get_attr("explicit_paddings"), 

2994 "data_format", _op.get_attr("data_format"), "dilations", 

2995 _op.get_attr("dilations")) 

2996 _inputs_flat = _op.inputs 

2997 _execute.record_gradient( 

2998 "DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result) 

2999 _result, = _result 

3000 return _result 

3001 

3002DepthwiseConv2dNativeBackpropFilter = tf_export("raw_ops.DepthwiseConv2dNativeBackpropFilter")(_ops.to_raw_op(depthwise_conv2d_native_backprop_filter)) 

3003 

3004 

3005def depthwise_conv2d_native_backprop_filter_eager_fallback(input, filter_sizes, out_backprop, strides, padding, explicit_paddings, data_format, dilations, name, ctx): 

3006 if not isinstance(strides, (list, tuple)): 

3007 raise TypeError( 

3008 "Expected list for 'strides' argument to " 

3009 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % strides) 

3010 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3011 padding = _execute.make_str(padding, "padding") 

3012 if explicit_paddings is None: 

3013 explicit_paddings = [] 

3014 if not isinstance(explicit_paddings, (list, tuple)): 

3015 raise TypeError( 

3016 "Expected list for 'explicit_paddings' argument to " 

3017 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % explicit_paddings) 

3018 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

3019 if data_format is None: 

3020 data_format = "NHWC" 

3021 data_format = _execute.make_str(data_format, "data_format") 

3022 if dilations is None: 

3023 dilations = [1, 1, 1, 1] 

3024 if not isinstance(dilations, (list, tuple)): 

3025 raise TypeError( 

3026 "Expected list for 'dilations' argument to " 

3027 "'depthwise_conv2d_native_backprop_filter' Op, not %r." % dilations) 

3028 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

3029 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

3030 (input, out_backprop) = _inputs_T 

3031 filter_sizes = _ops.convert_to_tensor(filter_sizes, _dtypes.int32) 

3032 _inputs_flat = [input, filter_sizes, out_backprop] 

3033 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

3034 "explicit_paddings", explicit_paddings, "data_format", data_format, 

3035 "dilations", dilations) 

3036 _result = _execute.execute(b"DepthwiseConv2dNativeBackpropFilter", 1, 

3037 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3038 name=name) 

3039 if _execute.must_record_gradient(): 

3040 _execute.record_gradient( 

3041 "DepthwiseConv2dNativeBackpropFilter", _inputs_flat, _attrs, _result) 

3042 _result, = _result 

3043 return _result 

3044 

3045 

3046def depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop, strides, padding, explicit_paddings=[], data_format="NHWC", dilations=[1, 1, 1, 1], name=None): 

3047 r"""Computes the gradients of depthwise convolution with respect to the input. 

3048 

3049 Args: 

3050 input_sizes: A `Tensor` of type `int32`. 

3051 An integer vector representing the shape of `input`, based 

3052 on `data_format`. For example, if `data_format` is 'NHWC' then 

3053 `input` is a 4-D `[batch, height, width, channels]` tensor. 

3054 filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

3055 4-D with shape 

3056 `[filter_height, filter_width, in_channels, depthwise_multiplier]`. 

3057 out_backprop: A `Tensor`. Must have the same type as `filter`. 

3058 4-D with shape based on `data_format`. 

3059 For example, if `data_format` is 'NHWC' then 

3060 out_backprop shape is `[batch, out_height, out_width, out_channels]`. 

3061 Gradients w.r.t. the output of the convolution. 

3062 strides: A list of `ints`. 

3063 The stride of the sliding window for each dimension of the input 

3064 of the convolution. 

3065 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

3066 The type of padding algorithm to use. 

3067 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

3068 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

3069 Specify the data format of the input and output data. With the 

3070 default format "NHWC", the data is stored in the order of: 

3071 [batch, height, width, channels]. 

3072 Alternatively, the format could be "NCHW", the data storage order of: 

3073 [batch, channels, height, width]. 

3074 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

3075 1-D tensor of length 4. The dilation factor for each dimension of 

3076 `input`. If set to k > 1, there will be k-1 skipped cells between each filter 

3077 element on that dimension. The dimension order is determined by the value of 

3078 `data_format`, see above for details. Dilations in the batch and depth 

3079 dimensions must be 1. 

3080 name: A name for the operation (optional). 

3081 

3082 Returns: 

3083 A `Tensor`. Has the same type as `filter`. 

3084 """ 

3085 _ctx = _context._context or _context.context() 

3086 tld = _ctx._thread_local_data 

3087 if tld.is_eager: 

3088 try: 

3089 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3090 _ctx, "DepthwiseConv2dNativeBackpropInput", name, input_sizes, filter, 

3091 out_backprop, "strides", strides, "padding", padding, 

3092 "explicit_paddings", explicit_paddings, "data_format", data_format, 

3093 "dilations", dilations) 

3094 return _result 

3095 except _core._NotOkStatusException as e: 

3096 _ops.raise_from_not_ok_status(e, name) 

3097 except _core._FallbackException: 

3098 pass 

3099 try: 

3100 return depthwise_conv2d_native_backprop_input_eager_fallback( 

3101 input_sizes, filter, out_backprop, strides=strides, padding=padding, 

3102 explicit_paddings=explicit_paddings, data_format=data_format, 

3103 dilations=dilations, name=name, ctx=_ctx) 

3104 except _core._SymbolicException: 

3105 pass # Add nodes to the TensorFlow graph. 

3106 # Add nodes to the TensorFlow graph. 

3107 if not isinstance(strides, (list, tuple)): 

3108 raise TypeError( 

3109 "Expected list for 'strides' argument to " 

3110 "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides) 

3111 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3112 padding = _execute.make_str(padding, "padding") 

3113 if explicit_paddings is None: 

3114 explicit_paddings = [] 

3115 if not isinstance(explicit_paddings, (list, tuple)): 

3116 raise TypeError( 

3117 "Expected list for 'explicit_paddings' argument to " 

3118 "'depthwise_conv2d_native_backprop_input' Op, not %r." % explicit_paddings) 

3119 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

3120 if data_format is None: 

3121 data_format = "NHWC" 

3122 data_format = _execute.make_str(data_format, "data_format") 

3123 if dilations is None: 

3124 dilations = [1, 1, 1, 1] 

3125 if not isinstance(dilations, (list, tuple)): 

3126 raise TypeError( 

3127 "Expected list for 'dilations' argument to " 

3128 "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations) 

3129 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

3130 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3131 "DepthwiseConv2dNativeBackpropInput", input_sizes=input_sizes, 

3132 filter=filter, 

3133 out_backprop=out_backprop, 

3134 strides=strides, 

3135 padding=padding, 

3136 explicit_paddings=explicit_paddings, 

3137 data_format=data_format, 

3138 dilations=dilations, name=name) 

3139 _result = _outputs[:] 

3140 if _execute.must_record_gradient(): 

3141 _attrs = ("T", _op._get_attr_type("T"), "strides", 

3142 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

3143 "explicit_paddings", _op.get_attr("explicit_paddings"), 

3144 "data_format", _op.get_attr("data_format"), "dilations", 

3145 _op.get_attr("dilations")) 

3146 _inputs_flat = _op.inputs 

3147 _execute.record_gradient( 

3148 "DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result) 

3149 _result, = _result 

3150 return _result 

3151 

3152DepthwiseConv2dNativeBackpropInput = tf_export("raw_ops.DepthwiseConv2dNativeBackpropInput")(_ops.to_raw_op(depthwise_conv2d_native_backprop_input)) 

3153 

3154 

3155def depthwise_conv2d_native_backprop_input_eager_fallback(input_sizes, filter, out_backprop, strides, padding, explicit_paddings, data_format, dilations, name, ctx): 

3156 if not isinstance(strides, (list, tuple)): 

3157 raise TypeError( 

3158 "Expected list for 'strides' argument to " 

3159 "'depthwise_conv2d_native_backprop_input' Op, not %r." % strides) 

3160 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3161 padding = _execute.make_str(padding, "padding") 

3162 if explicit_paddings is None: 

3163 explicit_paddings = [] 

3164 if not isinstance(explicit_paddings, (list, tuple)): 

3165 raise TypeError( 

3166 "Expected list for 'explicit_paddings' argument to " 

3167 "'depthwise_conv2d_native_backprop_input' Op, not %r." % explicit_paddings) 

3168 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

3169 if data_format is None: 

3170 data_format = "NHWC" 

3171 data_format = _execute.make_str(data_format, "data_format") 

3172 if dilations is None: 

3173 dilations = [1, 1, 1, 1] 

3174 if not isinstance(dilations, (list, tuple)): 

3175 raise TypeError( 

3176 "Expected list for 'dilations' argument to " 

3177 "'depthwise_conv2d_native_backprop_input' Op, not %r." % dilations) 

3178 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

3179 _attr_T, _inputs_T = _execute.args_to_matching_eager([filter, out_backprop], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

3180 (filter, out_backprop) = _inputs_T 

3181 input_sizes = _ops.convert_to_tensor(input_sizes, _dtypes.int32) 

3182 _inputs_flat = [input_sizes, filter, out_backprop] 

3183 _attrs = ("T", _attr_T, "strides", strides, "padding", padding, 

3184 "explicit_paddings", explicit_paddings, "data_format", data_format, 

3185 "dilations", dilations) 

3186 _result = _execute.execute(b"DepthwiseConv2dNativeBackpropInput", 1, 

3187 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3188 name=name) 

3189 if _execute.must_record_gradient(): 

3190 _execute.record_gradient( 

3191 "DepthwiseConv2dNativeBackpropInput", _inputs_flat, _attrs, _result) 

3192 _result, = _result 

3193 return _result 

3194 

3195 

3196def dilation2d(input, filter, strides, rates, padding, name=None): 

3197 r"""Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. 

3198 

3199 The `input` tensor has shape `[batch, in_height, in_width, depth]` and the 

3200 `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each 

3201 input channel is processed independently of the others with its own structuring 

3202 function. The `output` tensor has shape 

3203 `[batch, out_height, out_width, depth]`. The spatial dimensions of the output 

3204 tensor depend on the `padding` algorithm. We currently only support the default 

3205 "NHWC" `data_format`. 

3206 

3207 In detail, the grayscale morphological 2-D dilation is the max-sum correlation 

3208 (for consistency with `conv2d`, we use unmirrored filters): 

3209 

3210 output[b, y, x, c] = 

3211 max_{dy, dx} input[b, 

3212 strides[1] * y + rates[1] * dy, 

3213 strides[2] * x + rates[2] * dx, 

3214 c] + 

3215 filter[dy, dx, c] 

3216 

3217 Max-pooling is a special case when the filter has size equal to the pooling 

3218 kernel size and contains all zeros. 

3219 

3220 Note on duality: The dilation of `input` by the `filter` is equal to the 

3221 negation of the erosion of `-input` by the reflected `filter`. 

3222 

3223 Args: 

3224 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

3225 4-D with shape `[batch, in_height, in_width, depth]`. 

3226 filter: A `Tensor`. Must have the same type as `input`. 

3227 3-D with shape `[filter_height, filter_width, depth]`. 

3228 strides: A list of `ints` that has length `>= 4`. 

3229 The stride of the sliding window for each dimension of the input 

3230 tensor. Must be: `[1, stride_height, stride_width, 1]`. 

3231 rates: A list of `ints` that has length `>= 4`. 

3232 The input stride for atrous morphological dilation. Must be: 

3233 `[1, rate_height, rate_width, 1]`. 

3234 padding: A `string` from: `"SAME", "VALID"`. 

3235 The type of padding algorithm to use. 

3236 name: A name for the operation (optional). 

3237 

3238 Returns: 

3239 A `Tensor`. Has the same type as `input`. 

3240 """ 

3241 _ctx = _context._context or _context.context() 

3242 tld = _ctx._thread_local_data 

3243 if tld.is_eager: 

3244 try: 

3245 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3246 _ctx, "Dilation2D", name, input, filter, "strides", strides, "rates", 

3247 rates, "padding", padding) 

3248 return _result 

3249 except _core._NotOkStatusException as e: 

3250 _ops.raise_from_not_ok_status(e, name) 

3251 except _core._FallbackException: 

3252 pass 

3253 try: 

3254 return dilation2d_eager_fallback( 

3255 input, filter, strides=strides, rates=rates, padding=padding, 

3256 name=name, ctx=_ctx) 

3257 except _core._SymbolicException: 

3258 pass # Add nodes to the TensorFlow graph. 

3259 # Add nodes to the TensorFlow graph. 

3260 if not isinstance(strides, (list, tuple)): 

3261 raise TypeError( 

3262 "Expected list for 'strides' argument to " 

3263 "'dilation2d' Op, not %r." % strides) 

3264 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3265 if not isinstance(rates, (list, tuple)): 

3266 raise TypeError( 

3267 "Expected list for 'rates' argument to " 

3268 "'dilation2d' Op, not %r." % rates) 

3269 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3270 padding = _execute.make_str(padding, "padding") 

3271 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3272 "Dilation2D", input=input, filter=filter, strides=strides, 

3273 rates=rates, padding=padding, name=name) 

3274 _result = _outputs[:] 

3275 if _execute.must_record_gradient(): 

3276 _attrs = ("T", _op._get_attr_type("T"), "strides", 

3277 _op.get_attr("strides"), "rates", _op.get_attr("rates"), 

3278 "padding", _op.get_attr("padding")) 

3279 _inputs_flat = _op.inputs 

3280 _execute.record_gradient( 

3281 "Dilation2D", _inputs_flat, _attrs, _result) 

3282 _result, = _result 

3283 return _result 

3284 

3285Dilation2D = tf_export("raw_ops.Dilation2D")(_ops.to_raw_op(dilation2d)) 

3286 

3287 

3288def dilation2d_eager_fallback(input, filter, strides, rates, padding, name, ctx): 

3289 if not isinstance(strides, (list, tuple)): 

3290 raise TypeError( 

3291 "Expected list for 'strides' argument to " 

3292 "'dilation2d' Op, not %r." % strides) 

3293 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3294 if not isinstance(rates, (list, tuple)): 

3295 raise TypeError( 

3296 "Expected list for 'rates' argument to " 

3297 "'dilation2d' Op, not %r." % rates) 

3298 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3299 padding = _execute.make_str(padding, "padding") 

3300 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3301 (input, filter) = _inputs_T 

3302 _inputs_flat = [input, filter] 

3303 _attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding", 

3304 padding) 

3305 _result = _execute.execute(b"Dilation2D", 1, inputs=_inputs_flat, 

3306 attrs=_attrs, ctx=ctx, name=name) 

3307 if _execute.must_record_gradient(): 

3308 _execute.record_gradient( 

3309 "Dilation2D", _inputs_flat, _attrs, _result) 

3310 _result, = _result 

3311 return _result 

3312 

3313 

3314def dilation2d_backprop_filter(input, filter, out_backprop, strides, rates, padding, name=None): 

3315 r"""Computes the gradient of morphological 2-D dilation with respect to the filter. 

3316 

3317 Args: 

3318 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

3319 4-D with shape `[batch, in_height, in_width, depth]`. 

3320 filter: A `Tensor`. Must have the same type as `input`. 

3321 3-D with shape `[filter_height, filter_width, depth]`. 

3322 out_backprop: A `Tensor`. Must have the same type as `input`. 

3323 4-D with shape `[batch, out_height, out_width, depth]`. 

3324 strides: A list of `ints` that has length `>= 4`. 

3325 1-D of length 4. The stride of the sliding window for each dimension of 

3326 the input tensor. Must be: `[1, stride_height, stride_width, 1]`. 

3327 rates: A list of `ints` that has length `>= 4`. 

3328 1-D of length 4. The input stride for atrous morphological dilation. 

3329 Must be: `[1, rate_height, rate_width, 1]`. 

3330 padding: A `string` from: `"SAME", "VALID"`. 

3331 The type of padding algorithm to use. 

3332 name: A name for the operation (optional). 

3333 

3334 Returns: 

3335 A `Tensor`. Has the same type as `input`. 

3336 """ 

3337 _ctx = _context._context or _context.context() 

3338 tld = _ctx._thread_local_data 

3339 if tld.is_eager: 

3340 try: 

3341 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3342 _ctx, "Dilation2DBackpropFilter", name, input, filter, out_backprop, 

3343 "strides", strides, "rates", rates, "padding", padding) 

3344 return _result 

3345 except _core._NotOkStatusException as e: 

3346 _ops.raise_from_not_ok_status(e, name) 

3347 except _core._FallbackException: 

3348 pass 

3349 try: 

3350 return dilation2d_backprop_filter_eager_fallback( 

3351 input, filter, out_backprop, strides=strides, rates=rates, 

3352 padding=padding, name=name, ctx=_ctx) 

3353 except _core._SymbolicException: 

3354 pass # Add nodes to the TensorFlow graph. 

3355 # Add nodes to the TensorFlow graph. 

3356 if not isinstance(strides, (list, tuple)): 

3357 raise TypeError( 

3358 "Expected list for 'strides' argument to " 

3359 "'dilation2d_backprop_filter' Op, not %r." % strides) 

3360 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3361 if not isinstance(rates, (list, tuple)): 

3362 raise TypeError( 

3363 "Expected list for 'rates' argument to " 

3364 "'dilation2d_backprop_filter' Op, not %r." % rates) 

3365 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3366 padding = _execute.make_str(padding, "padding") 

3367 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3368 "Dilation2DBackpropFilter", input=input, filter=filter, 

3369 out_backprop=out_backprop, 

3370 strides=strides, rates=rates, 

3371 padding=padding, name=name) 

3372 _result = _outputs[:] 

3373 if _execute.must_record_gradient(): 

3374 _attrs = ("T", _op._get_attr_type("T"), "strides", 

3375 _op.get_attr("strides"), "rates", _op.get_attr("rates"), 

3376 "padding", _op.get_attr("padding")) 

3377 _inputs_flat = _op.inputs 

3378 _execute.record_gradient( 

3379 "Dilation2DBackpropFilter", _inputs_flat, _attrs, _result) 

3380 _result, = _result 

3381 return _result 

3382 

3383Dilation2DBackpropFilter = tf_export("raw_ops.Dilation2DBackpropFilter")(_ops.to_raw_op(dilation2d_backprop_filter)) 

3384 

3385 

3386def dilation2d_backprop_filter_eager_fallback(input, filter, out_backprop, strides, rates, padding, name, ctx): 

3387 if not isinstance(strides, (list, tuple)): 

3388 raise TypeError( 

3389 "Expected list for 'strides' argument to " 

3390 "'dilation2d_backprop_filter' Op, not %r." % strides) 

3391 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3392 if not isinstance(rates, (list, tuple)): 

3393 raise TypeError( 

3394 "Expected list for 'rates' argument to " 

3395 "'dilation2d_backprop_filter' Op, not %r." % rates) 

3396 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3397 padding = _execute.make_str(padding, "padding") 

3398 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3399 (input, filter, out_backprop) = _inputs_T 

3400 _inputs_flat = [input, filter, out_backprop] 

3401 _attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding", 

3402 padding) 

3403 _result = _execute.execute(b"Dilation2DBackpropFilter", 1, 

3404 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3405 name=name) 

3406 if _execute.must_record_gradient(): 

3407 _execute.record_gradient( 

3408 "Dilation2DBackpropFilter", _inputs_flat, _attrs, _result) 

3409 _result, = _result 

3410 return _result 

3411 

3412 

3413def dilation2d_backprop_input(input, filter, out_backprop, strides, rates, padding, name=None): 

3414 r"""Computes the gradient of morphological 2-D dilation with respect to the input. 

3415 

3416 Args: 

3417 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

3418 4-D with shape `[batch, in_height, in_width, depth]`. 

3419 filter: A `Tensor`. Must have the same type as `input`. 

3420 3-D with shape `[filter_height, filter_width, depth]`. 

3421 out_backprop: A `Tensor`. Must have the same type as `input`. 

3422 4-D with shape `[batch, out_height, out_width, depth]`. 

3423 strides: A list of `ints` that has length `>= 4`. 

3424 1-D of length 4. The stride of the sliding window for each dimension of 

3425 the input tensor. Must be: `[1, stride_height, stride_width, 1]`. 

3426 rates: A list of `ints` that has length `>= 4`. 

3427 1-D of length 4. The input stride for atrous morphological dilation. 

3428 Must be: `[1, rate_height, rate_width, 1]`. 

3429 padding: A `string` from: `"SAME", "VALID"`. 

3430 The type of padding algorithm to use. 

3431 name: A name for the operation (optional). 

3432 

3433 Returns: 

3434 A `Tensor`. Has the same type as `input`. 

3435 """ 

3436 _ctx = _context._context or _context.context() 

3437 tld = _ctx._thread_local_data 

3438 if tld.is_eager: 

3439 try: 

3440 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3441 _ctx, "Dilation2DBackpropInput", name, input, filter, out_backprop, 

3442 "strides", strides, "rates", rates, "padding", padding) 

3443 return _result 

3444 except _core._NotOkStatusException as e: 

3445 _ops.raise_from_not_ok_status(e, name) 

3446 except _core._FallbackException: 

3447 pass 

3448 try: 

3449 return dilation2d_backprop_input_eager_fallback( 

3450 input, filter, out_backprop, strides=strides, rates=rates, 

3451 padding=padding, name=name, ctx=_ctx) 

3452 except _core._SymbolicException: 

3453 pass # Add nodes to the TensorFlow graph. 

3454 # Add nodes to the TensorFlow graph. 

3455 if not isinstance(strides, (list, tuple)): 

3456 raise TypeError( 

3457 "Expected list for 'strides' argument to " 

3458 "'dilation2d_backprop_input' Op, not %r." % strides) 

3459 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3460 if not isinstance(rates, (list, tuple)): 

3461 raise TypeError( 

3462 "Expected list for 'rates' argument to " 

3463 "'dilation2d_backprop_input' Op, not %r." % rates) 

3464 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3465 padding = _execute.make_str(padding, "padding") 

3466 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3467 "Dilation2DBackpropInput", input=input, filter=filter, 

3468 out_backprop=out_backprop, strides=strides, 

3469 rates=rates, padding=padding, name=name) 

3470 _result = _outputs[:] 

3471 if _execute.must_record_gradient(): 

3472 _attrs = ("T", _op._get_attr_type("T"), "strides", 

3473 _op.get_attr("strides"), "rates", _op.get_attr("rates"), 

3474 "padding", _op.get_attr("padding")) 

3475 _inputs_flat = _op.inputs 

3476 _execute.record_gradient( 

3477 "Dilation2DBackpropInput", _inputs_flat, _attrs, _result) 

3478 _result, = _result 

3479 return _result 

3480 

3481Dilation2DBackpropInput = tf_export("raw_ops.Dilation2DBackpropInput")(_ops.to_raw_op(dilation2d_backprop_input)) 

3482 

3483 

3484def dilation2d_backprop_input_eager_fallback(input, filter, out_backprop, strides, rates, padding, name, ctx): 

3485 if not isinstance(strides, (list, tuple)): 

3486 raise TypeError( 

3487 "Expected list for 'strides' argument to " 

3488 "'dilation2d_backprop_input' Op, not %r." % strides) 

3489 strides = [_execute.make_int(_i, "strides") for _i in strides] 

3490 if not isinstance(rates, (list, tuple)): 

3491 raise TypeError( 

3492 "Expected list for 'rates' argument to " 

3493 "'dilation2d_backprop_input' Op, not %r." % rates) 

3494 rates = [_execute.make_int(_i, "rates") for _i in rates] 

3495 padding = _execute.make_str(padding, "padding") 

3496 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

3497 (input, filter, out_backprop) = _inputs_T 

3498 _inputs_flat = [input, filter, out_backprop] 

3499 _attrs = ("T", _attr_T, "strides", strides, "rates", rates, "padding", 

3500 padding) 

3501 _result = _execute.execute(b"Dilation2DBackpropInput", 1, 

3502 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

3503 name=name) 

3504 if _execute.must_record_gradient(): 

3505 _execute.record_gradient( 

3506 "Dilation2DBackpropInput", _inputs_flat, _attrs, _result) 

3507 _result, = _result 

3508 return _result 

3509 

3510 

3511@_dispatch.add_fallback_dispatch_list 

3512@_dispatch.add_type_based_api_dispatcher 

3513@tf_export('nn.elu') 

3514def elu(features, name=None): 

3515 r"""Computes the exponential linear function. 

3516 

3517 The ELU function is defined as: 

3518 

3519 * $ e ^ x - 1 $ if $ x < 0 $ 

3520 * $ x $ if $ x >= 0 $ 

3521 

3522 Examples: 

3523 

3524 >>> tf.nn.elu(1.0) 

3525 <tf.Tensor: shape=(), dtype=float32, numpy=1.0> 

3526 >>> tf.nn.elu(0.0) 

3527 <tf.Tensor: shape=(), dtype=float32, numpy=0.0> 

3528 >>> tf.nn.elu(-1000.0) 

3529 <tf.Tensor: shape=(), dtype=float32, numpy=-1.0> 

3530 

3531 See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) 

3532 ](http://arxiv.org/abs/1511.07289) 

3533 

3534 Args: 

3535 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

3536 name: A name for the operation (optional). 

3537 

3538 Returns: 

3539 A `Tensor`. Has the same type as `features`. 

3540 """ 

3541 _ctx = _context._context or _context.context() 

3542 tld = _ctx._thread_local_data 

3543 if tld.is_eager: 

3544 try: 

3545 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3546 _ctx, "Elu", name, features) 

3547 return _result 

3548 except _core._NotOkStatusException as e: 

3549 _ops.raise_from_not_ok_status(e, name) 

3550 except _core._FallbackException: 

3551 pass 

3552 try: 

3553 _result = _dispatcher_for_elu( 

3554 (features, name,), None) 

3555 if _result is not NotImplemented: 

3556 return _result 

3557 return elu_eager_fallback( 

3558 features, name=name, ctx=_ctx) 

3559 except _core._SymbolicException: 

3560 pass # Add nodes to the TensorFlow graph. 

3561 except (TypeError, ValueError): 

3562 _result = _dispatch.dispatch( 

3563 elu, (), dict(features=features, name=name) 

3564 ) 

3565 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

3566 return _result 

3567 raise 

3568 else: 

3569 _result = _dispatcher_for_elu( 

3570 (features, name,), None) 

3571 if _result is not NotImplemented: 

3572 return _result 

3573 # Add nodes to the TensorFlow graph. 

3574 try: 

3575 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3576 "Elu", features=features, name=name) 

3577 except (TypeError, ValueError): 

3578 _result = _dispatch.dispatch( 

3579 elu, (), dict(features=features, name=name) 

3580 ) 

3581 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

3582 return _result 

3583 raise 

3584 _result = _outputs[:] 

3585 if _execute.must_record_gradient(): 

3586 _attrs = ("T", _op._get_attr_type("T")) 

3587 _inputs_flat = _op.inputs 

3588 _execute.record_gradient( 

3589 "Elu", _inputs_flat, _attrs, _result) 

3590 _result, = _result 

3591 return _result 

3592 

3593Elu = tf_export("raw_ops.Elu")(_ops.to_raw_op(elu)) 

3594_dispatcher_for_elu = elu._tf_type_based_dispatcher.Dispatch 

3595 

3596 

3597def elu_eager_fallback(features, name, ctx): 

3598 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

3599 _inputs_flat = [features] 

3600 _attrs = ("T", _attr_T) 

3601 _result = _execute.execute(b"Elu", 1, inputs=_inputs_flat, attrs=_attrs, 

3602 ctx=ctx, name=name) 

3603 if _execute.must_record_gradient(): 

3604 _execute.record_gradient( 

3605 "Elu", _inputs_flat, _attrs, _result) 

3606 _result, = _result 

3607 return _result 

3608 

3609 

3610def elu_grad(gradients, outputs, name=None): 

3611 r"""Computes gradients for the exponential linear (Elu) operation. 

3612 

3613 Args: 

3614 gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

3615 The backpropagated gradients to the corresponding Elu operation. 

3616 outputs: A `Tensor`. Must have the same type as `gradients`. 

3617 The outputs of the corresponding Elu operation. 

3618 name: A name for the operation (optional). 

3619 

3620 Returns: 

3621 A `Tensor`. Has the same type as `gradients`. 

3622 """ 

3623 _ctx = _context._context or _context.context() 

3624 tld = _ctx._thread_local_data 

3625 if tld.is_eager: 

3626 try: 

3627 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3628 _ctx, "EluGrad", name, gradients, outputs) 

3629 return _result 

3630 except _core._NotOkStatusException as e: 

3631 _ops.raise_from_not_ok_status(e, name) 

3632 except _core._FallbackException: 

3633 pass 

3634 try: 

3635 return elu_grad_eager_fallback( 

3636 gradients, outputs, name=name, ctx=_ctx) 

3637 except _core._SymbolicException: 

3638 pass # Add nodes to the TensorFlow graph. 

3639 # Add nodes to the TensorFlow graph. 

3640 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3641 "EluGrad", gradients=gradients, outputs=outputs, name=name) 

3642 _result = _outputs[:] 

3643 if _execute.must_record_gradient(): 

3644 _attrs = ("T", _op._get_attr_type("T")) 

3645 _inputs_flat = _op.inputs 

3646 _execute.record_gradient( 

3647 "EluGrad", _inputs_flat, _attrs, _result) 

3648 _result, = _result 

3649 return _result 

3650 

3651EluGrad = tf_export("raw_ops.EluGrad")(_ops.to_raw_op(elu_grad)) 

3652 

3653 

3654def elu_grad_eager_fallback(gradients, outputs, name, ctx): 

3655 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, outputs], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

3656 (gradients, outputs) = _inputs_T 

3657 _inputs_flat = [gradients, outputs] 

3658 _attrs = ("T", _attr_T) 

3659 _result = _execute.execute(b"EluGrad", 1, inputs=_inputs_flat, attrs=_attrs, 

3660 ctx=ctx, name=name) 

3661 if _execute.must_record_gradient(): 

3662 _execute.record_gradient( 

3663 "EluGrad", _inputs_flat, _attrs, _result) 

3664 _result, = _result 

3665 return _result 

3666 

3667_FractionalAvgPoolOutput = collections.namedtuple( 

3668 "FractionalAvgPool", 

3669 ["output", "row_pooling_sequence", "col_pooling_sequence"]) 

3670 

3671 

3672def fractional_avg_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): 

3673 r"""Performs fractional average pooling on the input. 

3674 

3675 Fractional average pooling is similar to Fractional max pooling in the pooling 

3676 region generation step. The only difference is that after pooling regions are 

3677 generated, a mean operation is performed instead of a max operation in each 

3678 pooling region. 

3679 

3680 Args: 

3681 value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`. 

3682 4-D with shape `[batch, height, width, channels]`. 

3683 pooling_ratio: A list of `floats` that has length `>= 4`. 

3684 Pooling ratio for each dimension of `value`, currently only 

3685 supports row and col dimension and should be >= 1.0. For example, a valid 

3686 pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements 

3687 must be 1.0 because we don't allow pooling on batch and channels 

3688 dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions 

3689 respectively. 

3690 pseudo_random: An optional `bool`. Defaults to `False`. 

3691 When set to True, generates the pooling sequence in a 

3692 pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin 

3693 Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for 

3694 difference between pseudorandom and random. 

3695 overlapping: An optional `bool`. Defaults to `False`. 

3696 When set to True, it means when pooling, the values at the boundary 

3697 of adjacent pooling cells are used by both cells. For example: 

3698 

3699 `index 0 1 2 3 4` 

3700 

3701 `value 20 5 16 3 7` 

3702 

3703 If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 

3704 The result would be [41/3, 26/3] for fractional avg pooling. 

3705 deterministic: An optional `bool`. Defaults to `False`. 

3706 When set to True, a fixed pooling region will be used when 

3707 iterating over a FractionalAvgPool node in the computation graph. Mainly used 

3708 in unit test to make FractionalAvgPool deterministic. 

3709 seed: An optional `int`. Defaults to `0`. 

3710 If either seed or seed2 are set to be non-zero, the random number 

3711 generator is seeded by the given seed. Otherwise, it is seeded by a 

3712 random seed. 

3713 seed2: An optional `int`. Defaults to `0`. 

3714 An second seed to avoid seed collision. 

3715 name: A name for the operation (optional). 

3716 

3717 Returns: 

3718 A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence). 

3719 

3720 output: A `Tensor`. Has the same type as `value`. 

3721 row_pooling_sequence: A `Tensor` of type `int64`. 

3722 col_pooling_sequence: A `Tensor` of type `int64`. 

3723 """ 

3724 _ctx = _context._context or _context.context() 

3725 tld = _ctx._thread_local_data 

3726 if tld.is_eager: 

3727 try: 

3728 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3729 _ctx, "FractionalAvgPool", name, value, "pooling_ratio", 

3730 pooling_ratio, "pseudo_random", pseudo_random, "overlapping", 

3731 overlapping, "deterministic", deterministic, "seed", seed, "seed2", 

3732 seed2) 

3733 _result = _FractionalAvgPoolOutput._make(_result) 

3734 return _result 

3735 except _core._NotOkStatusException as e: 

3736 _ops.raise_from_not_ok_status(e, name) 

3737 except _core._FallbackException: 

3738 pass 

3739 try: 

3740 return fractional_avg_pool_eager_fallback( 

3741 value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, 

3742 overlapping=overlapping, deterministic=deterministic, seed=seed, 

3743 seed2=seed2, name=name, ctx=_ctx) 

3744 except _core._SymbolicException: 

3745 pass # Add nodes to the TensorFlow graph. 

3746 # Add nodes to the TensorFlow graph. 

3747 if not isinstance(pooling_ratio, (list, tuple)): 

3748 raise TypeError( 

3749 "Expected list for 'pooling_ratio' argument to " 

3750 "'fractional_avg_pool' Op, not %r." % pooling_ratio) 

3751 pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio] 

3752 if pseudo_random is None: 

3753 pseudo_random = False 

3754 pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random") 

3755 if overlapping is None: 

3756 overlapping = False 

3757 overlapping = _execute.make_bool(overlapping, "overlapping") 

3758 if deterministic is None: 

3759 deterministic = False 

3760 deterministic = _execute.make_bool(deterministic, "deterministic") 

3761 if seed is None: 

3762 seed = 0 

3763 seed = _execute.make_int(seed, "seed") 

3764 if seed2 is None: 

3765 seed2 = 0 

3766 seed2 = _execute.make_int(seed2, "seed2") 

3767 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3768 "FractionalAvgPool", value=value, pooling_ratio=pooling_ratio, 

3769 pseudo_random=pseudo_random, 

3770 overlapping=overlapping, 

3771 deterministic=deterministic, seed=seed, 

3772 seed2=seed2, name=name) 

3773 _result = _outputs[:] 

3774 if _execute.must_record_gradient(): 

3775 _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", 

3776 _op._get_attr_bool("pseudo_random"), "overlapping", 

3777 _op._get_attr_bool("overlapping"), "deterministic", 

3778 _op._get_attr_bool("deterministic"), "seed", 

3779 _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), 

3780 "T", _op._get_attr_type("T")) 

3781 _inputs_flat = _op.inputs 

3782 _execute.record_gradient( 

3783 "FractionalAvgPool", _inputs_flat, _attrs, _result) 

3784 _result = _FractionalAvgPoolOutput._make(_result) 

3785 return _result 

3786 

3787FractionalAvgPool = tf_export("raw_ops.FractionalAvgPool")(_ops.to_raw_op(fractional_avg_pool)) 

3788 

3789 

3790def fractional_avg_pool_eager_fallback(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name, ctx): 

3791 if not isinstance(pooling_ratio, (list, tuple)): 

3792 raise TypeError( 

3793 "Expected list for 'pooling_ratio' argument to " 

3794 "'fractional_avg_pool' Op, not %r." % pooling_ratio) 

3795 pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio] 

3796 if pseudo_random is None: 

3797 pseudo_random = False 

3798 pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random") 

3799 if overlapping is None: 

3800 overlapping = False 

3801 overlapping = _execute.make_bool(overlapping, "overlapping") 

3802 if deterministic is None: 

3803 deterministic = False 

3804 deterministic = _execute.make_bool(deterministic, "deterministic") 

3805 if seed is None: 

3806 seed = 0 

3807 seed = _execute.make_int(seed, "seed") 

3808 if seed2 is None: 

3809 seed2 = 0 

3810 seed2 = _execute.make_int(seed2, "seed2") 

3811 _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) 

3812 _inputs_flat = [value] 

3813 _attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, 

3814 "overlapping", overlapping, "deterministic", deterministic, "seed", seed, 

3815 "seed2", seed2, "T", _attr_T) 

3816 _result = _execute.execute(b"FractionalAvgPool", 3, inputs=_inputs_flat, 

3817 attrs=_attrs, ctx=ctx, name=name) 

3818 if _execute.must_record_gradient(): 

3819 _execute.record_gradient( 

3820 "FractionalAvgPool", _inputs_flat, _attrs, _result) 

3821 _result = _FractionalAvgPoolOutput._make(_result) 

3822 return _result 

3823 

3824 

3825def fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None): 

3826 r"""Computes gradient of the FractionalAvgPool function. 

3827 

3828 Unlike FractionalMaxPoolGrad, we don't need to find arg_max for 

3829 FractionalAvgPoolGrad, we just need to evenly back-propagate each element of 

3830 out_backprop to those indices that form the same pooling cell. Therefore, we 

3831 just need to know the shape of original input tensor, instead of the whole 

3832 tensor. 

3833 

3834 Args: 

3835 orig_input_tensor_shape: A `Tensor` of type `int64`. 

3836 Original input tensor shape for `fractional_avg_pool` 

3837 out_backprop: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`. 

3838 4-D with shape `[batch, height, width, channels]`. Gradients 

3839 w.r.t. the output of `fractional_avg_pool`. 

3840 row_pooling_sequence: A `Tensor` of type `int64`. 

3841 row pooling sequence, form pooling region with 

3842 col_pooling_sequence. 

3843 col_pooling_sequence: A `Tensor` of type `int64`. 

3844 column pooling sequence, form pooling region with 

3845 row_pooling sequence. 

3846 overlapping: An optional `bool`. Defaults to `False`. 

3847 When set to True, it means when pooling, the values at the boundary 

3848 of adjacent pooling cells are used by both cells. For example: 

3849 

3850 `index 0 1 2 3 4` 

3851 

3852 `value 20 5 16 3 7` 

3853 

3854 If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 

3855 The result would be [41/3, 26/3] for fractional avg pooling. 

3856 name: A name for the operation (optional). 

3857 

3858 Returns: 

3859 A `Tensor`. Has the same type as `out_backprop`. 

3860 """ 

3861 _ctx = _context._context or _context.context() 

3862 tld = _ctx._thread_local_data 

3863 if tld.is_eager: 

3864 try: 

3865 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

3866 _ctx, "FractionalAvgPoolGrad", name, orig_input_tensor_shape, 

3867 out_backprop, row_pooling_sequence, col_pooling_sequence, 

3868 "overlapping", overlapping) 

3869 return _result 

3870 except _core._NotOkStatusException as e: 

3871 _ops.raise_from_not_ok_status(e, name) 

3872 except _core._FallbackException: 

3873 pass 

3874 try: 

3875 return fractional_avg_pool_grad_eager_fallback( 

3876 orig_input_tensor_shape, out_backprop, row_pooling_sequence, 

3877 col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx) 

3878 except _core._SymbolicException: 

3879 pass # Add nodes to the TensorFlow graph. 

3880 # Add nodes to the TensorFlow graph. 

3881 if overlapping is None: 

3882 overlapping = False 

3883 overlapping = _execute.make_bool(overlapping, "overlapping") 

3884 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

3885 "FractionalAvgPoolGrad", orig_input_tensor_shape=orig_input_tensor_shape, 

3886 out_backprop=out_backprop, 

3887 row_pooling_sequence=row_pooling_sequence, 

3888 col_pooling_sequence=col_pooling_sequence, 

3889 overlapping=overlapping, name=name) 

3890 _result = _outputs[:] 

3891 if _execute.must_record_gradient(): 

3892 _attrs = ("overlapping", _op._get_attr_bool("overlapping"), "T", 

3893 _op._get_attr_type("T")) 

3894 _inputs_flat = _op.inputs 

3895 _execute.record_gradient( 

3896 "FractionalAvgPoolGrad", _inputs_flat, _attrs, _result) 

3897 _result, = _result 

3898 return _result 

3899 

3900FractionalAvgPoolGrad = tf_export("raw_ops.FractionalAvgPoolGrad")(_ops.to_raw_op(fractional_avg_pool_grad)) 

3901 

3902 

3903def fractional_avg_pool_grad_eager_fallback(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping, name, ctx): 

3904 if overlapping is None: 

3905 overlapping = False 

3906 overlapping = _execute.make_bool(overlapping, "overlapping") 

3907 _attr_T, (out_backprop,) = _execute.args_to_matching_eager([out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) 

3908 orig_input_tensor_shape = _ops.convert_to_tensor(orig_input_tensor_shape, _dtypes.int64) 

3909 row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64) 

3910 col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64) 

3911 _inputs_flat = [orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence] 

3912 _attrs = ("overlapping", overlapping, "T", _attr_T) 

3913 _result = _execute.execute(b"FractionalAvgPoolGrad", 1, inputs=_inputs_flat, 

3914 attrs=_attrs, ctx=ctx, name=name) 

3915 if _execute.must_record_gradient(): 

3916 _execute.record_gradient( 

3917 "FractionalAvgPoolGrad", _inputs_flat, _attrs, _result) 

3918 _result, = _result 

3919 return _result 

3920 

3921_FractionalMaxPoolOutput = collections.namedtuple( 

3922 "FractionalMaxPool", 

3923 ["output", "row_pooling_sequence", "col_pooling_sequence"]) 

3924 

3925 

3926def fractional_max_pool(value, pooling_ratio, pseudo_random=False, overlapping=False, deterministic=False, seed=0, seed2=0, name=None): 

3927 r"""Performs fractional max pooling on the input. 

3928 

3929 Fractional max pooling is slightly different than regular max pooling. In 

3930 regular max pooling, you downsize an input set by taking the maximum value of 

3931 smaller N x N subsections of the set (often 2x2), and try to reduce the set by 

3932 a factor of N, where N is an integer. Fractional max pooling, as you might 

3933 expect from the word "fractional", means that the overall reduction ratio N 

3934 does not have to be an integer. 

3935 

3936 The sizes of the pooling regions are generated randomly but are fairly uniform. 

3937 For example, let's look at the height dimension, and the constraints on the 

3938 list of rows that will be pool boundaries. 

3939 

3940 First we define the following: 

3941 

3942 1. input_row_length : the number of rows from the input set 

3943 2. output_row_length : which will be smaller than the input 

3944 3. alpha = input_row_length / output_row_length : our reduction ratio 

3945 4. K = floor(alpha) 

3946 5. row_pooling_sequence : this is the result list of pool boundary rows 

3947 

3948 Then, row_pooling_sequence should satisfy: 

3949 

3950 1. a[0] = 0 : the first value of the sequence is 0 

3951 2. a[end] = input_row_length : the last value of the sequence is the size 

3952 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size 

3953 4. length(row_pooling_sequence) = output_row_length+1 

3954 

3955 For more details on fractional max pooling, see this paper: 

3956 [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) 

3957 

3958 Args: 

3959 value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`. 

3960 4-D with shape `[batch, height, width, channels]`. 

3961 pooling_ratio: A list of `floats` that has length `>= 4`. 

3962 Pooling ratio for each dimension of `value`, currently only 

3963 supports row and col dimension and should be >= 1.0. For example, a valid 

3964 pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements 

3965 must be 1.0 because we don't allow pooling on batch and channels 

3966 dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions 

3967 respectively. 

3968 pseudo_random: An optional `bool`. Defaults to `False`. 

3969 When set to True, generates the pooling sequence in a 

3970 pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin 

3971 Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for 

3972 difference between pseudorandom and random. 

3973 overlapping: An optional `bool`. Defaults to `False`. 

3974 When set to True, it means when pooling, the values at the boundary 

3975 of adjacent pooling cells are used by both cells. For example: 

3976 

3977 `index 0 1 2 3 4` 

3978 

3979 `value 20 5 16 3 7` 

3980 

3981 If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 

3982 The result would be [20, 16] for fractional max pooling. 

3983 deterministic: An optional `bool`. Defaults to `False`. 

3984 When set to True, a fixed pooling region will be used when 

3985 iterating over a FractionalMaxPool node in the computation graph. Mainly used 

3986 in unit test to make FractionalMaxPool deterministic. 

3987 seed: An optional `int`. Defaults to `0`. 

3988 If either seed or seed2 are set to be non-zero, the random number 

3989 generator is seeded by the given seed. Otherwise, it is seeded by a 

3990 random seed. 

3991 seed2: An optional `int`. Defaults to `0`. 

3992 An second seed to avoid seed collision. 

3993 name: A name for the operation (optional). 

3994 

3995 Returns: 

3996 A tuple of `Tensor` objects (output, row_pooling_sequence, col_pooling_sequence). 

3997 

3998 output: A `Tensor`. Has the same type as `value`. 

3999 row_pooling_sequence: A `Tensor` of type `int64`. 

4000 col_pooling_sequence: A `Tensor` of type `int64`. 

4001 """ 

4002 _ctx = _context._context or _context.context() 

4003 tld = _ctx._thread_local_data 

4004 if tld.is_eager: 

4005 try: 

4006 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4007 _ctx, "FractionalMaxPool", name, value, "pooling_ratio", 

4008 pooling_ratio, "pseudo_random", pseudo_random, "overlapping", 

4009 overlapping, "deterministic", deterministic, "seed", seed, "seed2", 

4010 seed2) 

4011 _result = _FractionalMaxPoolOutput._make(_result) 

4012 return _result 

4013 except _core._NotOkStatusException as e: 

4014 _ops.raise_from_not_ok_status(e, name) 

4015 except _core._FallbackException: 

4016 pass 

4017 try: 

4018 return fractional_max_pool_eager_fallback( 

4019 value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, 

4020 overlapping=overlapping, deterministic=deterministic, seed=seed, 

4021 seed2=seed2, name=name, ctx=_ctx) 

4022 except _core._SymbolicException: 

4023 pass # Add nodes to the TensorFlow graph. 

4024 # Add nodes to the TensorFlow graph. 

4025 if not isinstance(pooling_ratio, (list, tuple)): 

4026 raise TypeError( 

4027 "Expected list for 'pooling_ratio' argument to " 

4028 "'fractional_max_pool' Op, not %r." % pooling_ratio) 

4029 pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio] 

4030 if pseudo_random is None: 

4031 pseudo_random = False 

4032 pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random") 

4033 if overlapping is None: 

4034 overlapping = False 

4035 overlapping = _execute.make_bool(overlapping, "overlapping") 

4036 if deterministic is None: 

4037 deterministic = False 

4038 deterministic = _execute.make_bool(deterministic, "deterministic") 

4039 if seed is None: 

4040 seed = 0 

4041 seed = _execute.make_int(seed, "seed") 

4042 if seed2 is None: 

4043 seed2 = 0 

4044 seed2 = _execute.make_int(seed2, "seed2") 

4045 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4046 "FractionalMaxPool", value=value, pooling_ratio=pooling_ratio, 

4047 pseudo_random=pseudo_random, 

4048 overlapping=overlapping, 

4049 deterministic=deterministic, seed=seed, 

4050 seed2=seed2, name=name) 

4051 _result = _outputs[:] 

4052 if _execute.must_record_gradient(): 

4053 _attrs = ("pooling_ratio", _op.get_attr("pooling_ratio"), "pseudo_random", 

4054 _op._get_attr_bool("pseudo_random"), "overlapping", 

4055 _op._get_attr_bool("overlapping"), "deterministic", 

4056 _op._get_attr_bool("deterministic"), "seed", 

4057 _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"), 

4058 "T", _op._get_attr_type("T")) 

4059 _inputs_flat = _op.inputs 

4060 _execute.record_gradient( 

4061 "FractionalMaxPool", _inputs_flat, _attrs, _result) 

4062 _result = _FractionalMaxPoolOutput._make(_result) 

4063 return _result 

4064 

4065FractionalMaxPool = tf_export("raw_ops.FractionalMaxPool")(_ops.to_raw_op(fractional_max_pool)) 

4066 

4067 

4068def fractional_max_pool_eager_fallback(value, pooling_ratio, pseudo_random, overlapping, deterministic, seed, seed2, name, ctx): 

4069 if not isinstance(pooling_ratio, (list, tuple)): 

4070 raise TypeError( 

4071 "Expected list for 'pooling_ratio' argument to " 

4072 "'fractional_max_pool' Op, not %r." % pooling_ratio) 

4073 pooling_ratio = [_execute.make_float(_f, "pooling_ratio") for _f in pooling_ratio] 

4074 if pseudo_random is None: 

4075 pseudo_random = False 

4076 pseudo_random = _execute.make_bool(pseudo_random, "pseudo_random") 

4077 if overlapping is None: 

4078 overlapping = False 

4079 overlapping = _execute.make_bool(overlapping, "overlapping") 

4080 if deterministic is None: 

4081 deterministic = False 

4082 deterministic = _execute.make_bool(deterministic, "deterministic") 

4083 if seed is None: 

4084 seed = 0 

4085 seed = _execute.make_int(seed, "seed") 

4086 if seed2 is None: 

4087 seed2 = 0 

4088 seed2 = _execute.make_int(seed2, "seed2") 

4089 _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) 

4090 _inputs_flat = [value] 

4091 _attrs = ("pooling_ratio", pooling_ratio, "pseudo_random", pseudo_random, 

4092 "overlapping", overlapping, "deterministic", deterministic, "seed", seed, 

4093 "seed2", seed2, "T", _attr_T) 

4094 _result = _execute.execute(b"FractionalMaxPool", 3, inputs=_inputs_flat, 

4095 attrs=_attrs, ctx=ctx, name=name) 

4096 if _execute.must_record_gradient(): 

4097 _execute.record_gradient( 

4098 "FractionalMaxPool", _inputs_flat, _attrs, _result) 

4099 _result = _FractionalMaxPoolOutput._make(_result) 

4100 return _result 

4101 

4102 

4103def fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping=False, name=None): 

4104 r"""Computes gradient of the FractionalMaxPool function. 

4105 

4106 Args: 

4107 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`. 

4108 Original input for `fractional_max_pool` 

4109 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

4110 Original output for `fractional_max_pool` 

4111 out_backprop: A `Tensor`. Must have the same type as `orig_input`. 

4112 4-D with shape `[batch, height, width, channels]`. Gradients 

4113 w.r.t. the output of `fractional_max_pool`. 

4114 row_pooling_sequence: A `Tensor` of type `int64`. 

4115 row pooling sequence, form pooling region with 

4116 col_pooling_sequence. 

4117 col_pooling_sequence: A `Tensor` of type `int64`. 

4118 column pooling sequence, form pooling region with 

4119 row_pooling sequence. 

4120 overlapping: An optional `bool`. Defaults to `False`. 

4121 When set to True, it means when pooling, the values at the boundary 

4122 of adjacent pooling cells are used by both cells. For example: 

4123 

4124 `index 0 1 2 3 4` 

4125 

4126 `value 20 5 16 3 7` 

4127 

4128 If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 

4129 The result would be [20, 16] for fractional max pooling. 

4130 name: A name for the operation (optional). 

4131 

4132 Returns: 

4133 A `Tensor`. Has the same type as `orig_input`. 

4134 """ 

4135 _ctx = _context._context or _context.context() 

4136 tld = _ctx._thread_local_data 

4137 if tld.is_eager: 

4138 try: 

4139 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4140 _ctx, "FractionalMaxPoolGrad", name, orig_input, orig_output, 

4141 out_backprop, row_pooling_sequence, col_pooling_sequence, 

4142 "overlapping", overlapping) 

4143 return _result 

4144 except _core._NotOkStatusException as e: 

4145 _ops.raise_from_not_ok_status(e, name) 

4146 except _core._FallbackException: 

4147 pass 

4148 try: 

4149 return fractional_max_pool_grad_eager_fallback( 

4150 orig_input, orig_output, out_backprop, row_pooling_sequence, 

4151 col_pooling_sequence, overlapping=overlapping, name=name, ctx=_ctx) 

4152 except _core._SymbolicException: 

4153 pass # Add nodes to the TensorFlow graph. 

4154 # Add nodes to the TensorFlow graph. 

4155 if overlapping is None: 

4156 overlapping = False 

4157 overlapping = _execute.make_bool(overlapping, "overlapping") 

4158 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4159 "FractionalMaxPoolGrad", orig_input=orig_input, 

4160 orig_output=orig_output, 

4161 out_backprop=out_backprop, 

4162 row_pooling_sequence=row_pooling_sequence, 

4163 col_pooling_sequence=col_pooling_sequence, 

4164 overlapping=overlapping, name=name) 

4165 _result = _outputs[:] 

4166 if _execute.must_record_gradient(): 

4167 _attrs = ("overlapping", _op._get_attr_bool("overlapping"), "T", 

4168 _op._get_attr_type("T")) 

4169 _inputs_flat = _op.inputs 

4170 _execute.record_gradient( 

4171 "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result) 

4172 _result, = _result 

4173 return _result 

4174 

4175FractionalMaxPoolGrad = tf_export("raw_ops.FractionalMaxPoolGrad")(_ops.to_raw_op(fractional_max_pool_grad)) 

4176 

4177 

4178def fractional_max_pool_grad_eager_fallback(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence, overlapping, name, ctx): 

4179 if overlapping is None: 

4180 overlapping = False 

4181 overlapping = _execute.make_bool(overlapping, "overlapping") 

4182 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, out_backprop], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, ]) 

4183 (orig_input, orig_output, out_backprop) = _inputs_T 

4184 row_pooling_sequence = _ops.convert_to_tensor(row_pooling_sequence, _dtypes.int64) 

4185 col_pooling_sequence = _ops.convert_to_tensor(col_pooling_sequence, _dtypes.int64) 

4186 _inputs_flat = [orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence] 

4187 _attrs = ("overlapping", overlapping, "T", _attr_T) 

4188 _result = _execute.execute(b"FractionalMaxPoolGrad", 1, inputs=_inputs_flat, 

4189 attrs=_attrs, ctx=ctx, name=name) 

4190 if _execute.must_record_gradient(): 

4191 _execute.record_gradient( 

4192 "FractionalMaxPoolGrad", _inputs_flat, _attrs, _result) 

4193 _result, = _result 

4194 return _result 

4195 

4196_FusedBatchNormOutput = collections.namedtuple( 

4197 "FusedBatchNorm", 

4198 ["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2"]) 

4199 

4200 

4201def _fused_batch_norm(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None): 

4202 r"""Batch normalization. 

4203 

4204 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4205 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4206 

4207 Args: 

4208 x: A `Tensor`. Must be one of the following types: `float32`. 

4209 A 4D Tensor for input data. 

4210 scale: A `Tensor`. Must have the same type as `x`. 

4211 A 1D Tensor for scaling factor, to scale the normalized x. 

4212 offset: A `Tensor`. Must have the same type as `x`. 

4213 A 1D Tensor for offset, to shift to the normalized x. 

4214 mean: A `Tensor`. Must have the same type as `x`. 

4215 A 1D Tensor for population mean. Used for inference only; 

4216 must be empty for training. 

4217 variance: A `Tensor`. Must have the same type as `x`. 

4218 A 1D Tensor for population variance. Used for inference only; 

4219 must be empty for training. 

4220 epsilon: An optional `float`. Defaults to `0.0001`. 

4221 A small float number added to the variance of x. 

4222 exponential_avg_factor: An optional `float`. Defaults to `1`. 

4223 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

4224 The data format for x and y. Either "NHWC" (default) or "NCHW". 

4225 is_training: An optional `bool`. Defaults to `True`. 

4226 A bool value to indicate the operation is for training (default) 

4227 or inference. 

4228 name: A name for the operation (optional). 

4229 

4230 Returns: 

4231 A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2). 

4232 

4233 y: A `Tensor`. Has the same type as `x`. 

4234 batch_mean: A `Tensor`. Has the same type as `x`. 

4235 batch_variance: A `Tensor`. Has the same type as `x`. 

4236 reserve_space_1: A `Tensor`. Has the same type as `x`. 

4237 reserve_space_2: A `Tensor`. Has the same type as `x`. 

4238 """ 

4239 _ctx = _context._context or _context.context() 

4240 tld = _ctx._thread_local_data 

4241 if tld.is_eager: 

4242 try: 

4243 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4244 _ctx, "FusedBatchNorm", name, x, scale, offset, mean, variance, 

4245 "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, 

4246 "data_format", data_format, "is_training", is_training) 

4247 _result = _FusedBatchNormOutput._make(_result) 

4248 return _result 

4249 except _core._NotOkStatusException as e: 

4250 _ops.raise_from_not_ok_status(e, name) 

4251 except _core._FallbackException: 

4252 pass 

4253 try: 

4254 return _fused_batch_norm_eager_fallback( 

4255 x, scale, offset, mean, variance, epsilon=epsilon, 

4256 exponential_avg_factor=exponential_avg_factor, 

4257 data_format=data_format, is_training=is_training, name=name, 

4258 ctx=_ctx) 

4259 except _core._SymbolicException: 

4260 pass # Add nodes to the TensorFlow graph. 

4261 # Add nodes to the TensorFlow graph. 

4262 if epsilon is None: 

4263 epsilon = 0.0001 

4264 epsilon = _execute.make_float(epsilon, "epsilon") 

4265 if exponential_avg_factor is None: 

4266 exponential_avg_factor = 1 

4267 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4268 if data_format is None: 

4269 data_format = "NHWC" 

4270 data_format = _execute.make_str(data_format, "data_format") 

4271 if is_training is None: 

4272 is_training = True 

4273 is_training = _execute.make_bool(is_training, "is_training") 

4274 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4275 "FusedBatchNorm", x=x, scale=scale, offset=offset, mean=mean, 

4276 variance=variance, epsilon=epsilon, 

4277 exponential_avg_factor=exponential_avg_factor, 

4278 data_format=data_format, is_training=is_training, 

4279 name=name) 

4280 _result = _outputs[:] 

4281 if _execute.must_record_gradient(): 

4282 _attrs = ("T", _op._get_attr_type("T"), "epsilon", 

4283 _op.get_attr("epsilon"), "exponential_avg_factor", 

4284 _op.get_attr("exponential_avg_factor"), "data_format", 

4285 _op.get_attr("data_format"), "is_training", 

4286 _op._get_attr_bool("is_training")) 

4287 _inputs_flat = _op.inputs 

4288 _execute.record_gradient( 

4289 "FusedBatchNorm", _inputs_flat, _attrs, _result) 

4290 _result = _FusedBatchNormOutput._make(_result) 

4291 return _result 

4292 

4293FusedBatchNorm = tf_export("raw_ops.FusedBatchNorm")(_ops.to_raw_op(_fused_batch_norm)) 

4294 

4295 

4296def _fused_batch_norm_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx): 

4297 if epsilon is None: 

4298 epsilon = 0.0001 

4299 epsilon = _execute.make_float(epsilon, "epsilon") 

4300 if exponential_avg_factor is None: 

4301 exponential_avg_factor = 1 

4302 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4303 if data_format is None: 

4304 data_format = "NHWC" 

4305 data_format = _execute.make_str(data_format, "data_format") 

4306 if is_training is None: 

4307 is_training = True 

4308 is_training = _execute.make_bool(is_training, "is_training") 

4309 _attr_T, _inputs_T = _execute.args_to_matching_eager([x, scale, offset, mean, variance], ctx, [_dtypes.float32, ]) 

4310 (x, scale, offset, mean, variance) = _inputs_T 

4311 _inputs_flat = [x, scale, offset, mean, variance] 

4312 _attrs = ("T", _attr_T, "epsilon", epsilon, "exponential_avg_factor", 

4313 exponential_avg_factor, "data_format", data_format, "is_training", 

4314 is_training) 

4315 _result = _execute.execute(b"FusedBatchNorm", 5, inputs=_inputs_flat, 

4316 attrs=_attrs, ctx=ctx, name=name) 

4317 if _execute.must_record_gradient(): 

4318 _execute.record_gradient( 

4319 "FusedBatchNorm", _inputs_flat, _attrs, _result) 

4320 _result = _FusedBatchNormOutput._make(_result) 

4321 return _result 

4322 

4323_FusedBatchNormGradOutput = collections.namedtuple( 

4324 "FusedBatchNormGrad", 

4325 ["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_3", "reserve_space_4"]) 

4326 

4327 

4328def fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None): 

4329 r"""Gradient for batch normalization. 

4330 

4331 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4332 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4333 

4334 Args: 

4335 y_backprop: A `Tensor`. Must be one of the following types: `float32`. 

4336 A 4D Tensor for the gradient with respect to y. 

4337 x: A `Tensor`. Must have the same type as `y_backprop`. 

4338 A 4D Tensor for input data. 

4339 scale: A `Tensor`. Must have the same type as `y_backprop`. 

4340 A 1D Tensor for scaling factor, to scale the normalized x. 

4341 reserve_space_1: A `Tensor`. Must have the same type as `y_backprop`. 

4342 When is_training is True, a 1D Tensor for the computed batch 

4343 mean to be reused in gradient computation. When is_training is 

4344 False, a 1D Tensor for the population mean to be reused in both 

4345 1st and 2nd order gradient computation. 

4346 reserve_space_2: A `Tensor`. Must have the same type as `y_backprop`. 

4347 When is_training is True, a 1D Tensor for the computed batch 

4348 variance (inverted variance in the cuDNN case) to be reused in 

4349 gradient computation. When is_training is False, a 1D Tensor 

4350 for the population variance to be reused in both 1st and 2nd 

4351 order gradient computation. 

4352 epsilon: An optional `float`. Defaults to `0.0001`. 

4353 A small float number added to the variance of x. 

4354 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

4355 The data format for y_backprop, x, x_backprop. 

4356 Either "NHWC" (default) or "NCHW". 

4357 is_training: An optional `bool`. Defaults to `True`. 

4358 A bool value to indicate the operation is for training (default) 

4359 or inference. 

4360 name: A name for the operation (optional). 

4361 

4362 Returns: 

4363 A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4). 

4364 

4365 x_backprop: A `Tensor`. Has the same type as `y_backprop`. 

4366 scale_backprop: A `Tensor`. Has the same type as `y_backprop`. 

4367 offset_backprop: A `Tensor`. Has the same type as `y_backprop`. 

4368 reserve_space_3: A `Tensor`. Has the same type as `y_backprop`. 

4369 reserve_space_4: A `Tensor`. Has the same type as `y_backprop`. 

4370 """ 

4371 _ctx = _context._context or _context.context() 

4372 tld = _ctx._thread_local_data 

4373 if tld.is_eager: 

4374 try: 

4375 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4376 _ctx, "FusedBatchNormGrad", name, y_backprop, x, scale, 

4377 reserve_space_1, reserve_space_2, "epsilon", epsilon, "data_format", 

4378 data_format, "is_training", is_training) 

4379 _result = _FusedBatchNormGradOutput._make(_result) 

4380 return _result 

4381 except _core._NotOkStatusException as e: 

4382 _ops.raise_from_not_ok_status(e, name) 

4383 except _core._FallbackException: 

4384 pass 

4385 try: 

4386 return fused_batch_norm_grad_eager_fallback( 

4387 y_backprop, x, scale, reserve_space_1, reserve_space_2, 

4388 epsilon=epsilon, data_format=data_format, is_training=is_training, 

4389 name=name, ctx=_ctx) 

4390 except _core._SymbolicException: 

4391 pass # Add nodes to the TensorFlow graph. 

4392 # Add nodes to the TensorFlow graph. 

4393 if epsilon is None: 

4394 epsilon = 0.0001 

4395 epsilon = _execute.make_float(epsilon, "epsilon") 

4396 if data_format is None: 

4397 data_format = "NHWC" 

4398 data_format = _execute.make_str(data_format, "data_format") 

4399 if is_training is None: 

4400 is_training = True 

4401 is_training = _execute.make_bool(is_training, "is_training") 

4402 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4403 "FusedBatchNormGrad", y_backprop=y_backprop, x=x, scale=scale, 

4404 reserve_space_1=reserve_space_1, 

4405 reserve_space_2=reserve_space_2, 

4406 epsilon=epsilon, data_format=data_format, 

4407 is_training=is_training, name=name) 

4408 _result = _outputs[:] 

4409 if _execute.must_record_gradient(): 

4410 _attrs = ("T", _op._get_attr_type("T"), "epsilon", 

4411 _op.get_attr("epsilon"), "data_format", 

4412 _op.get_attr("data_format"), "is_training", 

4413 _op._get_attr_bool("is_training")) 

4414 _inputs_flat = _op.inputs 

4415 _execute.record_gradient( 

4416 "FusedBatchNormGrad", _inputs_flat, _attrs, _result) 

4417 _result = _FusedBatchNormGradOutput._make(_result) 

4418 return _result 

4419 

4420FusedBatchNormGrad = tf_export("raw_ops.FusedBatchNormGrad")(_ops.to_raw_op(fused_batch_norm_grad)) 

4421 

4422 

4423def fused_batch_norm_grad_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon, data_format, is_training, name, ctx): 

4424 if epsilon is None: 

4425 epsilon = 0.0001 

4426 epsilon = _execute.make_float(epsilon, "epsilon") 

4427 if data_format is None: 

4428 data_format = "NHWC" 

4429 data_format = _execute.make_str(data_format, "data_format") 

4430 if is_training is None: 

4431 is_training = True 

4432 is_training = _execute.make_bool(is_training, "is_training") 

4433 _attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x, scale, reserve_space_1, reserve_space_2], ctx, [_dtypes.float32, ]) 

4434 (y_backprop, x, scale, reserve_space_1, reserve_space_2) = _inputs_T 

4435 _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2] 

4436 _attrs = ("T", _attr_T, "epsilon", epsilon, "data_format", data_format, 

4437 "is_training", is_training) 

4438 _result = _execute.execute(b"FusedBatchNormGrad", 5, inputs=_inputs_flat, 

4439 attrs=_attrs, ctx=ctx, name=name) 

4440 if _execute.must_record_gradient(): 

4441 _execute.record_gradient( 

4442 "FusedBatchNormGrad", _inputs_flat, _attrs, _result) 

4443 _result = _FusedBatchNormGradOutput._make(_result) 

4444 return _result 

4445 

4446_FusedBatchNormGradV2Output = collections.namedtuple( 

4447 "FusedBatchNormGradV2", 

4448 ["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_3", "reserve_space_4"]) 

4449 

4450 

4451def fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon=0.0001, data_format="NHWC", is_training=True, name=None): 

4452 r"""Gradient for batch normalization. 

4453 

4454 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4455 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4456 

4457 Args: 

4458 y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

4459 A 4D Tensor for the gradient with respect to y. 

4460 x: A `Tensor`. Must have the same type as `y_backprop`. 

4461 A 4D Tensor for input data. 

4462 scale: A `Tensor` of type `float32`. 

4463 A 1D Tensor for scaling factor, to scale the normalized x. 

4464 reserve_space_1: A `Tensor`. Must be one of the following types: `float32`. 

4465 When is_training is True, a 1D Tensor for the computed batch 

4466 mean to be reused in gradient computation. When is_training is 

4467 False, a 1D Tensor for the population mean to be reused in both 

4468 1st and 2nd order gradient computation. 

4469 reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`. 

4470 When is_training is True, a 1D Tensor for the computed batch 

4471 variance (inverted variance in the cuDNN case) to be reused in 

4472 gradient computation. When is_training is False, a 1D Tensor 

4473 for the population variance to be reused in both 1st and 2nd 

4474 order gradient computation. 

4475 epsilon: An optional `float`. Defaults to `0.0001`. 

4476 A small float number added to the variance of x. 

4477 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

4478 The data format for y_backprop, x, x_backprop. 

4479 Either "NHWC" (default) or "NCHW". 

4480 is_training: An optional `bool`. Defaults to `True`. 

4481 A bool value to indicate the operation is for training (default) 

4482 or inference. 

4483 name: A name for the operation (optional). 

4484 

4485 Returns: 

4486 A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_3, reserve_space_4). 

4487 

4488 x_backprop: A `Tensor`. Has the same type as `y_backprop`. 

4489 scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`. 

4490 offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`. 

4491 reserve_space_3: A `Tensor`. Has the same type as `reserve_space_1`. 

4492 reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`. 

4493 """ 

4494 _ctx = _context._context or _context.context() 

4495 tld = _ctx._thread_local_data 

4496 if tld.is_eager: 

4497 try: 

4498 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4499 _ctx, "FusedBatchNormGradV2", name, y_backprop, x, scale, 

4500 reserve_space_1, reserve_space_2, "epsilon", epsilon, "data_format", 

4501 data_format, "is_training", is_training) 

4502 _result = _FusedBatchNormGradV2Output._make(_result) 

4503 return _result 

4504 except _core._NotOkStatusException as e: 

4505 _ops.raise_from_not_ok_status(e, name) 

4506 except _core._FallbackException: 

4507 pass 

4508 try: 

4509 return fused_batch_norm_grad_v2_eager_fallback( 

4510 y_backprop, x, scale, reserve_space_1, reserve_space_2, 

4511 epsilon=epsilon, data_format=data_format, is_training=is_training, 

4512 name=name, ctx=_ctx) 

4513 except _core._SymbolicException: 

4514 pass # Add nodes to the TensorFlow graph. 

4515 # Add nodes to the TensorFlow graph. 

4516 if epsilon is None: 

4517 epsilon = 0.0001 

4518 epsilon = _execute.make_float(epsilon, "epsilon") 

4519 if data_format is None: 

4520 data_format = "NHWC" 

4521 data_format = _execute.make_str(data_format, "data_format") 

4522 if is_training is None: 

4523 is_training = True 

4524 is_training = _execute.make_bool(is_training, "is_training") 

4525 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4526 "FusedBatchNormGradV2", y_backprop=y_backprop, x=x, scale=scale, 

4527 reserve_space_1=reserve_space_1, 

4528 reserve_space_2=reserve_space_2, 

4529 epsilon=epsilon, data_format=data_format, 

4530 is_training=is_training, name=name) 

4531 _result = _outputs[:] 

4532 if _execute.must_record_gradient(): 

4533 _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), 

4534 "epsilon", _op.get_attr("epsilon"), "data_format", 

4535 _op.get_attr("data_format"), "is_training", 

4536 _op._get_attr_bool("is_training")) 

4537 _inputs_flat = _op.inputs 

4538 _execute.record_gradient( 

4539 "FusedBatchNormGradV2", _inputs_flat, _attrs, _result) 

4540 _result = _FusedBatchNormGradV2Output._make(_result) 

4541 return _result 

4542 

4543FusedBatchNormGradV2 = tf_export("raw_ops.FusedBatchNormGradV2")(_ops.to_raw_op(fused_batch_norm_grad_v2)) 

4544 

4545 

4546def fused_batch_norm_grad_v2_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, epsilon, data_format, is_training, name, ctx): 

4547 if epsilon is None: 

4548 epsilon = 0.0001 

4549 epsilon = _execute.make_float(epsilon, "epsilon") 

4550 if data_format is None: 

4551 data_format = "NHWC" 

4552 data_format = _execute.make_str(data_format, "data_format") 

4553 if is_training is None: 

4554 is_training = True 

4555 is_training = _execute.make_bool(is_training, "is_training") 

4556 _attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

4557 (y_backprop, x) = _inputs_T 

4558 _attr_U, _inputs_U = _execute.args_to_matching_eager([reserve_space_1, reserve_space_2], ctx, [_dtypes.float32, ]) 

4559 (reserve_space_1, reserve_space_2) = _inputs_U 

4560 scale = _ops.convert_to_tensor(scale, _dtypes.float32) 

4561 _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2] 

4562 _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format", 

4563 data_format, "is_training", is_training) 

4564 _result = _execute.execute(b"FusedBatchNormGradV2", 5, inputs=_inputs_flat, 

4565 attrs=_attrs, ctx=ctx, name=name) 

4566 if _execute.must_record_gradient(): 

4567 _execute.record_gradient( 

4568 "FusedBatchNormGradV2", _inputs_flat, _attrs, _result) 

4569 _result = _FusedBatchNormGradV2Output._make(_result) 

4570 return _result 

4571 

4572_FusedBatchNormGradV3Output = collections.namedtuple( 

4573 "FusedBatchNormGradV3", 

4574 ["x_backprop", "scale_backprop", "offset_backprop", "reserve_space_4", "reserve_space_5"]) 

4575 

4576 

4577def fused_batch_norm_grad_v3(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon=0.0001, data_format="NHWC", is_training=True, name=None): 

4578 r"""Gradient for batch normalization. 

4579 

4580 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4581 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4582 

4583 Args: 

4584 y_backprop: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

4585 A 4D Tensor for the gradient with respect to y. 

4586 x: A `Tensor`. Must have the same type as `y_backprop`. 

4587 A 4D Tensor for input data. 

4588 scale: A `Tensor` of type `float32`. 

4589 A 1D Tensor for scaling factor, to scale the normalized x. 

4590 reserve_space_1: A `Tensor`. Must be one of the following types: `float32`. 

4591 When is_training is True, a 1D Tensor for the computed batch 

4592 mean to be reused in gradient computation. When is_training is 

4593 False, a 1D Tensor for the population mean to be reused in both 

4594 1st and 2nd order gradient computation. 

4595 reserve_space_2: A `Tensor`. Must have the same type as `reserve_space_1`. 

4596 When is_training is True, a 1D Tensor for the computed batch 

4597 variance (inverted variance in the cuDNN case) to be reused in 

4598 gradient computation. When is_training is False, a 1D Tensor 

4599 for the population variance to be reused in both 1st and 2nd 

4600 order gradient computation. 

4601 reserve_space_3: A `Tensor`. Must have the same type as `reserve_space_1`. 

4602 When is_training is True, a 1D Tensor for some intermediate results to be reused 

4603 in gradient computation. When is_training is False, a dummy empty Tensor will be 

4604 created. 

4605 epsilon: An optional `float`. Defaults to `0.0001`. 

4606 A small float number added to the variance of x. 

4607 data_format: An optional `string` from: `"NHWC", "NCHW", "NDHWC", "NCDHW"`. Defaults to `"NHWC"`. 

4608 The data format for y_backprop, x, x_backprop. 

4609 Either "NHWC" (default) or "NCHW". 

4610 is_training: An optional `bool`. Defaults to `True`. 

4611 A bool value to indicate the operation is for training (default) 

4612 or inference. 

4613 name: A name for the operation (optional). 

4614 

4615 Returns: 

4616 A tuple of `Tensor` objects (x_backprop, scale_backprop, offset_backprop, reserve_space_4, reserve_space_5). 

4617 

4618 x_backprop: A `Tensor`. Has the same type as `y_backprop`. 

4619 scale_backprop: A `Tensor`. Has the same type as `reserve_space_1`. 

4620 offset_backprop: A `Tensor`. Has the same type as `reserve_space_1`. 

4621 reserve_space_4: A `Tensor`. Has the same type as `reserve_space_1`. 

4622 reserve_space_5: A `Tensor`. Has the same type as `reserve_space_1`. 

4623 """ 

4624 _ctx = _context._context or _context.context() 

4625 tld = _ctx._thread_local_data 

4626 if tld.is_eager: 

4627 try: 

4628 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4629 _ctx, "FusedBatchNormGradV3", name, y_backprop, x, scale, 

4630 reserve_space_1, reserve_space_2, reserve_space_3, "epsilon", epsilon, 

4631 "data_format", data_format, "is_training", is_training) 

4632 _result = _FusedBatchNormGradV3Output._make(_result) 

4633 return _result 

4634 except _core._NotOkStatusException as e: 

4635 _ops.raise_from_not_ok_status(e, name) 

4636 except _core._FallbackException: 

4637 pass 

4638 try: 

4639 return fused_batch_norm_grad_v3_eager_fallback( 

4640 y_backprop, x, scale, reserve_space_1, reserve_space_2, 

4641 reserve_space_3, epsilon=epsilon, data_format=data_format, 

4642 is_training=is_training, name=name, ctx=_ctx) 

4643 except _core._SymbolicException: 

4644 pass # Add nodes to the TensorFlow graph. 

4645 # Add nodes to the TensorFlow graph. 

4646 if epsilon is None: 

4647 epsilon = 0.0001 

4648 epsilon = _execute.make_float(epsilon, "epsilon") 

4649 if data_format is None: 

4650 data_format = "NHWC" 

4651 data_format = _execute.make_str(data_format, "data_format") 

4652 if is_training is None: 

4653 is_training = True 

4654 is_training = _execute.make_bool(is_training, "is_training") 

4655 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4656 "FusedBatchNormGradV3", y_backprop=y_backprop, x=x, scale=scale, 

4657 reserve_space_1=reserve_space_1, 

4658 reserve_space_2=reserve_space_2, 

4659 reserve_space_3=reserve_space_3, 

4660 epsilon=epsilon, data_format=data_format, 

4661 is_training=is_training, name=name) 

4662 _result = _outputs[:] 

4663 if _execute.must_record_gradient(): 

4664 _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), 

4665 "epsilon", _op.get_attr("epsilon"), "data_format", 

4666 _op.get_attr("data_format"), "is_training", 

4667 _op._get_attr_bool("is_training")) 

4668 _inputs_flat = _op.inputs 

4669 _execute.record_gradient( 

4670 "FusedBatchNormGradV3", _inputs_flat, _attrs, _result) 

4671 _result = _FusedBatchNormGradV3Output._make(_result) 

4672 return _result 

4673 

4674FusedBatchNormGradV3 = tf_export("raw_ops.FusedBatchNormGradV3")(_ops.to_raw_op(fused_batch_norm_grad_v3)) 

4675 

4676 

4677def fused_batch_norm_grad_v3_eager_fallback(y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3, epsilon, data_format, is_training, name, ctx): 

4678 if epsilon is None: 

4679 epsilon = 0.0001 

4680 epsilon = _execute.make_float(epsilon, "epsilon") 

4681 if data_format is None: 

4682 data_format = "NHWC" 

4683 data_format = _execute.make_str(data_format, "data_format") 

4684 if is_training is None: 

4685 is_training = True 

4686 is_training = _execute.make_bool(is_training, "is_training") 

4687 _attr_T, _inputs_T = _execute.args_to_matching_eager([y_backprop, x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

4688 (y_backprop, x) = _inputs_T 

4689 _attr_U, _inputs_U = _execute.args_to_matching_eager([reserve_space_1, reserve_space_2, reserve_space_3], ctx, [_dtypes.float32, ]) 

4690 (reserve_space_1, reserve_space_2, reserve_space_3) = _inputs_U 

4691 scale = _ops.convert_to_tensor(scale, _dtypes.float32) 

4692 _inputs_flat = [y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3] 

4693 _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, "data_format", 

4694 data_format, "is_training", is_training) 

4695 _result = _execute.execute(b"FusedBatchNormGradV3", 5, inputs=_inputs_flat, 

4696 attrs=_attrs, ctx=ctx, name=name) 

4697 if _execute.must_record_gradient(): 

4698 _execute.record_gradient( 

4699 "FusedBatchNormGradV3", _inputs_flat, _attrs, _result) 

4700 _result = _FusedBatchNormGradV3Output._make(_result) 

4701 return _result 

4702 

4703_FusedBatchNormV2Output = collections.namedtuple( 

4704 "FusedBatchNormV2", 

4705 ["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2"]) 

4706 

4707 

4708def fused_batch_norm_v2(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None): 

4709 r"""Batch normalization. 

4710 

4711 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4712 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4713 

4714 Args: 

4715 x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

4716 A 4D Tensor for input data. 

4717 scale: A `Tensor`. Must be one of the following types: `float32`. 

4718 A 1D Tensor for scaling factor, to scale the normalized x. 

4719 offset: A `Tensor`. Must have the same type as `scale`. 

4720 A 1D Tensor for offset, to shift to the normalized x. 

4721 mean: A `Tensor`. Must have the same type as `scale`. 

4722 A 1D Tensor for population mean. Used for inference only; 

4723 must be empty for training. 

4724 variance: A `Tensor`. Must have the same type as `scale`. 

4725 A 1D Tensor for population variance. Used for inference only; 

4726 must be empty for training. 

4727 epsilon: An optional `float`. Defaults to `0.0001`. 

4728 A small float number added to the variance of x. 

4729 exponential_avg_factor: An optional `float`. Defaults to `1`. 

4730 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

4731 The data format for x and y. Either "NHWC" (default) or "NCHW". 

4732 is_training: An optional `bool`. Defaults to `True`. 

4733 A bool value to indicate the operation is for training (default) 

4734 or inference. 

4735 name: A name for the operation (optional). 

4736 

4737 Returns: 

4738 A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2). 

4739 

4740 y: A `Tensor`. Has the same type as `x`. 

4741 batch_mean: A `Tensor`. Has the same type as `scale`. 

4742 batch_variance: A `Tensor`. Has the same type as `scale`. 

4743 reserve_space_1: A `Tensor`. Has the same type as `scale`. 

4744 reserve_space_2: A `Tensor`. Has the same type as `scale`. 

4745 """ 

4746 _ctx = _context._context or _context.context() 

4747 tld = _ctx._thread_local_data 

4748 if tld.is_eager: 

4749 try: 

4750 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4751 _ctx, "FusedBatchNormV2", name, x, scale, offset, mean, variance, 

4752 "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, 

4753 "data_format", data_format, "is_training", is_training) 

4754 _result = _FusedBatchNormV2Output._make(_result) 

4755 return _result 

4756 except _core._NotOkStatusException as e: 

4757 _ops.raise_from_not_ok_status(e, name) 

4758 except _core._FallbackException: 

4759 pass 

4760 try: 

4761 return fused_batch_norm_v2_eager_fallback( 

4762 x, scale, offset, mean, variance, epsilon=epsilon, 

4763 exponential_avg_factor=exponential_avg_factor, 

4764 data_format=data_format, is_training=is_training, name=name, 

4765 ctx=_ctx) 

4766 except _core._SymbolicException: 

4767 pass # Add nodes to the TensorFlow graph. 

4768 # Add nodes to the TensorFlow graph. 

4769 if epsilon is None: 

4770 epsilon = 0.0001 

4771 epsilon = _execute.make_float(epsilon, "epsilon") 

4772 if exponential_avg_factor is None: 

4773 exponential_avg_factor = 1 

4774 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4775 if data_format is None: 

4776 data_format = "NHWC" 

4777 data_format = _execute.make_str(data_format, "data_format") 

4778 if is_training is None: 

4779 is_training = True 

4780 is_training = _execute.make_bool(is_training, "is_training") 

4781 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4782 "FusedBatchNormV2", x=x, scale=scale, offset=offset, mean=mean, 

4783 variance=variance, epsilon=epsilon, 

4784 exponential_avg_factor=exponential_avg_factor, 

4785 data_format=data_format, is_training=is_training, 

4786 name=name) 

4787 _result = _outputs[:] 

4788 if _execute.must_record_gradient(): 

4789 _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), 

4790 "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", 

4791 _op.get_attr("exponential_avg_factor"), "data_format", 

4792 _op.get_attr("data_format"), "is_training", 

4793 _op._get_attr_bool("is_training")) 

4794 _inputs_flat = _op.inputs 

4795 _execute.record_gradient( 

4796 "FusedBatchNormV2", _inputs_flat, _attrs, _result) 

4797 _result = _FusedBatchNormV2Output._make(_result) 

4798 return _result 

4799 

4800FusedBatchNormV2 = tf_export("raw_ops.FusedBatchNormV2")(_ops.to_raw_op(fused_batch_norm_v2)) 

4801 

4802 

4803def fused_batch_norm_v2_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx): 

4804 if epsilon is None: 

4805 epsilon = 0.0001 

4806 epsilon = _execute.make_float(epsilon, "epsilon") 

4807 if exponential_avg_factor is None: 

4808 exponential_avg_factor = 1 

4809 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4810 if data_format is None: 

4811 data_format = "NHWC" 

4812 data_format = _execute.make_str(data_format, "data_format") 

4813 if is_training is None: 

4814 is_training = True 

4815 is_training = _execute.make_bool(is_training, "is_training") 

4816 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

4817 _attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], ctx, [_dtypes.float32, ]) 

4818 (scale, offset, mean, variance) = _inputs_U 

4819 _inputs_flat = [x, scale, offset, mean, variance] 

4820 _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, 

4821 "exponential_avg_factor", exponential_avg_factor, "data_format", 

4822 data_format, "is_training", is_training) 

4823 _result = _execute.execute(b"FusedBatchNormV2", 5, inputs=_inputs_flat, 

4824 attrs=_attrs, ctx=ctx, name=name) 

4825 if _execute.must_record_gradient(): 

4826 _execute.record_gradient( 

4827 "FusedBatchNormV2", _inputs_flat, _attrs, _result) 

4828 _result = _FusedBatchNormV2Output._make(_result) 

4829 return _result 

4830 

4831_FusedBatchNormV3Output = collections.namedtuple( 

4832 "FusedBatchNormV3", 

4833 ["y", "batch_mean", "batch_variance", "reserve_space_1", "reserve_space_2", "reserve_space_3"]) 

4834 

4835 

4836def fused_batch_norm_v3(x, scale, offset, mean, variance, epsilon=0.0001, exponential_avg_factor=1, data_format="NHWC", is_training=True, name=None): 

4837 r"""Batch normalization. 

4838 

4839 Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". 

4840 The size of 1D Tensors matches the dimension C of the 4D Tensors. 

4841 

4842 Args: 

4843 x: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

4844 A 4D Tensor for input data. 

4845 scale: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`. 

4846 A 1D Tensor for scaling factor, to scale the normalized x. 

4847 offset: A `Tensor`. Must have the same type as `scale`. 

4848 A 1D Tensor for offset, to shift to the normalized x. 

4849 mean: A `Tensor`. Must have the same type as `scale`. 

4850 A 1D Tensor for population mean. Used for inference only; 

4851 must be empty for training. 

4852 variance: A `Tensor`. Must have the same type as `scale`. 

4853 A 1D Tensor for population variance. Used for inference only; 

4854 must be empty for training. 

4855 epsilon: An optional `float`. Defaults to `0.0001`. 

4856 A small float number added to the variance of x. 

4857 exponential_avg_factor: An optional `float`. Defaults to `1`. 

4858 data_format: An optional `string` from: `"NHWC", "NCHW", "NDHWC", "NCDHW"`. Defaults to `"NHWC"`. 

4859 The data format for x and y. Either "NHWC" (default) or "NCHW". 

4860 is_training: An optional `bool`. Defaults to `True`. 

4861 A bool value to indicate the operation is for training (default) 

4862 or inference. 

4863 name: A name for the operation (optional). 

4864 

4865 Returns: 

4866 A tuple of `Tensor` objects (y, batch_mean, batch_variance, reserve_space_1, reserve_space_2, reserve_space_3). 

4867 

4868 y: A `Tensor`. Has the same type as `x`. 

4869 batch_mean: A `Tensor`. Has the same type as `scale`. 

4870 batch_variance: A `Tensor`. Has the same type as `scale`. 

4871 reserve_space_1: A `Tensor`. Has the same type as `scale`. 

4872 reserve_space_2: A `Tensor`. Has the same type as `scale`. 

4873 reserve_space_3: A `Tensor`. Has the same type as `scale`. 

4874 """ 

4875 _ctx = _context._context or _context.context() 

4876 tld = _ctx._thread_local_data 

4877 if tld.is_eager: 

4878 try: 

4879 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

4880 _ctx, "FusedBatchNormV3", name, x, scale, offset, mean, variance, 

4881 "epsilon", epsilon, "exponential_avg_factor", exponential_avg_factor, 

4882 "data_format", data_format, "is_training", is_training) 

4883 _result = _FusedBatchNormV3Output._make(_result) 

4884 return _result 

4885 except _core._NotOkStatusException as e: 

4886 _ops.raise_from_not_ok_status(e, name) 

4887 except _core._FallbackException: 

4888 pass 

4889 try: 

4890 return fused_batch_norm_v3_eager_fallback( 

4891 x, scale, offset, mean, variance, epsilon=epsilon, 

4892 exponential_avg_factor=exponential_avg_factor, 

4893 data_format=data_format, is_training=is_training, name=name, 

4894 ctx=_ctx) 

4895 except _core._SymbolicException: 

4896 pass # Add nodes to the TensorFlow graph. 

4897 # Add nodes to the TensorFlow graph. 

4898 if epsilon is None: 

4899 epsilon = 0.0001 

4900 epsilon = _execute.make_float(epsilon, "epsilon") 

4901 if exponential_avg_factor is None: 

4902 exponential_avg_factor = 1 

4903 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4904 if data_format is None: 

4905 data_format = "NHWC" 

4906 data_format = _execute.make_str(data_format, "data_format") 

4907 if is_training is None: 

4908 is_training = True 

4909 is_training = _execute.make_bool(is_training, "is_training") 

4910 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

4911 "FusedBatchNormV3", x=x, scale=scale, offset=offset, mean=mean, 

4912 variance=variance, epsilon=epsilon, 

4913 exponential_avg_factor=exponential_avg_factor, 

4914 data_format=data_format, is_training=is_training, 

4915 name=name) 

4916 _result = _outputs[:] 

4917 if _execute.must_record_gradient(): 

4918 _attrs = ("T", _op._get_attr_type("T"), "U", _op._get_attr_type("U"), 

4919 "epsilon", _op.get_attr("epsilon"), "exponential_avg_factor", 

4920 _op.get_attr("exponential_avg_factor"), "data_format", 

4921 _op.get_attr("data_format"), "is_training", 

4922 _op._get_attr_bool("is_training")) 

4923 _inputs_flat = _op.inputs 

4924 _execute.record_gradient( 

4925 "FusedBatchNormV3", _inputs_flat, _attrs, _result) 

4926 _result = _FusedBatchNormV3Output._make(_result) 

4927 return _result 

4928 

4929FusedBatchNormV3 = tf_export("raw_ops.FusedBatchNormV3")(_ops.to_raw_op(fused_batch_norm_v3)) 

4930 

4931 

4932def fused_batch_norm_v3_eager_fallback(x, scale, offset, mean, variance, epsilon, exponential_avg_factor, data_format, is_training, name, ctx): 

4933 if epsilon is None: 

4934 epsilon = 0.0001 

4935 epsilon = _execute.make_float(epsilon, "epsilon") 

4936 if exponential_avg_factor is None: 

4937 exponential_avg_factor = 1 

4938 exponential_avg_factor = _execute.make_float(exponential_avg_factor, "exponential_avg_factor") 

4939 if data_format is None: 

4940 data_format = "NHWC" 

4941 data_format = _execute.make_str(data_format, "data_format") 

4942 if is_training is None: 

4943 is_training = True 

4944 is_training = _execute.make_bool(is_training, "is_training") 

4945 _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

4946 _attr_U, _inputs_U = _execute.args_to_matching_eager([scale, offset, mean, variance], ctx, [_dtypes.bfloat16, _dtypes.float32, ]) 

4947 (scale, offset, mean, variance) = _inputs_U 

4948 _inputs_flat = [x, scale, offset, mean, variance] 

4949 _attrs = ("T", _attr_T, "U", _attr_U, "epsilon", epsilon, 

4950 "exponential_avg_factor", exponential_avg_factor, "data_format", 

4951 data_format, "is_training", is_training) 

4952 _result = _execute.execute(b"FusedBatchNormV3", 6, inputs=_inputs_flat, 

4953 attrs=_attrs, ctx=ctx, name=name) 

4954 if _execute.must_record_gradient(): 

4955 _execute.record_gradient( 

4956 "FusedBatchNormV3", _inputs_flat, _attrs, _result) 

4957 _result = _FusedBatchNormV3Output._make(_result) 

4958 return _result 

4959 

4960 

4961def fused_pad_conv2d(input, paddings, filter, mode, strides, padding, name=None): 

4962 r"""Performs a padding as a preprocess during a convolution. 

4963 

4964 Similar to FusedResizeAndPadConv2d, this op allows for an optimized 

4965 implementation where the spatial padding transformation stage is fused with the 

4966 im2col lookup, but in this case without the bilinear filtering required for 

4967 resizing. Fusing the padding prevents the need to write out the intermediate 

4968 results as whole tensors, reducing memory pressure, and we can get some latency 

4969 gains by merging the transformation calculations. 

4970 The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' 

4971 order is used instead. 

4972 Internally this op uses a single per-graph scratch buffer, which means that it 

4973 will block if multiple versions are being run in parallel. This is because this 

4974 operator is primarily an optimization to minimize memory usage. 

4975 

4976 Args: 

4977 input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. 

4978 4-D with shape `[batch, in_height, in_width, in_channels]`. 

4979 paddings: A `Tensor` of type `int32`. 

4980 A two-column matrix specifying the padding sizes. The number of 

4981 rows must be the same as the rank of `input`. 

4982 filter: A `Tensor`. Must have the same type as `input`. 4-D with shape 

4983 `[filter_height, filter_width, in_channels, out_channels]`. 

4984 mode: A `string` from: `"REFLECT", "SYMMETRIC"`. 

4985 strides: A list of `ints`. 

4986 1-D of length 4. The stride of the sliding window for each dimension 

4987 of `input`. Must be in the same order as the dimension specified with format. 

4988 padding: A `string` from: `"SAME", "VALID"`. 

4989 The type of padding algorithm to use. 

4990 name: A name for the operation (optional). 

4991 

4992 Returns: 

4993 A `Tensor`. Has the same type as `input`. 

4994 """ 

4995 _ctx = _context._context or _context.context() 

4996 tld = _ctx._thread_local_data 

4997 if tld.is_eager: 

4998 try: 

4999 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5000 _ctx, "FusedPadConv2D", name, input, paddings, filter, "mode", mode, 

5001 "strides", strides, "padding", padding) 

5002 return _result 

5003 except _core._NotOkStatusException as e: 

5004 _ops.raise_from_not_ok_status(e, name) 

5005 except _core._FallbackException: 

5006 pass 

5007 try: 

5008 return fused_pad_conv2d_eager_fallback( 

5009 input, paddings, filter, mode=mode, strides=strides, 

5010 padding=padding, name=name, ctx=_ctx) 

5011 except _core._SymbolicException: 

5012 pass # Add nodes to the TensorFlow graph. 

5013 # Add nodes to the TensorFlow graph. 

5014 mode = _execute.make_str(mode, "mode") 

5015 if not isinstance(strides, (list, tuple)): 

5016 raise TypeError( 

5017 "Expected list for 'strides' argument to " 

5018 "'fused_pad_conv2d' Op, not %r." % strides) 

5019 strides = [_execute.make_int(_i, "strides") for _i in strides] 

5020 padding = _execute.make_str(padding, "padding") 

5021 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5022 "FusedPadConv2D", input=input, paddings=paddings, filter=filter, 

5023 mode=mode, strides=strides, padding=padding, 

5024 name=name) 

5025 _result = _outputs[:] 

5026 if _execute.must_record_gradient(): 

5027 _attrs = ("T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"), 

5028 "strides", _op.get_attr("strides"), "padding", 

5029 _op.get_attr("padding")) 

5030 _inputs_flat = _op.inputs 

5031 _execute.record_gradient( 

5032 "FusedPadConv2D", _inputs_flat, _attrs, _result) 

5033 _result, = _result 

5034 return _result 

5035 

5036FusedPadConv2D = tf_export("raw_ops.FusedPadConv2D")(_ops.to_raw_op(fused_pad_conv2d)) 

5037 

5038 

5039def fused_pad_conv2d_eager_fallback(input, paddings, filter, mode, strides, padding, name, ctx): 

5040 mode = _execute.make_str(mode, "mode") 

5041 if not isinstance(strides, (list, tuple)): 

5042 raise TypeError( 

5043 "Expected list for 'strides' argument to " 

5044 "'fused_pad_conv2d' Op, not %r." % strides) 

5045 strides = [_execute.make_int(_i, "strides") for _i in strides] 

5046 padding = _execute.make_str(padding, "padding") 

5047 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

5048 (input, filter) = _inputs_T 

5049 paddings = _ops.convert_to_tensor(paddings, _dtypes.int32) 

5050 _inputs_flat = [input, paddings, filter] 

5051 _attrs = ("T", _attr_T, "mode", mode, "strides", strides, "padding", 

5052 padding) 

5053 _result = _execute.execute(b"FusedPadConv2D", 1, inputs=_inputs_flat, 

5054 attrs=_attrs, ctx=ctx, name=name) 

5055 if _execute.must_record_gradient(): 

5056 _execute.record_gradient( 

5057 "FusedPadConv2D", _inputs_flat, _attrs, _result) 

5058 _result, = _result 

5059 return _result 

5060 

5061 

5062def fused_resize_and_pad_conv2d(input, size, paddings, filter, mode, strides, padding, resize_align_corners=False, name=None): 

5063 r"""Performs a resize and padding as a preprocess during a convolution. 

5064 

5065 It's often possible to do spatial transformations more efficiently as part of 

5066 the packing stage of a convolution, so this op allows for an optimized 

5067 implementation where these stages are fused together. This prevents the need to 

5068 write out the intermediate results as whole tensors, reducing memory pressure, 

5069 and we can get some latency gains by merging the transformation calculations. 

5070 The data_format attribute for Conv2D isn't supported by this op, and defaults to 

5071 'NHWC' order. 

5072 Internally this op uses a single per-graph scratch buffer, which means that it 

5073 will block if multiple versions are being run in parallel. This is because this 

5074 operator is primarily an optimization to minimize memory usage. 

5075 

5076 Args: 

5077 input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`. 

5078 4-D with shape `[batch, in_height, in_width, in_channels]`. 

5079 size: A `Tensor` of type `int32`. 

5080 A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The 

5081 new size for the images. 

5082 paddings: A `Tensor` of type `int32`. 

5083 A two-column matrix specifying the padding sizes. The number of 

5084 rows must be the same as the rank of `input`. 

5085 filter: A `Tensor`. Must have the same type as `input`. 4-D with shape 

5086 `[filter_height, filter_width, in_channels, out_channels]`. 

5087 mode: A `string` from: `"REFLECT", "SYMMETRIC"`. 

5088 strides: A list of `ints`. 

5089 1-D of length 4. The stride of the sliding window for each dimension 

5090 of `input`. Must be in the same order as the dimension specified with format. 

5091 padding: A `string` from: `"SAME", "VALID"`. 

5092 The type of padding algorithm to use. 

5093 resize_align_corners: An optional `bool`. Defaults to `False`. 

5094 If true, the centers of the 4 corner pixels of the input and output tensors are 

5095 aligned, preserving the values at the corner pixels. Defaults to false. 

5096 name: A name for the operation (optional). 

5097 

5098 Returns: 

5099 A `Tensor`. Has the same type as `input`. 

5100 """ 

5101 _ctx = _context._context or _context.context() 

5102 tld = _ctx._thread_local_data 

5103 if tld.is_eager: 

5104 try: 

5105 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5106 _ctx, "FusedResizeAndPadConv2D", name, input, size, paddings, filter, 

5107 "resize_align_corners", resize_align_corners, "mode", mode, "strides", 

5108 strides, "padding", padding) 

5109 return _result 

5110 except _core._NotOkStatusException as e: 

5111 _ops.raise_from_not_ok_status(e, name) 

5112 except _core._FallbackException: 

5113 pass 

5114 try: 

5115 return fused_resize_and_pad_conv2d_eager_fallback( 

5116 input, size, paddings, filter, 

5117 resize_align_corners=resize_align_corners, mode=mode, 

5118 strides=strides, padding=padding, name=name, ctx=_ctx) 

5119 except _core._SymbolicException: 

5120 pass # Add nodes to the TensorFlow graph. 

5121 # Add nodes to the TensorFlow graph. 

5122 mode = _execute.make_str(mode, "mode") 

5123 if not isinstance(strides, (list, tuple)): 

5124 raise TypeError( 

5125 "Expected list for 'strides' argument to " 

5126 "'fused_resize_and_pad_conv2d' Op, not %r." % strides) 

5127 strides = [_execute.make_int(_i, "strides") for _i in strides] 

5128 padding = _execute.make_str(padding, "padding") 

5129 if resize_align_corners is None: 

5130 resize_align_corners = False 

5131 resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners") 

5132 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5133 "FusedResizeAndPadConv2D", input=input, size=size, paddings=paddings, 

5134 filter=filter, mode=mode, strides=strides, 

5135 padding=padding, 

5136 resize_align_corners=resize_align_corners, 

5137 name=name) 

5138 _result = _outputs[:] 

5139 if _execute.must_record_gradient(): 

5140 _attrs = ("T", _op._get_attr_type("T"), "resize_align_corners", 

5141 _op._get_attr_bool("resize_align_corners"), "mode", 

5142 _op.get_attr("mode"), "strides", _op.get_attr("strides"), 

5143 "padding", _op.get_attr("padding")) 

5144 _inputs_flat = _op.inputs 

5145 _execute.record_gradient( 

5146 "FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result) 

5147 _result, = _result 

5148 return _result 

5149 

5150FusedResizeAndPadConv2D = tf_export("raw_ops.FusedResizeAndPadConv2D")(_ops.to_raw_op(fused_resize_and_pad_conv2d)) 

5151 

5152 

5153def fused_resize_and_pad_conv2d_eager_fallback(input, size, paddings, filter, mode, strides, padding, resize_align_corners, name, ctx): 

5154 mode = _execute.make_str(mode, "mode") 

5155 if not isinstance(strides, (list, tuple)): 

5156 raise TypeError( 

5157 "Expected list for 'strides' argument to " 

5158 "'fused_resize_and_pad_conv2d' Op, not %r." % strides) 

5159 strides = [_execute.make_int(_i, "strides") for _i in strides] 

5160 padding = _execute.make_str(padding, "padding") 

5161 if resize_align_corners is None: 

5162 resize_align_corners = False 

5163 resize_align_corners = _execute.make_bool(resize_align_corners, "resize_align_corners") 

5164 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, filter], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ]) 

5165 (input, filter) = _inputs_T 

5166 size = _ops.convert_to_tensor(size, _dtypes.int32) 

5167 paddings = _ops.convert_to_tensor(paddings, _dtypes.int32) 

5168 _inputs_flat = [input, size, paddings, filter] 

5169 _attrs = ("T", _attr_T, "resize_align_corners", resize_align_corners, 

5170 "mode", mode, "strides", strides, "padding", padding) 

5171 _result = _execute.execute(b"FusedResizeAndPadConv2D", 1, 

5172 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

5173 name=name) 

5174 if _execute.must_record_gradient(): 

5175 _execute.record_gradient( 

5176 "FusedResizeAndPadConv2D", _inputs_flat, _attrs, _result) 

5177 _result, = _result 

5178 return _result 

5179 

5180 

5181def in_top_k(predictions, targets, k, name=None): 

5182 r"""Says whether the targets are in the top `K` predictions. 

5183 

5184 This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the 

5185 prediction for the target class is among the top `k` predictions among 

5186 all predictions for example `i`. Note that the behavior of `InTopK` differs 

5187 from the `TopK` op in its handling of ties; if multiple classes have the 

5188 same prediction value and straddle the top-`k` boundary, all of those 

5189 classes are considered to be in the top `k`. 

5190 

5191 More formally, let 

5192 

5193 \\(predictions_i\\) be the predictions for all classes for example `i`, 

5194 \\(targets_i\\) be the target class for example `i`, 

5195 \\(out_i\\) be the output for example `i`, 

5196 

5197 $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ 

5198 

5199 Args: 

5200 predictions: A `Tensor` of type `float32`. 

5201 A `batch_size` x `classes` tensor. 

5202 targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

5203 A `batch_size` vector of class ids. 

5204 k: An `int`. Number of top elements to look at for computing precision. 

5205 name: A name for the operation (optional). 

5206 

5207 Returns: 

5208 A `Tensor` of type `bool`. 

5209 """ 

5210 _ctx = _context._context or _context.context() 

5211 tld = _ctx._thread_local_data 

5212 if tld.is_eager: 

5213 try: 

5214 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5215 _ctx, "InTopK", name, predictions, targets, "k", k) 

5216 return _result 

5217 except _core._NotOkStatusException as e: 

5218 _ops.raise_from_not_ok_status(e, name) 

5219 except _core._FallbackException: 

5220 pass 

5221 try: 

5222 return in_top_k_eager_fallback( 

5223 predictions, targets, k=k, name=name, ctx=_ctx) 

5224 except _core._SymbolicException: 

5225 pass # Add nodes to the TensorFlow graph. 

5226 # Add nodes to the TensorFlow graph. 

5227 k = _execute.make_int(k, "k") 

5228 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5229 "InTopK", predictions=predictions, targets=targets, k=k, name=name) 

5230 _result = _outputs[:] 

5231 if _execute.must_record_gradient(): 

5232 _attrs = ("k", _op._get_attr_int("k"), "T", _op._get_attr_type("T")) 

5233 _inputs_flat = _op.inputs 

5234 _execute.record_gradient( 

5235 "InTopK", _inputs_flat, _attrs, _result) 

5236 _result, = _result 

5237 return _result 

5238 

5239InTopK = tf_export("raw_ops.InTopK")(_ops.to_raw_op(in_top_k)) 

5240 

5241 

5242def in_top_k_eager_fallback(predictions, targets, k, name, ctx): 

5243 k = _execute.make_int(k, "k") 

5244 _attr_T, (targets,) = _execute.args_to_matching_eager([targets], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

5245 predictions = _ops.convert_to_tensor(predictions, _dtypes.float32) 

5246 _inputs_flat = [predictions, targets] 

5247 _attrs = ("k", k, "T", _attr_T) 

5248 _result = _execute.execute(b"InTopK", 1, inputs=_inputs_flat, attrs=_attrs, 

5249 ctx=ctx, name=name) 

5250 if _execute.must_record_gradient(): 

5251 _execute.record_gradient( 

5252 "InTopK", _inputs_flat, _attrs, _result) 

5253 _result, = _result 

5254 return _result 

5255 

5256 

5257def in_top_kv2(predictions, targets, k, name=None): 

5258 r"""Says whether the targets are in the top `K` predictions. 

5259 

5260 This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the 

5261 prediction for the target class is among the top `k` predictions among 

5262 all predictions for example `i`. Note that the behavior of `InTopK` differs 

5263 from the `TopK` op in its handling of ties; if multiple classes have the 

5264 same prediction value and straddle the top-`k` boundary, all of those 

5265 classes are considered to be in the top `k`. 

5266 

5267 More formally, let 

5268 

5269 \\(predictions_i\\) be the predictions for all classes for example `i`, 

5270 \\(targets_i\\) be the target class for example `i`, 

5271 \\(out_i\\) be the output for example `i`, 

5272 

5273 $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ 

5274 

5275 Args: 

5276 predictions: A `Tensor` of type `float32`. 

5277 A `batch_size` x `classes` tensor. 

5278 targets: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

5279 A `batch_size` vector of class ids. 

5280 k: A `Tensor`. Must have the same type as `targets`. 

5281 Number of top elements to look at for computing precision. 

5282 name: A name for the operation (optional). 

5283 

5284 Returns: 

5285 A `Tensor` of type `bool`. 

5286 """ 

5287 _ctx = _context._context or _context.context() 

5288 tld = _ctx._thread_local_data 

5289 if tld.is_eager: 

5290 try: 

5291 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5292 _ctx, "InTopKV2", name, predictions, targets, k) 

5293 return _result 

5294 except _core._NotOkStatusException as e: 

5295 _ops.raise_from_not_ok_status(e, name) 

5296 except _core._FallbackException: 

5297 pass 

5298 try: 

5299 return in_top_kv2_eager_fallback( 

5300 predictions, targets, k, name=name, ctx=_ctx) 

5301 except _core._SymbolicException: 

5302 pass # Add nodes to the TensorFlow graph. 

5303 # Add nodes to the TensorFlow graph. 

5304 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5305 "InTopKV2", predictions=predictions, targets=targets, k=k, name=name) 

5306 _result = _outputs[:] 

5307 if _execute.must_record_gradient(): 

5308 _attrs = ("T", _op._get_attr_type("T")) 

5309 _inputs_flat = _op.inputs 

5310 _execute.record_gradient( 

5311 "InTopKV2", _inputs_flat, _attrs, _result) 

5312 _result, = _result 

5313 return _result 

5314 

5315InTopKV2 = tf_export("raw_ops.InTopKV2")(_ops.to_raw_op(in_top_kv2)) 

5316 

5317 

5318def in_top_kv2_eager_fallback(predictions, targets, k, name, ctx): 

5319 _attr_T, _inputs_T = _execute.args_to_matching_eager([targets, k], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

5320 (targets, k) = _inputs_T 

5321 predictions = _ops.convert_to_tensor(predictions, _dtypes.float32) 

5322 _inputs_flat = [predictions, targets, k] 

5323 _attrs = ("T", _attr_T) 

5324 _result = _execute.execute(b"InTopKV2", 1, inputs=_inputs_flat, 

5325 attrs=_attrs, ctx=ctx, name=name) 

5326 if _execute.must_record_gradient(): 

5327 _execute.record_gradient( 

5328 "InTopKV2", _inputs_flat, _attrs, _result) 

5329 _result, = _result 

5330 return _result 

5331 

5332_IsotonicRegressionOutput = collections.namedtuple( 

5333 "IsotonicRegression", 

5334 ["output", "segments"]) 

5335 

5336 

5337def isotonic_regression(input, output_dtype=_dtypes.float32, name=None): 

5338 r"""Solves a batch of isotonic regression problems. 

5339 

5340 Args: 

5341 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

5342 A (batch_size, dim)-tensor holding a batch of inputs. 

5343 output_dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. Defaults to `tf.float32`. 

5344 Dtype of output. 

5345 name: A name for the operation (optional). 

5346 

5347 Returns: 

5348 A tuple of `Tensor` objects (output, segments). 

5349 

5350 output: A `Tensor` of type `output_dtype`. 

5351 segments: A `Tensor` of type `int32`. 

5352 """ 

5353 _ctx = _context._context or _context.context() 

5354 tld = _ctx._thread_local_data 

5355 if tld.is_eager: 

5356 try: 

5357 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5358 _ctx, "IsotonicRegression", name, input, "output_dtype", output_dtype) 

5359 _result = _IsotonicRegressionOutput._make(_result) 

5360 return _result 

5361 except _core._NotOkStatusException as e: 

5362 _ops.raise_from_not_ok_status(e, name) 

5363 except _core._FallbackException: 

5364 pass 

5365 try: 

5366 return isotonic_regression_eager_fallback( 

5367 input, output_dtype=output_dtype, name=name, ctx=_ctx) 

5368 except _core._SymbolicException: 

5369 pass # Add nodes to the TensorFlow graph. 

5370 # Add nodes to the TensorFlow graph. 

5371 if output_dtype is None: 

5372 output_dtype = _dtypes.float32 

5373 output_dtype = _execute.make_type(output_dtype, "output_dtype") 

5374 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5375 "IsotonicRegression", input=input, output_dtype=output_dtype, 

5376 name=name) 

5377 _result = _outputs[:] 

5378 if _execute.must_record_gradient(): 

5379 _attrs = ("T", _op._get_attr_type("T"), "output_dtype", 

5380 _op._get_attr_type("output_dtype")) 

5381 _inputs_flat = _op.inputs 

5382 _execute.record_gradient( 

5383 "IsotonicRegression", _inputs_flat, _attrs, _result) 

5384 _result = _IsotonicRegressionOutput._make(_result) 

5385 return _result 

5386 

5387IsotonicRegression = tf_export("raw_ops.IsotonicRegression")(_ops.to_raw_op(isotonic_regression)) 

5388 

5389 

5390def isotonic_regression_eager_fallback(input, output_dtype, name, ctx): 

5391 if output_dtype is None: 

5392 output_dtype = _dtypes.float32 

5393 output_dtype = _execute.make_type(output_dtype, "output_dtype") 

5394 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

5395 _inputs_flat = [input] 

5396 _attrs = ("T", _attr_T, "output_dtype", output_dtype) 

5397 _result = _execute.execute(b"IsotonicRegression", 2, inputs=_inputs_flat, 

5398 attrs=_attrs, ctx=ctx, name=name) 

5399 if _execute.must_record_gradient(): 

5400 _execute.record_gradient( 

5401 "IsotonicRegression", _inputs_flat, _attrs, _result) 

5402 _result = _IsotonicRegressionOutput._make(_result) 

5403 return _result 

5404 

5405 

5406@_dispatch.add_fallback_dispatch_list 

5407@_dispatch.add_type_based_api_dispatcher 

5408@tf_export('nn.l2_loss') 

5409def l2_loss(t, name=None): 

5410 r"""L2 Loss. 

5411 

5412 Computes half the L2 norm of a tensor without the `sqrt`: 

5413 

5414 output = sum(t ** 2) / 2 

5415 

5416 Args: 

5417 t: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

5418 Typically 2-D, but may have any dimensions. 

5419 name: A name for the operation (optional). 

5420 

5421 Returns: 

5422 A `Tensor`. Has the same type as `t`. 

5423 """ 

5424 _ctx = _context._context or _context.context() 

5425 tld = _ctx._thread_local_data 

5426 if tld.is_eager: 

5427 try: 

5428 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5429 _ctx, "L2Loss", name, t) 

5430 return _result 

5431 except _core._NotOkStatusException as e: 

5432 _ops.raise_from_not_ok_status(e, name) 

5433 except _core._FallbackException: 

5434 pass 

5435 try: 

5436 _result = _dispatcher_for_l2_loss( 

5437 (t, name,), None) 

5438 if _result is not NotImplemented: 

5439 return _result 

5440 return l2_loss_eager_fallback( 

5441 t, name=name, ctx=_ctx) 

5442 except _core._SymbolicException: 

5443 pass # Add nodes to the TensorFlow graph. 

5444 except (TypeError, ValueError): 

5445 _result = _dispatch.dispatch( 

5446 l2_loss, (), dict(t=t, name=name) 

5447 ) 

5448 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

5449 return _result 

5450 raise 

5451 else: 

5452 _result = _dispatcher_for_l2_loss( 

5453 (t, name,), None) 

5454 if _result is not NotImplemented: 

5455 return _result 

5456 # Add nodes to the TensorFlow graph. 

5457 try: 

5458 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5459 "L2Loss", t=t, name=name) 

5460 except (TypeError, ValueError): 

5461 _result = _dispatch.dispatch( 

5462 l2_loss, (), dict(t=t, name=name) 

5463 ) 

5464 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

5465 return _result 

5466 raise 

5467 _result = _outputs[:] 

5468 if _execute.must_record_gradient(): 

5469 _attrs = ("T", _op._get_attr_type("T")) 

5470 _inputs_flat = _op.inputs 

5471 _execute.record_gradient( 

5472 "L2Loss", _inputs_flat, _attrs, _result) 

5473 _result, = _result 

5474 return _result 

5475 

5476L2Loss = tf_export("raw_ops.L2Loss")(_ops.to_raw_op(l2_loss)) 

5477_dispatcher_for_l2_loss = l2_loss._tf_type_based_dispatcher.Dispatch 

5478 

5479 

5480def l2_loss_eager_fallback(t, name, ctx): 

5481 _attr_T, (t,) = _execute.args_to_matching_eager([t], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

5482 _inputs_flat = [t] 

5483 _attrs = ("T", _attr_T) 

5484 _result = _execute.execute(b"L2Loss", 1, inputs=_inputs_flat, attrs=_attrs, 

5485 ctx=ctx, name=name) 

5486 if _execute.must_record_gradient(): 

5487 _execute.record_gradient( 

5488 "L2Loss", _inputs_flat, _attrs, _result) 

5489 _result, = _result 

5490 return _result 

5491 

5492 

5493@_dispatch.add_fallback_dispatch_list 

5494@_dispatch.add_type_based_api_dispatcher 

5495@tf_export('nn.local_response_normalization', 'nn.lrn') 

5496def lrn(input, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None): 

5497 r"""Local Response Normalization. 

5498 

5499 The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last 

5500 dimension), and each vector is normalized independently. Within a given vector, 

5501 each component is divided by the weighted, squared sum of inputs within 

5502 `depth_radius`. In detail, 

5503 

5504 sqr_sum[a, b, c, d] = 

5505 sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) 

5506 output = input / (bias + alpha * sqr_sum) ** beta 

5507 

5508 For details, see [Krizhevsky et al., ImageNet classification with deep 

5509 convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). 

5510 

5511 Args: 

5512 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

5513 4-D. 

5514 depth_radius: An optional `int`. Defaults to `5`. 

5515 0-D. Half-width of the 1-D normalization window. 

5516 bias: An optional `float`. Defaults to `1`. 

5517 An offset (usually positive to avoid dividing by 0). 

5518 alpha: An optional `float`. Defaults to `1`. 

5519 A scale factor, usually positive. 

5520 beta: An optional `float`. Defaults to `0.5`. An exponent. 

5521 name: A name for the operation (optional). 

5522 

5523 Returns: 

5524 A `Tensor`. Has the same type as `input`. 

5525 """ 

5526 _ctx = _context._context or _context.context() 

5527 tld = _ctx._thread_local_data 

5528 if tld.is_eager: 

5529 try: 

5530 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5531 _ctx, "LRN", name, input, "depth_radius", depth_radius, "bias", bias, 

5532 "alpha", alpha, "beta", beta) 

5533 return _result 

5534 except _core._NotOkStatusException as e: 

5535 _ops.raise_from_not_ok_status(e, name) 

5536 except _core._FallbackException: 

5537 pass 

5538 try: 

5539 _result = _dispatcher_for_lrn( 

5540 (input, depth_radius, bias, alpha, beta, name,), None) 

5541 if _result is not NotImplemented: 

5542 return _result 

5543 return lrn_eager_fallback( 

5544 input, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta, 

5545 name=name, ctx=_ctx) 

5546 except _core._SymbolicException: 

5547 pass # Add nodes to the TensorFlow graph. 

5548 except (TypeError, ValueError): 

5549 _result = _dispatch.dispatch( 

5550 lrn, (), dict(input=input, depth_radius=depth_radius, bias=bias, 

5551 alpha=alpha, beta=beta, name=name) 

5552 ) 

5553 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

5554 return _result 

5555 raise 

5556 else: 

5557 _result = _dispatcher_for_lrn( 

5558 (input, depth_radius, bias, alpha, beta, name,), None) 

5559 if _result is not NotImplemented: 

5560 return _result 

5561 # Add nodes to the TensorFlow graph. 

5562 if depth_radius is None: 

5563 depth_radius = 5 

5564 depth_radius = _execute.make_int(depth_radius, "depth_radius") 

5565 if bias is None: 

5566 bias = 1 

5567 bias = _execute.make_float(bias, "bias") 

5568 if alpha is None: 

5569 alpha = 1 

5570 alpha = _execute.make_float(alpha, "alpha") 

5571 if beta is None: 

5572 beta = 0.5 

5573 beta = _execute.make_float(beta, "beta") 

5574 try: 

5575 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5576 "LRN", input=input, depth_radius=depth_radius, bias=bias, alpha=alpha, 

5577 beta=beta, name=name) 

5578 except (TypeError, ValueError): 

5579 _result = _dispatch.dispatch( 

5580 lrn, (), dict(input=input, depth_radius=depth_radius, bias=bias, 

5581 alpha=alpha, beta=beta, name=name) 

5582 ) 

5583 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

5584 return _result 

5585 raise 

5586 _result = _outputs[:] 

5587 if _execute.must_record_gradient(): 

5588 _attrs = ("depth_radius", _op._get_attr_int("depth_radius"), "bias", 

5589 _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta", 

5590 _op.get_attr("beta"), "T", _op._get_attr_type("T")) 

5591 _inputs_flat = _op.inputs 

5592 _execute.record_gradient( 

5593 "LRN", _inputs_flat, _attrs, _result) 

5594 _result, = _result 

5595 return _result 

5596 

5597LRN = tf_export("raw_ops.LRN")(_ops.to_raw_op(lrn)) 

5598_dispatcher_for_lrn = lrn._tf_type_based_dispatcher.Dispatch 

5599 

5600 

5601def lrn_eager_fallback(input, depth_radius, bias, alpha, beta, name, ctx): 

5602 if depth_radius is None: 

5603 depth_radius = 5 

5604 depth_radius = _execute.make_int(depth_radius, "depth_radius") 

5605 if bias is None: 

5606 bias = 1 

5607 bias = _execute.make_float(bias, "bias") 

5608 if alpha is None: 

5609 alpha = 1 

5610 alpha = _execute.make_float(alpha, "alpha") 

5611 if beta is None: 

5612 beta = 0.5 

5613 beta = _execute.make_float(beta, "beta") 

5614 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32) 

5615 _inputs_flat = [input] 

5616 _attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha, 

5617 "beta", beta, "T", _attr_T) 

5618 _result = _execute.execute(b"LRN", 1, inputs=_inputs_flat, attrs=_attrs, 

5619 ctx=ctx, name=name) 

5620 if _execute.must_record_gradient(): 

5621 _execute.record_gradient( 

5622 "LRN", _inputs_flat, _attrs, _result) 

5623 _result, = _result 

5624 return _result 

5625 

5626 

5627def lrn_grad(input_grads, input_image, output_image, depth_radius=5, bias=1, alpha=1, beta=0.5, name=None): 

5628 r"""Gradients for Local Response Normalization. 

5629 

5630 Args: 

5631 input_grads: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

5632 4-D with shape `[batch, height, width, channels]`. 

5633 input_image: A `Tensor`. Must have the same type as `input_grads`. 

5634 4-D with shape `[batch, height, width, channels]`. 

5635 output_image: A `Tensor`. Must have the same type as `input_grads`. 

5636 4-D with shape `[batch, height, width, channels]`. 

5637 depth_radius: An optional `int`. Defaults to `5`. A depth radius. 

5638 bias: An optional `float`. Defaults to `1`. 

5639 An offset (usually > 0 to avoid dividing by 0). 

5640 alpha: An optional `float`. Defaults to `1`. 

5641 A scale factor, usually positive. 

5642 beta: An optional `float`. Defaults to `0.5`. An exponent. 

5643 name: A name for the operation (optional). 

5644 

5645 Returns: 

5646 A `Tensor`. Has the same type as `input_grads`. 

5647 """ 

5648 _ctx = _context._context or _context.context() 

5649 tld = _ctx._thread_local_data 

5650 if tld.is_eager: 

5651 try: 

5652 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5653 _ctx, "LRNGrad", name, input_grads, input_image, output_image, 

5654 "depth_radius", depth_radius, "bias", bias, "alpha", alpha, "beta", 

5655 beta) 

5656 return _result 

5657 except _core._NotOkStatusException as e: 

5658 _ops.raise_from_not_ok_status(e, name) 

5659 except _core._FallbackException: 

5660 pass 

5661 try: 

5662 return lrn_grad_eager_fallback( 

5663 input_grads, input_image, output_image, depth_radius=depth_radius, 

5664 bias=bias, alpha=alpha, beta=beta, name=name, ctx=_ctx) 

5665 except _core._SymbolicException: 

5666 pass # Add nodes to the TensorFlow graph. 

5667 # Add nodes to the TensorFlow graph. 

5668 if depth_radius is None: 

5669 depth_radius = 5 

5670 depth_radius = _execute.make_int(depth_radius, "depth_radius") 

5671 if bias is None: 

5672 bias = 1 

5673 bias = _execute.make_float(bias, "bias") 

5674 if alpha is None: 

5675 alpha = 1 

5676 alpha = _execute.make_float(alpha, "alpha") 

5677 if beta is None: 

5678 beta = 0.5 

5679 beta = _execute.make_float(beta, "beta") 

5680 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5681 "LRNGrad", input_grads=input_grads, input_image=input_image, 

5682 output_image=output_image, depth_radius=depth_radius, 

5683 bias=bias, alpha=alpha, beta=beta, name=name) 

5684 _result = _outputs[:] 

5685 if _execute.must_record_gradient(): 

5686 _attrs = ("depth_radius", _op._get_attr_int("depth_radius"), "bias", 

5687 _op.get_attr("bias"), "alpha", _op.get_attr("alpha"), "beta", 

5688 _op.get_attr("beta"), "T", _op._get_attr_type("T")) 

5689 _inputs_flat = _op.inputs 

5690 _execute.record_gradient( 

5691 "LRNGrad", _inputs_flat, _attrs, _result) 

5692 _result, = _result 

5693 return _result 

5694 

5695LRNGrad = tf_export("raw_ops.LRNGrad")(_ops.to_raw_op(lrn_grad)) 

5696 

5697 

5698def lrn_grad_eager_fallback(input_grads, input_image, output_image, depth_radius, bias, alpha, beta, name, ctx): 

5699 if depth_radius is None: 

5700 depth_radius = 5 

5701 depth_radius = _execute.make_int(depth_radius, "depth_radius") 

5702 if bias is None: 

5703 bias = 1 

5704 bias = _execute.make_float(bias, "bias") 

5705 if alpha is None: 

5706 alpha = 1 

5707 alpha = _execute.make_float(alpha, "alpha") 

5708 if beta is None: 

5709 beta = 0.5 

5710 beta = _execute.make_float(beta, "beta") 

5711 _attr_T, _inputs_T = _execute.args_to_matching_eager([input_grads, input_image, output_image], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32) 

5712 (input_grads, input_image, output_image) = _inputs_T 

5713 _inputs_flat = [input_grads, input_image, output_image] 

5714 _attrs = ("depth_radius", depth_radius, "bias", bias, "alpha", alpha, 

5715 "beta", beta, "T", _attr_T) 

5716 _result = _execute.execute(b"LRNGrad", 1, inputs=_inputs_flat, attrs=_attrs, 

5717 ctx=ctx, name=name) 

5718 if _execute.must_record_gradient(): 

5719 _execute.record_gradient( 

5720 "LRNGrad", _inputs_flat, _attrs, _result) 

5721 _result, = _result 

5722 return _result 

5723 

5724 

5725def leaky_relu(features, alpha=0.2, name=None): 

5726 r"""Computes rectified linear: `max(features, features * alpha)`. 

5727 

5728 Args: 

5729 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

5730 alpha: An optional `float`. Defaults to `0.2`. 

5731 name: A name for the operation (optional). 

5732 

5733 Returns: 

5734 A `Tensor`. Has the same type as `features`. 

5735 """ 

5736 _ctx = _context._context or _context.context() 

5737 tld = _ctx._thread_local_data 

5738 if tld.is_eager: 

5739 try: 

5740 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5741 _ctx, "LeakyRelu", name, features, "alpha", alpha) 

5742 return _result 

5743 except _core._NotOkStatusException as e: 

5744 _ops.raise_from_not_ok_status(e, name) 

5745 except _core._FallbackException: 

5746 pass 

5747 try: 

5748 return leaky_relu_eager_fallback( 

5749 features, alpha=alpha, name=name, ctx=_ctx) 

5750 except _core._SymbolicException: 

5751 pass # Add nodes to the TensorFlow graph. 

5752 # Add nodes to the TensorFlow graph. 

5753 if alpha is None: 

5754 alpha = 0.2 

5755 alpha = _execute.make_float(alpha, "alpha") 

5756 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5757 "LeakyRelu", features=features, alpha=alpha, name=name) 

5758 _result = _outputs[:] 

5759 if _execute.must_record_gradient(): 

5760 _attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T")) 

5761 _inputs_flat = _op.inputs 

5762 _execute.record_gradient( 

5763 "LeakyRelu", _inputs_flat, _attrs, _result) 

5764 _result, = _result 

5765 return _result 

5766 

5767LeakyRelu = tf_export("raw_ops.LeakyRelu")(_ops.to_raw_op(leaky_relu)) 

5768 

5769 

5770def leaky_relu_eager_fallback(features, alpha, name, ctx): 

5771 if alpha is None: 

5772 alpha = 0.2 

5773 alpha = _execute.make_float(alpha, "alpha") 

5774 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

5775 _inputs_flat = [features] 

5776 _attrs = ("alpha", alpha, "T", _attr_T) 

5777 _result = _execute.execute(b"LeakyRelu", 1, inputs=_inputs_flat, 

5778 attrs=_attrs, ctx=ctx, name=name) 

5779 if _execute.must_record_gradient(): 

5780 _execute.record_gradient( 

5781 "LeakyRelu", _inputs_flat, _attrs, _result) 

5782 _result, = _result 

5783 return _result 

5784 

5785 

5786def leaky_relu_grad(gradients, features, alpha=0.2, name=None): 

5787 r"""Computes rectified linear gradients for a LeakyRelu operation. 

5788 

5789 Args: 

5790 gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

5791 The backpropagated gradients to the corresponding LeakyRelu operation. 

5792 features: A `Tensor`. Must have the same type as `gradients`. 

5793 The features passed as input to the corresponding LeakyRelu operation, 

5794 OR the outputs of that operation (both work equivalently). 

5795 alpha: An optional `float`. Defaults to `0.2`. 

5796 name: A name for the operation (optional). 

5797 

5798 Returns: 

5799 A `Tensor`. Has the same type as `gradients`. 

5800 """ 

5801 _ctx = _context._context or _context.context() 

5802 tld = _ctx._thread_local_data 

5803 if tld.is_eager: 

5804 try: 

5805 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5806 _ctx, "LeakyReluGrad", name, gradients, features, "alpha", alpha) 

5807 return _result 

5808 except _core._NotOkStatusException as e: 

5809 _ops.raise_from_not_ok_status(e, name) 

5810 except _core._FallbackException: 

5811 pass 

5812 try: 

5813 return leaky_relu_grad_eager_fallback( 

5814 gradients, features, alpha=alpha, name=name, ctx=_ctx) 

5815 except _core._SymbolicException: 

5816 pass # Add nodes to the TensorFlow graph. 

5817 # Add nodes to the TensorFlow graph. 

5818 if alpha is None: 

5819 alpha = 0.2 

5820 alpha = _execute.make_float(alpha, "alpha") 

5821 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5822 "LeakyReluGrad", gradients=gradients, features=features, alpha=alpha, 

5823 name=name) 

5824 _result = _outputs[:] 

5825 if _execute.must_record_gradient(): 

5826 _attrs = ("alpha", _op.get_attr("alpha"), "T", _op._get_attr_type("T")) 

5827 _inputs_flat = _op.inputs 

5828 _execute.record_gradient( 

5829 "LeakyReluGrad", _inputs_flat, _attrs, _result) 

5830 _result, = _result 

5831 return _result 

5832 

5833LeakyReluGrad = tf_export("raw_ops.LeakyReluGrad")(_ops.to_raw_op(leaky_relu_grad)) 

5834 

5835 

5836def leaky_relu_grad_eager_fallback(gradients, features, alpha, name, ctx): 

5837 if alpha is None: 

5838 alpha = 0.2 

5839 alpha = _execute.make_float(alpha, "alpha") 

5840 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ], _dtypes.float32) 

5841 (gradients, features) = _inputs_T 

5842 _inputs_flat = [gradients, features] 

5843 _attrs = ("alpha", alpha, "T", _attr_T) 

5844 _result = _execute.execute(b"LeakyReluGrad", 1, inputs=_inputs_flat, 

5845 attrs=_attrs, ctx=ctx, name=name) 

5846 if _execute.must_record_gradient(): 

5847 _execute.record_gradient( 

5848 "LeakyReluGrad", _inputs_flat, _attrs, _result) 

5849 _result, = _result 

5850 return _result 

5851 

5852 

5853def log_softmax(logits, name=None): 

5854 r"""Computes log softmax activations. 

5855 

5856 For each batch `i` and class `j` we have 

5857 

5858 logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) 

5859 

5860 Args: 

5861 logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

5862 2-D with shape `[batch_size, num_classes]`. 

5863 name: A name for the operation (optional). 

5864 

5865 Returns: 

5866 A `Tensor`. Has the same type as `logits`. 

5867 """ 

5868 _ctx = _context._context or _context.context() 

5869 tld = _ctx._thread_local_data 

5870 if tld.is_eager: 

5871 try: 

5872 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5873 _ctx, "LogSoftmax", name, logits) 

5874 return _result 

5875 except _core._NotOkStatusException as e: 

5876 _ops.raise_from_not_ok_status(e, name) 

5877 except _core._FallbackException: 

5878 pass 

5879 try: 

5880 return log_softmax_eager_fallback( 

5881 logits, name=name, ctx=_ctx) 

5882 except _core._SymbolicException: 

5883 pass # Add nodes to the TensorFlow graph. 

5884 # Add nodes to the TensorFlow graph. 

5885 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5886 "LogSoftmax", logits=logits, name=name) 

5887 _result = _outputs[:] 

5888 if _execute.must_record_gradient(): 

5889 _attrs = ("T", _op._get_attr_type("T")) 

5890 _inputs_flat = _op.inputs 

5891 _execute.record_gradient( 

5892 "LogSoftmax", _inputs_flat, _attrs, _result) 

5893 _result, = _result 

5894 return _result 

5895 

5896LogSoftmax = tf_export("raw_ops.LogSoftmax")(_ops.to_raw_op(log_softmax)) 

5897 

5898 

5899def log_softmax_eager_fallback(logits, name, ctx): 

5900 _attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

5901 _inputs_flat = [logits] 

5902 _attrs = ("T", _attr_T) 

5903 _result = _execute.execute(b"LogSoftmax", 1, inputs=_inputs_flat, 

5904 attrs=_attrs, ctx=ctx, name=name) 

5905 if _execute.must_record_gradient(): 

5906 _execute.record_gradient( 

5907 "LogSoftmax", _inputs_flat, _attrs, _result) 

5908 _result, = _result 

5909 return _result 

5910 

5911 

5912def max_pool(input, ksize, strides, padding, explicit_paddings=[], data_format="NHWC", name=None): 

5913 r"""Performs max pooling on the input. 

5914 

5915 Args: 

5916 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`. 

5917 4-D input to pool over. 

5918 ksize: A list of `ints` that has length `>= 4`. 

5919 The size of the window for each dimension of the input tensor. 

5920 strides: A list of `ints` that has length `>= 4`. 

5921 The stride of the sliding window for each dimension of the 

5922 input tensor. 

5923 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

5924 The type of padding algorithm to use. 

5925 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

5926 data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`. 

5927 Specify the data format of the input and output data. With the 

5928 default format "NHWC", the data is stored in the order of: 

5929 [batch, in_height, in_width, in_channels]. 

5930 Alternatively, the format could be "NCHW", the data storage order of: 

5931 [batch, in_channels, in_height, in_width]. 

5932 name: A name for the operation (optional). 

5933 

5934 Returns: 

5935 A `Tensor`. Has the same type as `input`. 

5936 """ 

5937 _ctx = _context._context or _context.context() 

5938 tld = _ctx._thread_local_data 

5939 if tld.is_eager: 

5940 try: 

5941 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

5942 _ctx, "MaxPool", name, input, "ksize", ksize, "strides", strides, 

5943 "padding", padding, "explicit_paddings", explicit_paddings, 

5944 "data_format", data_format) 

5945 return _result 

5946 except _core._NotOkStatusException as e: 

5947 _ops.raise_from_not_ok_status(e, name) 

5948 except _core._FallbackException: 

5949 pass 

5950 try: 

5951 return max_pool_eager_fallback( 

5952 input, ksize=ksize, strides=strides, padding=padding, 

5953 explicit_paddings=explicit_paddings, data_format=data_format, 

5954 name=name, ctx=_ctx) 

5955 except _core._SymbolicException: 

5956 pass # Add nodes to the TensorFlow graph. 

5957 # Add nodes to the TensorFlow graph. 

5958 if not isinstance(ksize, (list, tuple)): 

5959 raise TypeError( 

5960 "Expected list for 'ksize' argument to " 

5961 "'max_pool' Op, not %r." % ksize) 

5962 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

5963 if not isinstance(strides, (list, tuple)): 

5964 raise TypeError( 

5965 "Expected list for 'strides' argument to " 

5966 "'max_pool' Op, not %r." % strides) 

5967 strides = [_execute.make_int(_i, "strides") for _i in strides] 

5968 padding = _execute.make_str(padding, "padding") 

5969 if explicit_paddings is None: 

5970 explicit_paddings = [] 

5971 if not isinstance(explicit_paddings, (list, tuple)): 

5972 raise TypeError( 

5973 "Expected list for 'explicit_paddings' argument to " 

5974 "'max_pool' Op, not %r." % explicit_paddings) 

5975 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

5976 if data_format is None: 

5977 data_format = "NHWC" 

5978 data_format = _execute.make_str(data_format, "data_format") 

5979 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

5980 "MaxPool", input=input, ksize=ksize, strides=strides, padding=padding, 

5981 explicit_paddings=explicit_paddings, 

5982 data_format=data_format, name=name) 

5983 _result = _outputs[:] 

5984 if _execute.must_record_gradient(): 

5985 _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), 

5986 "strides", _op.get_attr("strides"), "padding", 

5987 _op.get_attr("padding"), "explicit_paddings", 

5988 _op.get_attr("explicit_paddings"), "data_format", 

5989 _op.get_attr("data_format")) 

5990 _inputs_flat = _op.inputs 

5991 _execute.record_gradient( 

5992 "MaxPool", _inputs_flat, _attrs, _result) 

5993 _result, = _result 

5994 return _result 

5995 

5996MaxPool = tf_export("raw_ops.MaxPool")(_ops.to_raw_op(max_pool)) 

5997 

5998 

5999def max_pool_eager_fallback(input, ksize, strides, padding, explicit_paddings, data_format, name, ctx): 

6000 if not isinstance(ksize, (list, tuple)): 

6001 raise TypeError( 

6002 "Expected list for 'ksize' argument to " 

6003 "'max_pool' Op, not %r." % ksize) 

6004 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6005 if not isinstance(strides, (list, tuple)): 

6006 raise TypeError( 

6007 "Expected list for 'strides' argument to " 

6008 "'max_pool' Op, not %r." % strides) 

6009 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6010 padding = _execute.make_str(padding, "padding") 

6011 if explicit_paddings is None: 

6012 explicit_paddings = [] 

6013 if not isinstance(explicit_paddings, (list, tuple)): 

6014 raise TypeError( 

6015 "Expected list for 'explicit_paddings' argument to " 

6016 "'max_pool' Op, not %r." % explicit_paddings) 

6017 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

6018 if data_format is None: 

6019 data_format = "NHWC" 

6020 data_format = _execute.make_str(data_format, "data_format") 

6021 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.uint16, _dtypes.qint8, ], _dtypes.float32) 

6022 _inputs_flat = [input] 

6023 _attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding", 

6024 padding, "explicit_paddings", explicit_paddings, "data_format", data_format) 

6025 _result = _execute.execute(b"MaxPool", 1, inputs=_inputs_flat, attrs=_attrs, 

6026 ctx=ctx, name=name) 

6027 if _execute.must_record_gradient(): 

6028 _execute.record_gradient( 

6029 "MaxPool", _inputs_flat, _attrs, _result) 

6030 _result, = _result 

6031 return _result 

6032 

6033 

6034def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): 

6035 r"""Performs 3D max pooling on the input. 

6036 

6037 Args: 

6038 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

6039 Shape `[batch, depth, rows, cols, channels]` tensor to pool over. 

6040 ksize: A list of `ints` that has length `>= 5`. 

6041 1-D tensor of length 5. The size of the window for each dimension of 

6042 the input tensor. Must have `ksize[0] = ksize[4] = 1`. 

6043 strides: A list of `ints` that has length `>= 5`. 

6044 1-D tensor of length 5. The stride of the sliding window for each 

6045 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

6046 padding: A `string` from: `"SAME", "VALID"`. 

6047 The type of padding algorithm to use. 

6048 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

6049 The data format of the input and output data. With the 

6050 default format "NDHWC", the data is stored in the order of: 

6051 [batch, in_depth, in_height, in_width, in_channels]. 

6052 Alternatively, the format could be "NCDHW", the data storage order is: 

6053 [batch, in_channels, in_depth, in_height, in_width]. 

6054 name: A name for the operation (optional). 

6055 

6056 Returns: 

6057 A `Tensor`. Has the same type as `input`. 

6058 """ 

6059 _ctx = _context._context or _context.context() 

6060 tld = _ctx._thread_local_data 

6061 if tld.is_eager: 

6062 try: 

6063 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6064 _ctx, "MaxPool3D", name, input, "ksize", ksize, "strides", strides, 

6065 "padding", padding, "data_format", data_format) 

6066 return _result 

6067 except _core._NotOkStatusException as e: 

6068 _ops.raise_from_not_ok_status(e, name) 

6069 except _core._FallbackException: 

6070 pass 

6071 try: 

6072 return max_pool3d_eager_fallback( 

6073 input, ksize=ksize, strides=strides, padding=padding, 

6074 data_format=data_format, name=name, ctx=_ctx) 

6075 except _core._SymbolicException: 

6076 pass # Add nodes to the TensorFlow graph. 

6077 # Add nodes to the TensorFlow graph. 

6078 if not isinstance(ksize, (list, tuple)): 

6079 raise TypeError( 

6080 "Expected list for 'ksize' argument to " 

6081 "'max_pool3d' Op, not %r." % ksize) 

6082 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6083 if not isinstance(strides, (list, tuple)): 

6084 raise TypeError( 

6085 "Expected list for 'strides' argument to " 

6086 "'max_pool3d' Op, not %r." % strides) 

6087 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6088 padding = _execute.make_str(padding, "padding") 

6089 if data_format is None: 

6090 data_format = "NDHWC" 

6091 data_format = _execute.make_str(data_format, "data_format") 

6092 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6093 "MaxPool3D", input=input, ksize=ksize, strides=strides, 

6094 padding=padding, data_format=data_format, name=name) 

6095 _result = _outputs[:] 

6096 if _execute.must_record_gradient(): 

6097 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6098 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6099 "data_format", _op.get_attr("data_format"), "T", 

6100 _op._get_attr_type("T")) 

6101 _inputs_flat = _op.inputs 

6102 _execute.record_gradient( 

6103 "MaxPool3D", _inputs_flat, _attrs, _result) 

6104 _result, = _result 

6105 return _result 

6106 

6107MaxPool3D = tf_export("raw_ops.MaxPool3D")(_ops.to_raw_op(max_pool3d)) 

6108 

6109 

6110def max_pool3d_eager_fallback(input, ksize, strides, padding, data_format, name, ctx): 

6111 if not isinstance(ksize, (list, tuple)): 

6112 raise TypeError( 

6113 "Expected list for 'ksize' argument to " 

6114 "'max_pool3d' Op, not %r." % ksize) 

6115 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6116 if not isinstance(strides, (list, tuple)): 

6117 raise TypeError( 

6118 "Expected list for 'strides' argument to " 

6119 "'max_pool3d' Op, not %r." % strides) 

6120 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6121 padding = _execute.make_str(padding, "padding") 

6122 if data_format is None: 

6123 data_format = "NDHWC" 

6124 data_format = _execute.make_str(data_format, "data_format") 

6125 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ]) 

6126 _inputs_flat = [input] 

6127 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6128 "data_format", data_format, "T", _attr_T) 

6129 _result = _execute.execute(b"MaxPool3D", 1, inputs=_inputs_flat, 

6130 attrs=_attrs, ctx=ctx, name=name) 

6131 if _execute.must_record_gradient(): 

6132 _execute.record_gradient( 

6133 "MaxPool3D", _inputs_flat, _attrs, _result) 

6134 _result, = _result 

6135 return _result 

6136 

6137 

6138def max_pool3d_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None): 

6139 r"""Computes gradients of 3D max pooling function. 

6140 

6141 Args: 

6142 orig_input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

6143 The original input tensor. 

6144 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6145 The original output tensor. 

6146 grad: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`. 

6147 Output backprop of shape `[batch, depth, rows, cols, channels]`. 

6148 ksize: A list of `ints` that has length `>= 5`. 

6149 1-D tensor of length 5. The size of the window for each dimension of 

6150 the input tensor. Must have `ksize[0] = ksize[4] = 1`. 

6151 strides: A list of `ints` that has length `>= 5`. 

6152 1-D tensor of length 5. The stride of the sliding window for each 

6153 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

6154 padding: A `string` from: `"SAME", "VALID"`. 

6155 The type of padding algorithm to use. 

6156 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

6157 The data format of the input and output data. With the 

6158 default format "NDHWC", the data is stored in the order of: 

6159 [batch, in_depth, in_height, in_width, in_channels]. 

6160 Alternatively, the format could be "NCDHW", the data storage order is: 

6161 [batch, in_channels, in_depth, in_height, in_width]. 

6162 name: A name for the operation (optional). 

6163 

6164 Returns: 

6165 A `Tensor`. Has the same type as `grad`. 

6166 """ 

6167 _ctx = _context._context or _context.context() 

6168 tld = _ctx._thread_local_data 

6169 if tld.is_eager: 

6170 try: 

6171 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6172 _ctx, "MaxPool3DGrad", name, orig_input, orig_output, grad, "ksize", 

6173 ksize, "strides", strides, "padding", padding, "data_format", 

6174 data_format) 

6175 return _result 

6176 except _core._NotOkStatusException as e: 

6177 _ops.raise_from_not_ok_status(e, name) 

6178 except _core._FallbackException: 

6179 pass 

6180 try: 

6181 return max_pool3d_grad_eager_fallback( 

6182 orig_input, orig_output, grad, ksize=ksize, strides=strides, 

6183 padding=padding, data_format=data_format, name=name, ctx=_ctx) 

6184 except _core._SymbolicException: 

6185 pass # Add nodes to the TensorFlow graph. 

6186 # Add nodes to the TensorFlow graph. 

6187 if not isinstance(ksize, (list, tuple)): 

6188 raise TypeError( 

6189 "Expected list for 'ksize' argument to " 

6190 "'max_pool3d_grad' Op, not %r." % ksize) 

6191 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6192 if not isinstance(strides, (list, tuple)): 

6193 raise TypeError( 

6194 "Expected list for 'strides' argument to " 

6195 "'max_pool3d_grad' Op, not %r." % strides) 

6196 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6197 padding = _execute.make_str(padding, "padding") 

6198 if data_format is None: 

6199 data_format = "NDHWC" 

6200 data_format = _execute.make_str(data_format, "data_format") 

6201 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6202 "MaxPool3DGrad", orig_input=orig_input, orig_output=orig_output, 

6203 grad=grad, ksize=ksize, strides=strides, 

6204 padding=padding, data_format=data_format, name=name) 

6205 _result = _outputs[:] 

6206 if _execute.must_record_gradient(): 

6207 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6208 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6209 "data_format", _op.get_attr("data_format"), "T", 

6210 _op._get_attr_type("T"), "TInput", _op._get_attr_type("TInput")) 

6211 _inputs_flat = _op.inputs 

6212 _execute.record_gradient( 

6213 "MaxPool3DGrad", _inputs_flat, _attrs, _result) 

6214 _result, = _result 

6215 return _result 

6216 

6217MaxPool3DGrad = tf_export("raw_ops.MaxPool3DGrad")(_ops.to_raw_op(max_pool3d_grad)) 

6218 

6219 

6220def max_pool3d_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx): 

6221 if not isinstance(ksize, (list, tuple)): 

6222 raise TypeError( 

6223 "Expected list for 'ksize' argument to " 

6224 "'max_pool3d_grad' Op, not %r." % ksize) 

6225 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6226 if not isinstance(strides, (list, tuple)): 

6227 raise TypeError( 

6228 "Expected list for 'strides' argument to " 

6229 "'max_pool3d_grad' Op, not %r." % strides) 

6230 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6231 padding = _execute.make_str(padding, "padding") 

6232 if data_format is None: 

6233 data_format = "NDHWC" 

6234 data_format = _execute.make_str(data_format, "data_format") 

6235 _attr_T, (grad,) = _execute.args_to_matching_eager([grad], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32) 

6236 _attr_TInput, _inputs_TInput = _execute.args_to_matching_eager([orig_input, orig_output], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, ], _dtypes.float32) 

6237 (orig_input, orig_output) = _inputs_TInput 

6238 _inputs_flat = [orig_input, orig_output, grad] 

6239 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6240 "data_format", data_format, "T", _attr_T, "TInput", _attr_TInput) 

6241 _result = _execute.execute(b"MaxPool3DGrad", 1, inputs=_inputs_flat, 

6242 attrs=_attrs, ctx=ctx, name=name) 

6243 if _execute.must_record_gradient(): 

6244 _execute.record_gradient( 

6245 "MaxPool3DGrad", _inputs_flat, _attrs, _result) 

6246 _result, = _result 

6247 return _result 

6248 

6249 

6250def max_pool3d_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NDHWC", name=None): 

6251 r"""Computes second-order gradients of the maxpooling function. 

6252 

6253 Args: 

6254 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6255 The original input tensor. 

6256 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6257 The original output tensor. 

6258 grad: A `Tensor`. Must have the same type as `orig_input`. 

6259 Output backprop of shape `[batch, depth, rows, cols, channels]`. 

6260 ksize: A list of `ints` that has length `>= 5`. 

6261 1-D tensor of length 5. The size of the window for each dimension of 

6262 the input tensor. Must have `ksize[0] = ksize[4] = 1`. 

6263 strides: A list of `ints` that has length `>= 5`. 

6264 1-D tensor of length 5. The stride of the sliding window for each 

6265 dimension of `input`. Must have `strides[0] = strides[4] = 1`. 

6266 padding: A `string` from: `"SAME", "VALID"`. 

6267 The type of padding algorithm to use. 

6268 data_format: An optional `string` from: `"NDHWC", "NCDHW"`. Defaults to `"NDHWC"`. 

6269 The data format of the input and output data. With the 

6270 default format "NDHWC", the data is stored in the order of: 

6271 [batch, in_depth, in_height, in_width, in_channels]. 

6272 Alternatively, the format could be "NCDHW", the data storage order is: 

6273 [batch, in_channels, in_depth, in_height, in_width]. 

6274 name: A name for the operation (optional). 

6275 

6276 Returns: 

6277 A `Tensor`. Has the same type as `orig_input`. 

6278 """ 

6279 _ctx = _context._context or _context.context() 

6280 tld = _ctx._thread_local_data 

6281 if tld.is_eager: 

6282 try: 

6283 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6284 _ctx, "MaxPool3DGradGrad", name, orig_input, orig_output, grad, 

6285 "ksize", ksize, "strides", strides, "padding", padding, "data_format", 

6286 data_format) 

6287 return _result 

6288 except _core._NotOkStatusException as e: 

6289 _ops.raise_from_not_ok_status(e, name) 

6290 except _core._FallbackException: 

6291 pass 

6292 try: 

6293 return max_pool3d_grad_grad_eager_fallback( 

6294 orig_input, orig_output, grad, ksize=ksize, strides=strides, 

6295 padding=padding, data_format=data_format, name=name, ctx=_ctx) 

6296 except _core._SymbolicException: 

6297 pass # Add nodes to the TensorFlow graph. 

6298 # Add nodes to the TensorFlow graph. 

6299 if not isinstance(ksize, (list, tuple)): 

6300 raise TypeError( 

6301 "Expected list for 'ksize' argument to " 

6302 "'max_pool3d_grad_grad' Op, not %r." % ksize) 

6303 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6304 if not isinstance(strides, (list, tuple)): 

6305 raise TypeError( 

6306 "Expected list for 'strides' argument to " 

6307 "'max_pool3d_grad_grad' Op, not %r." % strides) 

6308 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6309 padding = _execute.make_str(padding, "padding") 

6310 if data_format is None: 

6311 data_format = "NDHWC" 

6312 data_format = _execute.make_str(data_format, "data_format") 

6313 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6314 "MaxPool3DGradGrad", orig_input=orig_input, orig_output=orig_output, 

6315 grad=grad, ksize=ksize, strides=strides, 

6316 padding=padding, data_format=data_format, 

6317 name=name) 

6318 _result = _outputs[:] 

6319 if _execute.must_record_gradient(): 

6320 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6321 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6322 "data_format", _op.get_attr("data_format"), "T", 

6323 _op._get_attr_type("T")) 

6324 _inputs_flat = _op.inputs 

6325 _execute.record_gradient( 

6326 "MaxPool3DGradGrad", _inputs_flat, _attrs, _result) 

6327 _result, = _result 

6328 return _result 

6329 

6330MaxPool3DGradGrad = tf_export("raw_ops.MaxPool3DGradGrad")(_ops.to_raw_op(max_pool3d_grad_grad)) 

6331 

6332 

6333def max_pool3d_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx): 

6334 if not isinstance(ksize, (list, tuple)): 

6335 raise TypeError( 

6336 "Expected list for 'ksize' argument to " 

6337 "'max_pool3d_grad_grad' Op, not %r." % ksize) 

6338 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6339 if not isinstance(strides, (list, tuple)): 

6340 raise TypeError( 

6341 "Expected list for 'strides' argument to " 

6342 "'max_pool3d_grad_grad' Op, not %r." % strides) 

6343 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6344 padding = _execute.make_str(padding, "padding") 

6345 if data_format is None: 

6346 data_format = "NDHWC" 

6347 data_format = _execute.make_str(data_format, "data_format") 

6348 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

6349 (orig_input, orig_output, grad) = _inputs_T 

6350 _inputs_flat = [orig_input, orig_output, grad] 

6351 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6352 "data_format", data_format, "T", _attr_T) 

6353 _result = _execute.execute(b"MaxPool3DGradGrad", 1, inputs=_inputs_flat, 

6354 attrs=_attrs, ctx=ctx, name=name) 

6355 if _execute.must_record_gradient(): 

6356 _execute.record_gradient( 

6357 "MaxPool3DGradGrad", _inputs_flat, _attrs, _result) 

6358 _result, = _result 

6359 return _result 

6360 

6361 

6362def max_pool_grad(orig_input, orig_output, grad, ksize, strides, padding, explicit_paddings=[], data_format="NHWC", name=None): 

6363 r"""Computes gradients of the maxpooling function. 

6364 

6365 Args: 

6366 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6367 The original input tensor. 

6368 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6369 The original output tensor. 

6370 grad: A `Tensor`. Must have the same type as `orig_input`. 

6371 4-D. Gradients w.r.t. the output of `max_pool`. 

6372 ksize: A list of `ints` that has length `>= 4`. 

6373 The size of the window for each dimension of the input tensor. 

6374 strides: A list of `ints` that has length `>= 4`. 

6375 The stride of the sliding window for each dimension of the 

6376 input tensor. 

6377 padding: A `string` from: `"SAME", "VALID", "EXPLICIT"`. 

6378 The type of padding algorithm to use. 

6379 explicit_paddings: An optional list of `ints`. Defaults to `[]`. 

6380 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

6381 Specify the data format of the input and output data. With the 

6382 default format "NHWC", the data is stored in the order of: 

6383 [batch, in_height, in_width, in_channels]. 

6384 Alternatively, the format could be "NCHW", the data storage order of: 

6385 [batch, in_channels, in_height, in_width]. 

6386 name: A name for the operation (optional). 

6387 

6388 Returns: 

6389 A `Tensor`. Has the same type as `orig_input`. 

6390 """ 

6391 _ctx = _context._context or _context.context() 

6392 tld = _ctx._thread_local_data 

6393 if tld.is_eager: 

6394 try: 

6395 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6396 _ctx, "MaxPoolGrad", name, orig_input, orig_output, grad, "ksize", 

6397 ksize, "strides", strides, "padding", padding, "explicit_paddings", 

6398 explicit_paddings, "data_format", data_format) 

6399 return _result 

6400 except _core._NotOkStatusException as e: 

6401 _ops.raise_from_not_ok_status(e, name) 

6402 except _core._FallbackException: 

6403 pass 

6404 try: 

6405 return max_pool_grad_eager_fallback( 

6406 orig_input, orig_output, grad, ksize=ksize, strides=strides, 

6407 padding=padding, explicit_paddings=explicit_paddings, 

6408 data_format=data_format, name=name, ctx=_ctx) 

6409 except _core._SymbolicException: 

6410 pass # Add nodes to the TensorFlow graph. 

6411 # Add nodes to the TensorFlow graph. 

6412 if not isinstance(ksize, (list, tuple)): 

6413 raise TypeError( 

6414 "Expected list for 'ksize' argument to " 

6415 "'max_pool_grad' Op, not %r." % ksize) 

6416 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6417 if not isinstance(strides, (list, tuple)): 

6418 raise TypeError( 

6419 "Expected list for 'strides' argument to " 

6420 "'max_pool_grad' Op, not %r." % strides) 

6421 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6422 padding = _execute.make_str(padding, "padding") 

6423 if explicit_paddings is None: 

6424 explicit_paddings = [] 

6425 if not isinstance(explicit_paddings, (list, tuple)): 

6426 raise TypeError( 

6427 "Expected list for 'explicit_paddings' argument to " 

6428 "'max_pool_grad' Op, not %r." % explicit_paddings) 

6429 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

6430 if data_format is None: 

6431 data_format = "NHWC" 

6432 data_format = _execute.make_str(data_format, "data_format") 

6433 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6434 "MaxPoolGrad", orig_input=orig_input, orig_output=orig_output, 

6435 grad=grad, ksize=ksize, strides=strides, 

6436 padding=padding, explicit_paddings=explicit_paddings, 

6437 data_format=data_format, name=name) 

6438 _result = _outputs[:] 

6439 if _execute.must_record_gradient(): 

6440 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6441 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6442 "explicit_paddings", _op.get_attr("explicit_paddings"), 

6443 "data_format", _op.get_attr("data_format"), "T", 

6444 _op._get_attr_type("T")) 

6445 _inputs_flat = _op.inputs 

6446 _execute.record_gradient( 

6447 "MaxPoolGrad", _inputs_flat, _attrs, _result) 

6448 _result, = _result 

6449 return _result 

6450 

6451MaxPoolGrad = tf_export("raw_ops.MaxPoolGrad")(_ops.to_raw_op(max_pool_grad)) 

6452 

6453 

6454def max_pool_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, explicit_paddings, data_format, name, ctx): 

6455 if not isinstance(ksize, (list, tuple)): 

6456 raise TypeError( 

6457 "Expected list for 'ksize' argument to " 

6458 "'max_pool_grad' Op, not %r." % ksize) 

6459 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6460 if not isinstance(strides, (list, tuple)): 

6461 raise TypeError( 

6462 "Expected list for 'strides' argument to " 

6463 "'max_pool_grad' Op, not %r." % strides) 

6464 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6465 padding = _execute.make_str(padding, "padding") 

6466 if explicit_paddings is None: 

6467 explicit_paddings = [] 

6468 if not isinstance(explicit_paddings, (list, tuple)): 

6469 raise TypeError( 

6470 "Expected list for 'explicit_paddings' argument to " 

6471 "'max_pool_grad' Op, not %r." % explicit_paddings) 

6472 explicit_paddings = [_execute.make_int(_i, "explicit_paddings") for _i in explicit_paddings] 

6473 if data_format is None: 

6474 data_format = "NHWC" 

6475 data_format = _execute.make_str(data_format, "data_format") 

6476 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ], _dtypes.float32) 

6477 (orig_input, orig_output, grad) = _inputs_T 

6478 _inputs_flat = [orig_input, orig_output, grad] 

6479 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6480 "explicit_paddings", explicit_paddings, "data_format", data_format, "T", 

6481 _attr_T) 

6482 _result = _execute.execute(b"MaxPoolGrad", 1, inputs=_inputs_flat, 

6483 attrs=_attrs, ctx=ctx, name=name) 

6484 if _execute.must_record_gradient(): 

6485 _execute.record_gradient( 

6486 "MaxPoolGrad", _inputs_flat, _attrs, _result) 

6487 _result, = _result 

6488 return _result 

6489 

6490 

6491def max_pool_grad_grad(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None): 

6492 r"""Computes second-order gradients of the maxpooling function. 

6493 

6494 Args: 

6495 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6496 The original input tensor. 

6497 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6498 The original output tensor. 

6499 grad: A `Tensor`. Must have the same type as `orig_input`. 

6500 4-D. Gradients of gradients w.r.t. the input of `max_pool`. 

6501 ksize: A list of `ints` that has length `>= 4`. 

6502 The size of the window for each dimension of the input tensor. 

6503 strides: A list of `ints` that has length `>= 4`. 

6504 The stride of the sliding window for each dimension of the 

6505 input tensor. 

6506 padding: A `string` from: `"SAME", "VALID"`. 

6507 The type of padding algorithm to use. 

6508 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

6509 Specify the data format of the input and output data. With the 

6510 default format "NHWC", the data is stored in the order of: 

6511 [batch, in_height, in_width, in_channels]. 

6512 Alternatively, the format could be "NCHW", the data storage order of: 

6513 [batch, in_channels, in_height, in_width]. 

6514 name: A name for the operation (optional). 

6515 

6516 Returns: 

6517 A `Tensor`. Has the same type as `orig_input`. 

6518 """ 

6519 _ctx = _context._context or _context.context() 

6520 tld = _ctx._thread_local_data 

6521 if tld.is_eager: 

6522 try: 

6523 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6524 _ctx, "MaxPoolGradGrad", name, orig_input, orig_output, grad, "ksize", 

6525 ksize, "strides", strides, "padding", padding, "data_format", 

6526 data_format) 

6527 return _result 

6528 except _core._NotOkStatusException as e: 

6529 _ops.raise_from_not_ok_status(e, name) 

6530 except _core._FallbackException: 

6531 pass 

6532 try: 

6533 return max_pool_grad_grad_eager_fallback( 

6534 orig_input, orig_output, grad, ksize=ksize, strides=strides, 

6535 padding=padding, data_format=data_format, name=name, ctx=_ctx) 

6536 except _core._SymbolicException: 

6537 pass # Add nodes to the TensorFlow graph. 

6538 # Add nodes to the TensorFlow graph. 

6539 if not isinstance(ksize, (list, tuple)): 

6540 raise TypeError( 

6541 "Expected list for 'ksize' argument to " 

6542 "'max_pool_grad_grad' Op, not %r." % ksize) 

6543 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6544 if not isinstance(strides, (list, tuple)): 

6545 raise TypeError( 

6546 "Expected list for 'strides' argument to " 

6547 "'max_pool_grad_grad' Op, not %r." % strides) 

6548 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6549 padding = _execute.make_str(padding, "padding") 

6550 if data_format is None: 

6551 data_format = "NHWC" 

6552 data_format = _execute.make_str(data_format, "data_format") 

6553 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6554 "MaxPoolGradGrad", orig_input=orig_input, orig_output=orig_output, 

6555 grad=grad, ksize=ksize, strides=strides, 

6556 padding=padding, data_format=data_format, 

6557 name=name) 

6558 _result = _outputs[:] 

6559 if _execute.must_record_gradient(): 

6560 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6561 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6562 "data_format", _op.get_attr("data_format"), "T", 

6563 _op._get_attr_type("T")) 

6564 _inputs_flat = _op.inputs 

6565 _execute.record_gradient( 

6566 "MaxPoolGradGrad", _inputs_flat, _attrs, _result) 

6567 _result, = _result 

6568 return _result 

6569 

6570MaxPoolGradGrad = tf_export("raw_ops.MaxPoolGradGrad")(_ops.to_raw_op(max_pool_grad_grad)) 

6571 

6572 

6573def max_pool_grad_grad_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx): 

6574 if not isinstance(ksize, (list, tuple)): 

6575 raise TypeError( 

6576 "Expected list for 'ksize' argument to " 

6577 "'max_pool_grad_grad' Op, not %r." % ksize) 

6578 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6579 if not isinstance(strides, (list, tuple)): 

6580 raise TypeError( 

6581 "Expected list for 'strides' argument to " 

6582 "'max_pool_grad_grad' Op, not %r." % strides) 

6583 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6584 padding = _execute.make_str(padding, "padding") 

6585 if data_format is None: 

6586 data_format = "NHWC" 

6587 data_format = _execute.make_str(data_format, "data_format") 

6588 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

6589 (orig_input, orig_output, grad) = _inputs_T 

6590 _inputs_flat = [orig_input, orig_output, grad] 

6591 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6592 "data_format", data_format, "T", _attr_T) 

6593 _result = _execute.execute(b"MaxPoolGradGrad", 1, inputs=_inputs_flat, 

6594 attrs=_attrs, ctx=ctx, name=name) 

6595 if _execute.must_record_gradient(): 

6596 _execute.record_gradient( 

6597 "MaxPoolGradGrad", _inputs_flat, _attrs, _result) 

6598 _result, = _result 

6599 return _result 

6600 

6601 

6602def max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None): 

6603 r"""Computes second-order gradients of the maxpooling function. 

6604 

6605 Args: 

6606 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6607 The original input tensor. 

6608 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6609 The original output tensor. 

6610 grad: A `Tensor`. Must have the same type as `orig_input`. 

6611 4-D. Gradients of gradients w.r.t. the input of `max_pool`. 

6612 ksize: A `Tensor` of type `int32`. 

6613 The size of the window for each dimension of the input tensor. 

6614 strides: A `Tensor` of type `int32`. 

6615 The stride of the sliding window for each dimension of the 

6616 input tensor. 

6617 padding: A `string` from: `"SAME", "VALID"`. 

6618 The type of padding algorithm to use. 

6619 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

6620 Specify the data format of the input and output data. With the 

6621 default format "NHWC", the data is stored in the order of: 

6622 [batch, in_height, in_width, in_channels]. 

6623 Alternatively, the format could be "NCHW", the data storage order of: 

6624 [batch, in_channels, in_height, in_width]. 

6625 name: A name for the operation (optional). 

6626 

6627 Returns: 

6628 A `Tensor`. Has the same type as `orig_input`. 

6629 """ 

6630 _ctx = _context._context or _context.context() 

6631 tld = _ctx._thread_local_data 

6632 if tld.is_eager: 

6633 try: 

6634 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6635 _ctx, "MaxPoolGradGradV2", name, orig_input, orig_output, grad, ksize, 

6636 strides, "padding", padding, "data_format", data_format) 

6637 return _result 

6638 except _core._NotOkStatusException as e: 

6639 _ops.raise_from_not_ok_status(e, name) 

6640 except _core._FallbackException: 

6641 pass 

6642 try: 

6643 return max_pool_grad_grad_v2_eager_fallback( 

6644 orig_input, orig_output, grad, ksize, strides, padding=padding, 

6645 data_format=data_format, name=name, ctx=_ctx) 

6646 except _core._SymbolicException: 

6647 pass # Add nodes to the TensorFlow graph. 

6648 # Add nodes to the TensorFlow graph. 

6649 padding = _execute.make_str(padding, "padding") 

6650 if data_format is None: 

6651 data_format = "NHWC" 

6652 data_format = _execute.make_str(data_format, "data_format") 

6653 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6654 "MaxPoolGradGradV2", orig_input=orig_input, orig_output=orig_output, 

6655 grad=grad, ksize=ksize, strides=strides, 

6656 padding=padding, data_format=data_format, 

6657 name=name) 

6658 _result = _outputs[:] 

6659 if _execute.must_record_gradient(): 

6660 _attrs = ("padding", _op.get_attr("padding"), "data_format", 

6661 _op.get_attr("data_format"), "T", _op._get_attr_type("T")) 

6662 _inputs_flat = _op.inputs 

6663 _execute.record_gradient( 

6664 "MaxPoolGradGradV2", _inputs_flat, _attrs, _result) 

6665 _result, = _result 

6666 return _result 

6667 

6668MaxPoolGradGradV2 = tf_export("raw_ops.MaxPoolGradGradV2")(_ops.to_raw_op(max_pool_grad_grad_v2)) 

6669 

6670 

6671def max_pool_grad_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx): 

6672 padding = _execute.make_str(padding, "padding") 

6673 if data_format is None: 

6674 data_format = "NHWC" 

6675 data_format = _execute.make_str(data_format, "data_format") 

6676 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

6677 (orig_input, orig_output, grad) = _inputs_T 

6678 ksize = _ops.convert_to_tensor(ksize, _dtypes.int32) 

6679 strides = _ops.convert_to_tensor(strides, _dtypes.int32) 

6680 _inputs_flat = [orig_input, orig_output, grad, ksize, strides] 

6681 _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T) 

6682 _result = _execute.execute(b"MaxPoolGradGradV2", 1, inputs=_inputs_flat, 

6683 attrs=_attrs, ctx=ctx, name=name) 

6684 if _execute.must_record_gradient(): 

6685 _execute.record_gradient( 

6686 "MaxPoolGradGradV2", _inputs_flat, _attrs, _result) 

6687 _result, = _result 

6688 return _result 

6689 

6690 

6691def max_pool_grad_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None): 

6692 r"""Computes second-order gradients of the maxpooling function. 

6693 

6694 Args: 

6695 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6696 The original input. 

6697 grad: A `Tensor`. Must have the same type as `input`. 

6698 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the 

6699 input of `max_pool`. 

6700 argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

6701 The indices of the maximum values chosen for each output of `max_pool`. 

6702 ksize: A list of `ints` that has length `>= 4`. 

6703 The size of the window for each dimension of the input tensor. 

6704 strides: A list of `ints` that has length `>= 4`. 

6705 The stride of the sliding window for each dimension of the 

6706 input tensor. 

6707 padding: A `string` from: `"SAME", "VALID"`. 

6708 The type of padding algorithm to use. 

6709 include_batch_in_index: An optional `bool`. Defaults to `False`. 

6710 Whether to include batch dimension in flattened index of `argmax`. 

6711 name: A name for the operation (optional). 

6712 

6713 Returns: 

6714 A `Tensor`. Has the same type as `input`. 

6715 """ 

6716 _ctx = _context._context or _context.context() 

6717 tld = _ctx._thread_local_data 

6718 if tld.is_eager: 

6719 try: 

6720 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6721 _ctx, "MaxPoolGradGradWithArgmax", name, input, grad, argmax, "ksize", 

6722 ksize, "strides", strides, "padding", padding, 

6723 "include_batch_in_index", include_batch_in_index) 

6724 return _result 

6725 except _core._NotOkStatusException as e: 

6726 _ops.raise_from_not_ok_status(e, name) 

6727 except _core._FallbackException: 

6728 pass 

6729 try: 

6730 return max_pool_grad_grad_with_argmax_eager_fallback( 

6731 input, grad, argmax, ksize=ksize, strides=strides, padding=padding, 

6732 include_batch_in_index=include_batch_in_index, name=name, ctx=_ctx) 

6733 except _core._SymbolicException: 

6734 pass # Add nodes to the TensorFlow graph. 

6735 # Add nodes to the TensorFlow graph. 

6736 if not isinstance(ksize, (list, tuple)): 

6737 raise TypeError( 

6738 "Expected list for 'ksize' argument to " 

6739 "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize) 

6740 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6741 if not isinstance(strides, (list, tuple)): 

6742 raise TypeError( 

6743 "Expected list for 'strides' argument to " 

6744 "'max_pool_grad_grad_with_argmax' Op, not %r." % strides) 

6745 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6746 padding = _execute.make_str(padding, "padding") 

6747 if include_batch_in_index is None: 

6748 include_batch_in_index = False 

6749 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

6750 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6751 "MaxPoolGradGradWithArgmax", input=input, grad=grad, argmax=argmax, 

6752 ksize=ksize, strides=strides, 

6753 padding=padding, 

6754 include_batch_in_index=include_batch_in_index, 

6755 name=name) 

6756 _result = _outputs[:] 

6757 if _execute.must_record_gradient(): 

6758 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6759 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6760 "include_batch_in_index", 

6761 _op._get_attr_bool("include_batch_in_index"), "Targmax", 

6762 _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T")) 

6763 _inputs_flat = _op.inputs 

6764 _execute.record_gradient( 

6765 "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result) 

6766 _result, = _result 

6767 return _result 

6768 

6769MaxPoolGradGradWithArgmax = tf_export("raw_ops.MaxPoolGradGradWithArgmax")(_ops.to_raw_op(max_pool_grad_grad_with_argmax)) 

6770 

6771 

6772def max_pool_grad_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index, name, ctx): 

6773 if not isinstance(ksize, (list, tuple)): 

6774 raise TypeError( 

6775 "Expected list for 'ksize' argument to " 

6776 "'max_pool_grad_grad_with_argmax' Op, not %r." % ksize) 

6777 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6778 if not isinstance(strides, (list, tuple)): 

6779 raise TypeError( 

6780 "Expected list for 'strides' argument to " 

6781 "'max_pool_grad_grad_with_argmax' Op, not %r." % strides) 

6782 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6783 padding = _execute.make_str(padding, "padding") 

6784 if include_batch_in_index is None: 

6785 include_batch_in_index = False 

6786 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

6787 _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], ctx, [_dtypes.int32, _dtypes.int64, ]) 

6788 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

6789 (input, grad) = _inputs_T 

6790 _inputs_flat = [input, grad, argmax] 

6791 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6792 "include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax, 

6793 "T", _attr_T) 

6794 _result = _execute.execute(b"MaxPoolGradGradWithArgmax", 1, 

6795 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

6796 name=name) 

6797 if _execute.must_record_gradient(): 

6798 _execute.record_gradient( 

6799 "MaxPoolGradGradWithArgmax", _inputs_flat, _attrs, _result) 

6800 _result, = _result 

6801 return _result 

6802 

6803 

6804def max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides, padding, data_format="NHWC", name=None): 

6805 r"""Computes gradients of the maxpooling function. 

6806 

6807 Args: 

6808 orig_input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6809 The original input tensor. 

6810 orig_output: A `Tensor`. Must have the same type as `orig_input`. 

6811 The original output tensor. 

6812 grad: A `Tensor`. Must have the same type as `orig_input`. 

6813 4-D. Gradients w.r.t. the output of `max_pool`. 

6814 ksize: A `Tensor` of type `int32`. 

6815 The size of the window for each dimension of the input tensor. 

6816 strides: A `Tensor` of type `int32`. 

6817 The stride of the sliding window for each dimension of the 

6818 input tensor. 

6819 padding: A `string` from: `"SAME", "VALID"`. 

6820 The type of padding algorithm to use. 

6821 data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to `"NHWC"`. 

6822 Specify the data format of the input and output data. With the 

6823 default format "NHWC", the data is stored in the order of: 

6824 [batch, in_height, in_width, in_channels]. 

6825 Alternatively, the format could be "NCHW", the data storage order of: 

6826 [batch, in_channels, in_height, in_width]. 

6827 name: A name for the operation (optional). 

6828 

6829 Returns: 

6830 A `Tensor`. Has the same type as `orig_input`. 

6831 """ 

6832 _ctx = _context._context or _context.context() 

6833 tld = _ctx._thread_local_data 

6834 if tld.is_eager: 

6835 try: 

6836 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6837 _ctx, "MaxPoolGradV2", name, orig_input, orig_output, grad, ksize, 

6838 strides, "padding", padding, "data_format", data_format) 

6839 return _result 

6840 except _core._NotOkStatusException as e: 

6841 _ops.raise_from_not_ok_status(e, name) 

6842 except _core._FallbackException: 

6843 pass 

6844 try: 

6845 return max_pool_grad_v2_eager_fallback( 

6846 orig_input, orig_output, grad, ksize, strides, padding=padding, 

6847 data_format=data_format, name=name, ctx=_ctx) 

6848 except _core._SymbolicException: 

6849 pass # Add nodes to the TensorFlow graph. 

6850 # Add nodes to the TensorFlow graph. 

6851 padding = _execute.make_str(padding, "padding") 

6852 if data_format is None: 

6853 data_format = "NHWC" 

6854 data_format = _execute.make_str(data_format, "data_format") 

6855 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6856 "MaxPoolGradV2", orig_input=orig_input, orig_output=orig_output, 

6857 grad=grad, ksize=ksize, strides=strides, 

6858 padding=padding, data_format=data_format, name=name) 

6859 _result = _outputs[:] 

6860 if _execute.must_record_gradient(): 

6861 _attrs = ("padding", _op.get_attr("padding"), "data_format", 

6862 _op.get_attr("data_format"), "T", _op._get_attr_type("T")) 

6863 _inputs_flat = _op.inputs 

6864 _execute.record_gradient( 

6865 "MaxPoolGradV2", _inputs_flat, _attrs, _result) 

6866 _result, = _result 

6867 return _result 

6868 

6869MaxPoolGradV2 = tf_export("raw_ops.MaxPoolGradV2")(_ops.to_raw_op(max_pool_grad_v2)) 

6870 

6871 

6872def max_pool_grad_v2_eager_fallback(orig_input, orig_output, grad, ksize, strides, padding, data_format, name, ctx): 

6873 padding = _execute.make_str(padding, "padding") 

6874 if data_format is None: 

6875 data_format = "NHWC" 

6876 data_format = _execute.make_str(data_format, "data_format") 

6877 _attr_T, _inputs_T = _execute.args_to_matching_eager([orig_input, orig_output, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ], _dtypes.float32) 

6878 (orig_input, orig_output, grad) = _inputs_T 

6879 ksize = _ops.convert_to_tensor(ksize, _dtypes.int32) 

6880 strides = _ops.convert_to_tensor(strides, _dtypes.int32) 

6881 _inputs_flat = [orig_input, orig_output, grad, ksize, strides] 

6882 _attrs = ("padding", padding, "data_format", data_format, "T", _attr_T) 

6883 _result = _execute.execute(b"MaxPoolGradV2", 1, inputs=_inputs_flat, 

6884 attrs=_attrs, ctx=ctx, name=name) 

6885 if _execute.must_record_gradient(): 

6886 _execute.record_gradient( 

6887 "MaxPoolGradV2", _inputs_flat, _attrs, _result) 

6888 _result, = _result 

6889 return _result 

6890 

6891 

6892def max_pool_grad_with_argmax(input, grad, argmax, ksize, strides, padding, include_batch_in_index=False, name=None): 

6893 r"""Computes gradients of the maxpooling function. 

6894 

6895 Args: 

6896 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

6897 The original input. 

6898 grad: A `Tensor`. Must have the same type as `input`. 

6899 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the 

6900 output of `max_pool`. 

6901 argmax: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

6902 The indices of the maximum values chosen for each output of `max_pool`. 

6903 ksize: A list of `ints` that has length `>= 4`. 

6904 The size of the window for each dimension of the input tensor. 

6905 strides: A list of `ints` that has length `>= 4`. 

6906 The stride of the sliding window for each dimension of the 

6907 input tensor. 

6908 padding: A `string` from: `"SAME", "VALID"`. 

6909 The type of padding algorithm to use. 

6910 include_batch_in_index: An optional `bool`. Defaults to `False`. 

6911 Whether to include batch dimension in flattened index of `argmax`. 

6912 name: A name for the operation (optional). 

6913 

6914 Returns: 

6915 A `Tensor`. Has the same type as `input`. 

6916 """ 

6917 _ctx = _context._context or _context.context() 

6918 tld = _ctx._thread_local_data 

6919 if tld.is_eager: 

6920 try: 

6921 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

6922 _ctx, "MaxPoolGradWithArgmax", name, input, grad, argmax, "ksize", 

6923 ksize, "strides", strides, "padding", padding, 

6924 "include_batch_in_index", include_batch_in_index) 

6925 return _result 

6926 except _core._NotOkStatusException as e: 

6927 _ops.raise_from_not_ok_status(e, name) 

6928 except _core._FallbackException: 

6929 pass 

6930 try: 

6931 return max_pool_grad_with_argmax_eager_fallback( 

6932 input, grad, argmax, ksize=ksize, strides=strides, padding=padding, 

6933 include_batch_in_index=include_batch_in_index, name=name, ctx=_ctx) 

6934 except _core._SymbolicException: 

6935 pass # Add nodes to the TensorFlow graph. 

6936 # Add nodes to the TensorFlow graph. 

6937 if not isinstance(ksize, (list, tuple)): 

6938 raise TypeError( 

6939 "Expected list for 'ksize' argument to " 

6940 "'max_pool_grad_with_argmax' Op, not %r." % ksize) 

6941 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6942 if not isinstance(strides, (list, tuple)): 

6943 raise TypeError( 

6944 "Expected list for 'strides' argument to " 

6945 "'max_pool_grad_with_argmax' Op, not %r." % strides) 

6946 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6947 padding = _execute.make_str(padding, "padding") 

6948 if include_batch_in_index is None: 

6949 include_batch_in_index = False 

6950 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

6951 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

6952 "MaxPoolGradWithArgmax", input=input, grad=grad, argmax=argmax, 

6953 ksize=ksize, strides=strides, 

6954 padding=padding, 

6955 include_batch_in_index=include_batch_in_index, 

6956 name=name) 

6957 _result = _outputs[:] 

6958 if _execute.must_record_gradient(): 

6959 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

6960 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

6961 "include_batch_in_index", 

6962 _op._get_attr_bool("include_batch_in_index"), "Targmax", 

6963 _op._get_attr_type("Targmax"), "T", _op._get_attr_type("T")) 

6964 _inputs_flat = _op.inputs 

6965 _execute.record_gradient( 

6966 "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result) 

6967 _result, = _result 

6968 return _result 

6969 

6970MaxPoolGradWithArgmax = tf_export("raw_ops.MaxPoolGradWithArgmax")(_ops.to_raw_op(max_pool_grad_with_argmax)) 

6971 

6972 

6973def max_pool_grad_with_argmax_eager_fallback(input, grad, argmax, ksize, strides, padding, include_batch_in_index, name, ctx): 

6974 if not isinstance(ksize, (list, tuple)): 

6975 raise TypeError( 

6976 "Expected list for 'ksize' argument to " 

6977 "'max_pool_grad_with_argmax' Op, not %r." % ksize) 

6978 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

6979 if not isinstance(strides, (list, tuple)): 

6980 raise TypeError( 

6981 "Expected list for 'strides' argument to " 

6982 "'max_pool_grad_with_argmax' Op, not %r." % strides) 

6983 strides = [_execute.make_int(_i, "strides") for _i in strides] 

6984 padding = _execute.make_str(padding, "padding") 

6985 if include_batch_in_index is None: 

6986 include_batch_in_index = False 

6987 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

6988 _attr_Targmax, (argmax,) = _execute.args_to_matching_eager([argmax], ctx, [_dtypes.int32, _dtypes.int64, ]) 

6989 _attr_T, _inputs_T = _execute.args_to_matching_eager([input, grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

6990 (input, grad) = _inputs_T 

6991 _inputs_flat = [input, grad, argmax] 

6992 _attrs = ("ksize", ksize, "strides", strides, "padding", padding, 

6993 "include_batch_in_index", include_batch_in_index, "Targmax", _attr_Targmax, 

6994 "T", _attr_T) 

6995 _result = _execute.execute(b"MaxPoolGradWithArgmax", 1, inputs=_inputs_flat, 

6996 attrs=_attrs, ctx=ctx, name=name) 

6997 if _execute.must_record_gradient(): 

6998 _execute.record_gradient( 

6999 "MaxPoolGradWithArgmax", _inputs_flat, _attrs, _result) 

7000 _result, = _result 

7001 return _result 

7002 

7003 

7004def max_pool_v2(input, ksize, strides, padding, data_format="NHWC", name=None): 

7005 r"""Performs max pooling on the input. 

7006 

7007 Args: 

7008 input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`. 

7009 4-D input to pool over. 

7010 ksize: A `Tensor` of type `int32`. 

7011 The size of the window for each dimension of the input tensor. 

7012 strides: A `Tensor` of type `int32`. 

7013 The stride of the sliding window for each dimension of the 

7014 input tensor. 

7015 padding: A `string` from: `"SAME", "VALID"`. 

7016 The type of padding algorithm to use. 

7017 data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`. 

7018 Specify the data format of the input and output data. With the 

7019 default format "NHWC", the data is stored in the order of: 

7020 [batch, in_height, in_width, in_channels]. 

7021 Alternatively, the format could be "NCHW", the data storage order of: 

7022 [batch, in_channels, in_height, in_width]. 

7023 name: A name for the operation (optional). 

7024 

7025 Returns: 

7026 A `Tensor`. Has the same type as `input`. 

7027 """ 

7028 _ctx = _context._context or _context.context() 

7029 tld = _ctx._thread_local_data 

7030 if tld.is_eager: 

7031 try: 

7032 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7033 _ctx, "MaxPoolV2", name, input, ksize, strides, "padding", padding, 

7034 "data_format", data_format) 

7035 return _result 

7036 except _core._NotOkStatusException as e: 

7037 _ops.raise_from_not_ok_status(e, name) 

7038 except _core._FallbackException: 

7039 pass 

7040 try: 

7041 return max_pool_v2_eager_fallback( 

7042 input, ksize, strides, padding=padding, data_format=data_format, 

7043 name=name, ctx=_ctx) 

7044 except _core._SymbolicException: 

7045 pass # Add nodes to the TensorFlow graph. 

7046 # Add nodes to the TensorFlow graph. 

7047 padding = _execute.make_str(padding, "padding") 

7048 if data_format is None: 

7049 data_format = "NHWC" 

7050 data_format = _execute.make_str(data_format, "data_format") 

7051 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7052 "MaxPoolV2", input=input, ksize=ksize, strides=strides, 

7053 padding=padding, data_format=data_format, name=name) 

7054 _result = _outputs[:] 

7055 if _execute.must_record_gradient(): 

7056 _attrs = ("T", _op._get_attr_type("T"), "padding", 

7057 _op.get_attr("padding"), "data_format", 

7058 _op.get_attr("data_format")) 

7059 _inputs_flat = _op.inputs 

7060 _execute.record_gradient( 

7061 "MaxPoolV2", _inputs_flat, _attrs, _result) 

7062 _result, = _result 

7063 return _result 

7064 

7065MaxPoolV2 = tf_export("raw_ops.MaxPoolV2")(_ops.to_raw_op(max_pool_v2)) 

7066 

7067 

7068def max_pool_v2_eager_fallback(input, ksize, strides, padding, data_format, name, ctx): 

7069 padding = _execute.make_str(padding, "padding") 

7070 if data_format is None: 

7071 data_format = "NHWC" 

7072 data_format = _execute.make_str(data_format, "data_format") 

7073 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.int64, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.uint16, _dtypes.qint8, ], _dtypes.float32) 

7074 ksize = _ops.convert_to_tensor(ksize, _dtypes.int32) 

7075 strides = _ops.convert_to_tensor(strides, _dtypes.int32) 

7076 _inputs_flat = [input, ksize, strides] 

7077 _attrs = ("T", _attr_T, "padding", padding, "data_format", data_format) 

7078 _result = _execute.execute(b"MaxPoolV2", 1, inputs=_inputs_flat, 

7079 attrs=_attrs, ctx=ctx, name=name) 

7080 if _execute.must_record_gradient(): 

7081 _execute.record_gradient( 

7082 "MaxPoolV2", _inputs_flat, _attrs, _result) 

7083 _result, = _result 

7084 return _result 

7085 

7086_MaxPoolWithArgmaxOutput = collections.namedtuple( 

7087 "MaxPoolWithArgmax", 

7088 ["output", "argmax"]) 

7089 

7090 

7091def max_pool_with_argmax(input, ksize, strides, padding, Targmax=_dtypes.int64, include_batch_in_index=False, name=None): 

7092 r"""Performs max pooling on the input and outputs both max values and indices. 

7093 

7094 The indices in `argmax` are flattened, so that a maximum value at position 

7095 `[b, y, x, c]` becomes flattened index: 

7096 `(y * width + x) * channels + c` if `include_batch_in_index` is False; 

7097 `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. 

7098 

7099 The indices returned are always in `[0, height) x [0, width)` before flattening, 

7100 even if padding is involved and the mathematically correct answer is outside 

7101 (either negative or too large). This is a bug, but fixing it is difficult to do 

7102 in a safe backwards compatible way, especially due to flattening. 

7103 

7104 Args: 

7105 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

7106 4-D with shape `[batch, height, width, channels]`. Input to pool over. 

7107 ksize: A list of `ints` that has length `>= 4`. 

7108 The size of the window for each dimension of the input tensor. 

7109 strides: A list of `ints` that has length `>= 4`. 

7110 The stride of the sliding window for each dimension of the 

7111 input tensor. 

7112 padding: A `string` from: `"SAME", "VALID"`. 

7113 The type of padding algorithm to use. 

7114 Targmax: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. 

7115 include_batch_in_index: An optional `bool`. Defaults to `False`. 

7116 Whether to include batch dimension in flattened index of `argmax`. 

7117 name: A name for the operation (optional). 

7118 

7119 Returns: 

7120 A tuple of `Tensor` objects (output, argmax). 

7121 

7122 output: A `Tensor`. Has the same type as `input`. 

7123 argmax: A `Tensor` of type `Targmax`. 

7124 """ 

7125 _ctx = _context._context or _context.context() 

7126 tld = _ctx._thread_local_data 

7127 if tld.is_eager: 

7128 try: 

7129 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7130 _ctx, "MaxPoolWithArgmax", name, input, "ksize", ksize, "strides", 

7131 strides, "Targmax", Targmax, "padding", padding, 

7132 "include_batch_in_index", include_batch_in_index) 

7133 _result = _MaxPoolWithArgmaxOutput._make(_result) 

7134 return _result 

7135 except _core._NotOkStatusException as e: 

7136 _ops.raise_from_not_ok_status(e, name) 

7137 except _core._FallbackException: 

7138 pass 

7139 try: 

7140 return max_pool_with_argmax_eager_fallback( 

7141 input, ksize=ksize, strides=strides, Targmax=Targmax, 

7142 padding=padding, include_batch_in_index=include_batch_in_index, 

7143 name=name, ctx=_ctx) 

7144 except _core._SymbolicException: 

7145 pass # Add nodes to the TensorFlow graph. 

7146 # Add nodes to the TensorFlow graph. 

7147 if not isinstance(ksize, (list, tuple)): 

7148 raise TypeError( 

7149 "Expected list for 'ksize' argument to " 

7150 "'max_pool_with_argmax' Op, not %r." % ksize) 

7151 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

7152 if not isinstance(strides, (list, tuple)): 

7153 raise TypeError( 

7154 "Expected list for 'strides' argument to " 

7155 "'max_pool_with_argmax' Op, not %r." % strides) 

7156 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7157 padding = _execute.make_str(padding, "padding") 

7158 if Targmax is None: 

7159 Targmax = _dtypes.int64 

7160 Targmax = _execute.make_type(Targmax, "Targmax") 

7161 if include_batch_in_index is None: 

7162 include_batch_in_index = False 

7163 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

7164 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7165 "MaxPoolWithArgmax", input=input, ksize=ksize, strides=strides, 

7166 padding=padding, Targmax=Targmax, 

7167 include_batch_in_index=include_batch_in_index, 

7168 name=name) 

7169 _result = _outputs[:] 

7170 if _execute.must_record_gradient(): 

7171 _attrs = ("ksize", _op.get_attr("ksize"), "strides", 

7172 _op.get_attr("strides"), "Targmax", 

7173 _op._get_attr_type("Targmax"), "padding", 

7174 _op.get_attr("padding"), "include_batch_in_index", 

7175 _op._get_attr_bool("include_batch_in_index"), "T", 

7176 _op._get_attr_type("T")) 

7177 _inputs_flat = _op.inputs 

7178 _execute.record_gradient( 

7179 "MaxPoolWithArgmax", _inputs_flat, _attrs, _result) 

7180 _result = _MaxPoolWithArgmaxOutput._make(_result) 

7181 return _result 

7182 

7183MaxPoolWithArgmax = tf_export("raw_ops.MaxPoolWithArgmax")(_ops.to_raw_op(max_pool_with_argmax)) 

7184 

7185 

7186def max_pool_with_argmax_eager_fallback(input, ksize, strides, padding, Targmax, include_batch_in_index, name, ctx): 

7187 if not isinstance(ksize, (list, tuple)): 

7188 raise TypeError( 

7189 "Expected list for 'ksize' argument to " 

7190 "'max_pool_with_argmax' Op, not %r." % ksize) 

7191 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

7192 if not isinstance(strides, (list, tuple)): 

7193 raise TypeError( 

7194 "Expected list for 'strides' argument to " 

7195 "'max_pool_with_argmax' Op, not %r." % strides) 

7196 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7197 padding = _execute.make_str(padding, "padding") 

7198 if Targmax is None: 

7199 Targmax = _dtypes.int64 

7200 Targmax = _execute.make_type(Targmax, "Targmax") 

7201 if include_batch_in_index is None: 

7202 include_batch_in_index = False 

7203 include_batch_in_index = _execute.make_bool(include_batch_in_index, "include_batch_in_index") 

7204 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

7205 _inputs_flat = [input] 

7206 _attrs = ("ksize", ksize, "strides", strides, "Targmax", Targmax, "padding", 

7207 padding, "include_batch_in_index", include_batch_in_index, "T", _attr_T) 

7208 _result = _execute.execute(b"MaxPoolWithArgmax", 2, inputs=_inputs_flat, 

7209 attrs=_attrs, ctx=ctx, name=name) 

7210 if _execute.must_record_gradient(): 

7211 _execute.record_gradient( 

7212 "MaxPoolWithArgmax", _inputs_flat, _attrs, _result) 

7213 _result = _MaxPoolWithArgmaxOutput._make(_result) 

7214 return _result 

7215 

7216 

7217def nth_element(input, n, reverse=False, name=None): 

7218 r"""Finds values of the `n`-th order statistic for the last dimension. 

7219 

7220 If the input is a vector (rank-1), finds the entries which is the nth-smallest 

7221 value in the vector and outputs their values as scalar tensor. 

7222 

7223 For matrices (resp. higher rank input), computes the entries which is the 

7224 nth-smallest value in each row (resp. vector along the last dimension). Thus, 

7225 

7226 values.shape = input.shape[:-1] 

7227 

7228 Args: 

7229 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

7230 1-D or higher with last dimension at least `n+1`. 

7231 n: A `Tensor` of type `int32`. 

7232 0-D. Position of sorted vector to select along the last dimension (along 

7233 each row for matrices). Valid range of n is `[0, input.shape[:-1])` 

7234 reverse: An optional `bool`. Defaults to `False`. 

7235 When set to True, find the nth-largest value in the vector and vice 

7236 versa. 

7237 name: A name for the operation (optional). 

7238 

7239 Returns: 

7240 A `Tensor`. Has the same type as `input`. 

7241 """ 

7242 _ctx = _context._context or _context.context() 

7243 tld = _ctx._thread_local_data 

7244 if tld.is_eager: 

7245 try: 

7246 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7247 _ctx, "NthElement", name, input, n, "reverse", reverse) 

7248 return _result 

7249 except _core._NotOkStatusException as e: 

7250 _ops.raise_from_not_ok_status(e, name) 

7251 except _core._FallbackException: 

7252 pass 

7253 try: 

7254 return nth_element_eager_fallback( 

7255 input, n, reverse=reverse, name=name, ctx=_ctx) 

7256 except _core._SymbolicException: 

7257 pass # Add nodes to the TensorFlow graph. 

7258 # Add nodes to the TensorFlow graph. 

7259 if reverse is None: 

7260 reverse = False 

7261 reverse = _execute.make_bool(reverse, "reverse") 

7262 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7263 "NthElement", input=input, n=n, reverse=reverse, name=name) 

7264 _result = _outputs[:] 

7265 if _execute.must_record_gradient(): 

7266 _attrs = ("reverse", _op._get_attr_bool("reverse"), "T", 

7267 _op._get_attr_type("T")) 

7268 _inputs_flat = _op.inputs 

7269 _execute.record_gradient( 

7270 "NthElement", _inputs_flat, _attrs, _result) 

7271 _result, = _result 

7272 return _result 

7273 

7274NthElement = tf_export("raw_ops.NthElement")(_ops.to_raw_op(nth_element)) 

7275 

7276 

7277def nth_element_eager_fallback(input, n, reverse, name, ctx): 

7278 if reverse is None: 

7279 reverse = False 

7280 reverse = _execute.make_bool(reverse, "reverse") 

7281 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

7282 n = _ops.convert_to_tensor(n, _dtypes.int32) 

7283 _inputs_flat = [input, n] 

7284 _attrs = ("reverse", reverse, "T", _attr_T) 

7285 _result = _execute.execute(b"NthElement", 1, inputs=_inputs_flat, 

7286 attrs=_attrs, ctx=ctx, name=name) 

7287 if _execute.must_record_gradient(): 

7288 _execute.record_gradient( 

7289 "NthElement", _inputs_flat, _attrs, _result) 

7290 _result, = _result 

7291 return _result 

7292 

7293_QuantizedAvgPoolOutput = collections.namedtuple( 

7294 "QuantizedAvgPool", 

7295 ["output", "min_output", "max_output"]) 

7296 

7297 

7298def quantized_avg_pool(input, min_input, max_input, ksize, strides, padding, name=None): 

7299 r"""Produces the average pool of the input tensor for quantized types. 

7300 

7301 Args: 

7302 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7303 4-D with shape `[batch, height, width, channels]`. 

7304 min_input: A `Tensor` of type `float32`. 

7305 The float value that the lowest quantized input value represents. 

7306 max_input: A `Tensor` of type `float32`. 

7307 The float value that the highest quantized input value represents. 

7308 ksize: A list of `ints`. 

7309 The size of the window for each dimension of the input tensor. 

7310 The length must be 4 to match the number of dimensions of the input. 

7311 strides: A list of `ints`. 

7312 The stride of the sliding window for each dimension of the input 

7313 tensor. The length must be 4 to match the number of dimensions of the input. 

7314 padding: A `string` from: `"SAME", "VALID"`. 

7315 The type of padding algorithm to use. 

7316 name: A name for the operation (optional). 

7317 

7318 Returns: 

7319 A tuple of `Tensor` objects (output, min_output, max_output). 

7320 

7321 output: A `Tensor`. Has the same type as `input`. 

7322 min_output: A `Tensor` of type `float32`. 

7323 max_output: A `Tensor` of type `float32`. 

7324 """ 

7325 _ctx = _context._context or _context.context() 

7326 tld = _ctx._thread_local_data 

7327 if tld.is_eager: 

7328 try: 

7329 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7330 _ctx, "QuantizedAvgPool", name, input, min_input, max_input, "ksize", 

7331 ksize, "strides", strides, "padding", padding) 

7332 _result = _QuantizedAvgPoolOutput._make(_result) 

7333 return _result 

7334 except _core._NotOkStatusException as e: 

7335 _ops.raise_from_not_ok_status(e, name) 

7336 except _core._FallbackException: 

7337 pass 

7338 try: 

7339 return quantized_avg_pool_eager_fallback( 

7340 input, min_input, max_input, ksize=ksize, strides=strides, 

7341 padding=padding, name=name, ctx=_ctx) 

7342 except _core._SymbolicException: 

7343 pass # Add nodes to the TensorFlow graph. 

7344 # Add nodes to the TensorFlow graph. 

7345 if not isinstance(ksize, (list, tuple)): 

7346 raise TypeError( 

7347 "Expected list for 'ksize' argument to " 

7348 "'quantized_avg_pool' Op, not %r." % ksize) 

7349 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

7350 if not isinstance(strides, (list, tuple)): 

7351 raise TypeError( 

7352 "Expected list for 'strides' argument to " 

7353 "'quantized_avg_pool' Op, not %r." % strides) 

7354 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7355 padding = _execute.make_str(padding, "padding") 

7356 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7357 "QuantizedAvgPool", input=input, min_input=min_input, 

7358 max_input=max_input, ksize=ksize, strides=strides, 

7359 padding=padding, name=name) 

7360 _result = _outputs[:] 

7361 if _execute.must_record_gradient(): 

7362 _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), 

7363 "strides", _op.get_attr("strides"), "padding", 

7364 _op.get_attr("padding")) 

7365 _inputs_flat = _op.inputs 

7366 _execute.record_gradient( 

7367 "QuantizedAvgPool", _inputs_flat, _attrs, _result) 

7368 _result = _QuantizedAvgPoolOutput._make(_result) 

7369 return _result 

7370 

7371QuantizedAvgPool = tf_export("raw_ops.QuantizedAvgPool")(_ops.to_raw_op(quantized_avg_pool)) 

7372 

7373 

7374def quantized_avg_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name, ctx): 

7375 if not isinstance(ksize, (list, tuple)): 

7376 raise TypeError( 

7377 "Expected list for 'ksize' argument to " 

7378 "'quantized_avg_pool' Op, not %r." % ksize) 

7379 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

7380 if not isinstance(strides, (list, tuple)): 

7381 raise TypeError( 

7382 "Expected list for 'strides' argument to " 

7383 "'quantized_avg_pool' Op, not %r." % strides) 

7384 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7385 padding = _execute.make_str(padding, "padding") 

7386 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7387 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

7388 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

7389 _inputs_flat = [input, min_input, max_input] 

7390 _attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding", 

7391 padding) 

7392 _result = _execute.execute(b"QuantizedAvgPool", 3, inputs=_inputs_flat, 

7393 attrs=_attrs, ctx=ctx, name=name) 

7394 if _execute.must_record_gradient(): 

7395 _execute.record_gradient( 

7396 "QuantizedAvgPool", _inputs_flat, _attrs, _result) 

7397 _result = _QuantizedAvgPoolOutput._make(_result) 

7398 return _result 

7399 

7400_QuantizedBatchNormWithGlobalNormalizationOutput = collections.namedtuple( 

7401 "QuantizedBatchNormWithGlobalNormalization", 

7402 ["result", "result_min", "result_max"]) 

7403 

7404 

7405def quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name=None): 

7406 r"""Quantized Batch normalization. 

7407 

7408 This op is deprecated and will be removed in the future. Prefer 

7409 `tf.nn.batch_normalization`. 

7410 

7411 Args: 

7412 t: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7413 A 4D input Tensor. 

7414 t_min: A `Tensor` of type `float32`. 

7415 The value represented by the lowest quantized input. 

7416 t_max: A `Tensor` of type `float32`. 

7417 The value represented by the highest quantized input. 

7418 m: A `Tensor`. Must have the same type as `t`. 

7419 A 1D mean Tensor with size matching the last dimension of t. 

7420 This is the first output from tf.nn.moments, 

7421 or a saved moving average thereof. 

7422 m_min: A `Tensor` of type `float32`. 

7423 The value represented by the lowest quantized mean. 

7424 m_max: A `Tensor` of type `float32`. 

7425 The value represented by the highest quantized mean. 

7426 v: A `Tensor`. Must have the same type as `t`. 

7427 A 1D variance Tensor with size matching the last dimension of t. 

7428 This is the second output from tf.nn.moments, 

7429 or a saved moving average thereof. 

7430 v_min: A `Tensor` of type `float32`. 

7431 The value represented by the lowest quantized variance. 

7432 v_max: A `Tensor` of type `float32`. 

7433 The value represented by the highest quantized variance. 

7434 beta: A `Tensor`. Must have the same type as `t`. 

7435 A 1D beta Tensor with size matching the last dimension of t. 

7436 An offset to be added to the normalized tensor. 

7437 beta_min: A `Tensor` of type `float32`. 

7438 The value represented by the lowest quantized offset. 

7439 beta_max: A `Tensor` of type `float32`. 

7440 The value represented by the highest quantized offset. 

7441 gamma: A `Tensor`. Must have the same type as `t`. 

7442 A 1D gamma Tensor with size matching the last dimension of t. 

7443 If "scale_after_normalization" is true, this tensor will be multiplied 

7444 with the normalized tensor. 

7445 gamma_min: A `Tensor` of type `float32`. 

7446 The value represented by the lowest quantized gamma. 

7447 gamma_max: A `Tensor` of type `float32`. 

7448 The value represented by the highest quantized gamma. 

7449 out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. 

7450 variance_epsilon: A `float`. A small float number to avoid dividing by 0. 

7451 scale_after_normalization: A `bool`. 

7452 A bool indicating whether the resulted tensor 

7453 needs to be multiplied with gamma. 

7454 name: A name for the operation (optional). 

7455 

7456 Returns: 

7457 A tuple of `Tensor` objects (result, result_min, result_max). 

7458 

7459 result: A `Tensor` of type `out_type`. 

7460 result_min: A `Tensor` of type `float32`. 

7461 result_max: A `Tensor` of type `float32`. 

7462 """ 

7463 _ctx = _context._context or _context.context() 

7464 tld = _ctx._thread_local_data 

7465 if tld.is_eager: 

7466 try: 

7467 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7468 _ctx, "QuantizedBatchNormWithGlobalNormalization", name, t, t_min, 

7469 t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, 

7470 gamma, gamma_min, gamma_max, "out_type", out_type, "variance_epsilon", 

7471 variance_epsilon, "scale_after_normalization", 

7472 scale_after_normalization) 

7473 _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result) 

7474 return _result 

7475 except _core._NotOkStatusException as e: 

7476 _ops.raise_from_not_ok_status(e, name) 

7477 except _core._FallbackException: 

7478 pass 

7479 try: 

7480 return quantized_batch_norm_with_global_normalization_eager_fallback( 

7481 t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, 

7482 beta_max, gamma, gamma_min, gamma_max, out_type=out_type, 

7483 variance_epsilon=variance_epsilon, 

7484 scale_after_normalization=scale_after_normalization, name=name, 

7485 ctx=_ctx) 

7486 except _core._SymbolicException: 

7487 pass # Add nodes to the TensorFlow graph. 

7488 # Add nodes to the TensorFlow graph. 

7489 out_type = _execute.make_type(out_type, "out_type") 

7490 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

7491 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

7492 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7493 "QuantizedBatchNormWithGlobalNormalization", t=t, t_min=t_min, 

7494 t_max=t_max, m=m, 

7495 m_min=m_min, m_max=m_max, 

7496 v=v, v_min=v_min, 

7497 v_max=v_max, beta=beta, 

7498 beta_min=beta_min, 

7499 beta_max=beta_max, 

7500 gamma=gamma, 

7501 gamma_min=gamma_min, 

7502 gamma_max=gamma_max, 

7503 out_type=out_type, 

7504 variance_epsilon=variance_epsilon, 

7505 scale_after_normalization=scale_after_normalization, 

7506 name=name) 

7507 _result = _outputs[:] 

7508 if _execute.must_record_gradient(): 

7509 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", 

7510 _op._get_attr_type("out_type"), "variance_epsilon", 

7511 _op.get_attr("variance_epsilon"), "scale_after_normalization", 

7512 _op._get_attr_bool("scale_after_normalization")) 

7513 _inputs_flat = _op.inputs 

7514 _execute.record_gradient( 

7515 "QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result) 

7516 _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result) 

7517 return _result 

7518 

7519QuantizedBatchNormWithGlobalNormalization = tf_export("raw_ops.QuantizedBatchNormWithGlobalNormalization")(_ops.to_raw_op(quantized_batch_norm_with_global_normalization)) 

7520 

7521 

7522def quantized_batch_norm_with_global_normalization_eager_fallback(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, out_type, variance_epsilon, scale_after_normalization, name, ctx): 

7523 out_type = _execute.make_type(out_type, "out_type") 

7524 variance_epsilon = _execute.make_float(variance_epsilon, "variance_epsilon") 

7525 scale_after_normalization = _execute.make_bool(scale_after_normalization, "scale_after_normalization") 

7526 _attr_Tinput, _inputs_Tinput = _execute.args_to_matching_eager([t, m, v, beta, gamma], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7527 (t, m, v, beta, gamma) = _inputs_Tinput 

7528 t_min = _ops.convert_to_tensor(t_min, _dtypes.float32) 

7529 t_max = _ops.convert_to_tensor(t_max, _dtypes.float32) 

7530 m_min = _ops.convert_to_tensor(m_min, _dtypes.float32) 

7531 m_max = _ops.convert_to_tensor(m_max, _dtypes.float32) 

7532 v_min = _ops.convert_to_tensor(v_min, _dtypes.float32) 

7533 v_max = _ops.convert_to_tensor(v_max, _dtypes.float32) 

7534 beta_min = _ops.convert_to_tensor(beta_min, _dtypes.float32) 

7535 beta_max = _ops.convert_to_tensor(beta_max, _dtypes.float32) 

7536 gamma_min = _ops.convert_to_tensor(gamma_min, _dtypes.float32) 

7537 gamma_max = _ops.convert_to_tensor(gamma_max, _dtypes.float32) 

7538 _inputs_flat = [t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max] 

7539 _attrs = ("Tinput", _attr_Tinput, "out_type", out_type, "variance_epsilon", 

7540 variance_epsilon, "scale_after_normalization", scale_after_normalization) 

7541 _result = _execute.execute(b"QuantizedBatchNormWithGlobalNormalization", 3, 

7542 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

7543 name=name) 

7544 if _execute.must_record_gradient(): 

7545 _execute.record_gradient( 

7546 "QuantizedBatchNormWithGlobalNormalization", _inputs_flat, _attrs, _result) 

7547 _result = _QuantizedBatchNormWithGlobalNormalizationOutput._make(_result) 

7548 return _result 

7549 

7550_QuantizedBiasAddOutput = collections.namedtuple( 

7551 "QuantizedBiasAdd", 

7552 ["output", "min_out", "max_out"]) 

7553 

7554 

7555def quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias, out_type, name=None): 

7556 r"""Adds Tensor 'bias' to Tensor 'input' for Quantized types. 

7557 

7558 Broadcasts the values of bias on dimensions 0..N-2 of 'input'. 

7559 

7560 Args: 

7561 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7562 bias: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7563 A 1D bias Tensor with size matching the last dimension of 'input'. 

7564 min_input: A `Tensor` of type `float32`. 

7565 The float value that the lowest quantized input value represents. 

7566 max_input: A `Tensor` of type `float32`. 

7567 The float value that the highest quantized input value represents. 

7568 min_bias: A `Tensor` of type `float32`. 

7569 The float value that the lowest quantized bias value represents. 

7570 max_bias: A `Tensor` of type `float32`. 

7571 The float value that the highest quantized bias value represents. 

7572 out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. 

7573 name: A name for the operation (optional). 

7574 

7575 Returns: 

7576 A tuple of `Tensor` objects (output, min_out, max_out). 

7577 

7578 output: A `Tensor` of type `out_type`. 

7579 min_out: A `Tensor` of type `float32`. 

7580 max_out: A `Tensor` of type `float32`. 

7581 """ 

7582 _ctx = _context._context or _context.context() 

7583 tld = _ctx._thread_local_data 

7584 if tld.is_eager: 

7585 try: 

7586 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7587 _ctx, "QuantizedBiasAdd", name, input, bias, min_input, max_input, 

7588 min_bias, max_bias, "out_type", out_type) 

7589 _result = _QuantizedBiasAddOutput._make(_result) 

7590 return _result 

7591 except _core._NotOkStatusException as e: 

7592 _ops.raise_from_not_ok_status(e, name) 

7593 except _core._FallbackException: 

7594 pass 

7595 try: 

7596 return quantized_bias_add_eager_fallback( 

7597 input, bias, min_input, max_input, min_bias, max_bias, 

7598 out_type=out_type, name=name, ctx=_ctx) 

7599 except _core._SymbolicException: 

7600 pass # Add nodes to the TensorFlow graph. 

7601 # Add nodes to the TensorFlow graph. 

7602 out_type = _execute.make_type(out_type, "out_type") 

7603 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7604 "QuantizedBiasAdd", input=input, bias=bias, min_input=min_input, 

7605 max_input=max_input, min_bias=min_bias, 

7606 max_bias=max_bias, out_type=out_type, name=name) 

7607 _result = _outputs[:] 

7608 if _execute.must_record_gradient(): 

7609 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

7610 "out_type", _op._get_attr_type("out_type")) 

7611 _inputs_flat = _op.inputs 

7612 _execute.record_gradient( 

7613 "QuantizedBiasAdd", _inputs_flat, _attrs, _result) 

7614 _result = _QuantizedBiasAddOutput._make(_result) 

7615 return _result 

7616 

7617QuantizedBiasAdd = tf_export("raw_ops.QuantizedBiasAdd")(_ops.to_raw_op(quantized_bias_add)) 

7618 

7619 

7620def quantized_bias_add_eager_fallback(input, bias, min_input, max_input, min_bias, max_bias, out_type, name, ctx): 

7621 out_type = _execute.make_type(out_type, "out_type") 

7622 _attr_T1, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7623 _attr_T2, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7624 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

7625 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

7626 min_bias = _ops.convert_to_tensor(min_bias, _dtypes.float32) 

7627 max_bias = _ops.convert_to_tensor(max_bias, _dtypes.float32) 

7628 _inputs_flat = [input, bias, min_input, max_input, min_bias, max_bias] 

7629 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "out_type", out_type) 

7630 _result = _execute.execute(b"QuantizedBiasAdd", 3, inputs=_inputs_flat, 

7631 attrs=_attrs, ctx=ctx, name=name) 

7632 if _execute.must_record_gradient(): 

7633 _execute.record_gradient( 

7634 "QuantizedBiasAdd", _inputs_flat, _attrs, _result) 

7635 _result = _QuantizedBiasAddOutput._make(_result) 

7636 return _result 

7637 

7638_QuantizedConv2DOutput = collections.namedtuple( 

7639 "QuantizedConv2D", 

7640 ["output", "min_output", "max_output"]) 

7641 

7642 

7643def quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None): 

7644 r"""Computes a 2D convolution given quantized 4D input and filter tensors. 

7645 

7646 The inputs are quantized tensors where the lowest value represents the real 

7647 number of the associated minimum, and the highest represents the maximum. 

7648 This means that you can only interpret the quantized output in the same way, by 

7649 taking the returned minimum and maximum values into account. 

7650 

7651 Args: 

7652 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7653 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7654 filter's input_depth dimension must match input's depth dimensions. 

7655 min_input: A `Tensor` of type `float32`. 

7656 The float value that the lowest quantized input value represents. 

7657 max_input: A `Tensor` of type `float32`. 

7658 The float value that the highest quantized input value represents. 

7659 min_filter: A `Tensor` of type `float32`. 

7660 The float value that the lowest quantized filter value represents. 

7661 max_filter: A `Tensor` of type `float32`. 

7662 The float value that the highest quantized filter value represents. 

7663 strides: A list of `ints`. 

7664 The stride of the sliding window for each dimension of the input 

7665 tensor. 

7666 padding: A `string` from: `"SAME", "VALID"`. 

7667 The type of padding algorithm to use. 

7668 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

7669 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

7670 1-D tensor of length 4. The dilation factor for each dimension of 

7671 `input`. If set to k > 1, there will be k-1 skipped cells between each 

7672 filter element on that dimension. The dimension order is determined by the 

7673 value of `data_format`, see above for details. Dilations in the batch and 

7674 depth dimensions must be 1. 

7675 name: A name for the operation (optional). 

7676 

7677 Returns: 

7678 A tuple of `Tensor` objects (output, min_output, max_output). 

7679 

7680 output: A `Tensor` of type `out_type`. 

7681 min_output: A `Tensor` of type `float32`. 

7682 max_output: A `Tensor` of type `float32`. 

7683 """ 

7684 _ctx = _context._context or _context.context() 

7685 tld = _ctx._thread_local_data 

7686 if tld.is_eager: 

7687 try: 

7688 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7689 _ctx, "QuantizedConv2D", name, input, filter, min_input, max_input, 

7690 min_filter, max_filter, "out_type", out_type, "strides", strides, 

7691 "padding", padding, "dilations", dilations) 

7692 _result = _QuantizedConv2DOutput._make(_result) 

7693 return _result 

7694 except _core._NotOkStatusException as e: 

7695 _ops.raise_from_not_ok_status(e, name) 

7696 except _core._FallbackException: 

7697 pass 

7698 try: 

7699 return quantized_conv2d_eager_fallback( 

7700 input, filter, min_input, max_input, min_filter, max_filter, 

7701 out_type=out_type, strides=strides, padding=padding, 

7702 dilations=dilations, name=name, ctx=_ctx) 

7703 except _core._SymbolicException: 

7704 pass # Add nodes to the TensorFlow graph. 

7705 # Add nodes to the TensorFlow graph. 

7706 if not isinstance(strides, (list, tuple)): 

7707 raise TypeError( 

7708 "Expected list for 'strides' argument to " 

7709 "'quantized_conv2d' Op, not %r." % strides) 

7710 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7711 padding = _execute.make_str(padding, "padding") 

7712 if out_type is None: 

7713 out_type = _dtypes.qint32 

7714 out_type = _execute.make_type(out_type, "out_type") 

7715 if dilations is None: 

7716 dilations = [1, 1, 1, 1] 

7717 if not isinstance(dilations, (list, tuple)): 

7718 raise TypeError( 

7719 "Expected list for 'dilations' argument to " 

7720 "'quantized_conv2d' Op, not %r." % dilations) 

7721 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

7722 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7723 "QuantizedConv2D", input=input, filter=filter, min_input=min_input, 

7724 max_input=max_input, min_filter=min_filter, 

7725 max_filter=max_filter, strides=strides, 

7726 padding=padding, out_type=out_type, 

7727 dilations=dilations, name=name) 

7728 _result = _outputs[:] 

7729 if _execute.must_record_gradient(): 

7730 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

7731 _op._get_attr_type("Tfilter"), "out_type", 

7732 _op._get_attr_type("out_type"), "strides", 

7733 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

7734 "dilations", _op.get_attr("dilations")) 

7735 _inputs_flat = _op.inputs 

7736 _execute.record_gradient( 

7737 "QuantizedConv2D", _inputs_flat, _attrs, _result) 

7738 _result = _QuantizedConv2DOutput._make(_result) 

7739 return _result 

7740 

7741QuantizedConv2D = tf_export("raw_ops.QuantizedConv2D")(_ops.to_raw_op(quantized_conv2d)) 

7742 

7743 

7744def quantized_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx): 

7745 if not isinstance(strides, (list, tuple)): 

7746 raise TypeError( 

7747 "Expected list for 'strides' argument to " 

7748 "'quantized_conv2d' Op, not %r." % strides) 

7749 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7750 padding = _execute.make_str(padding, "padding") 

7751 if out_type is None: 

7752 out_type = _dtypes.qint32 

7753 out_type = _execute.make_type(out_type, "out_type") 

7754 if dilations is None: 

7755 dilations = [1, 1, 1, 1] 

7756 if not isinstance(dilations, (list, tuple)): 

7757 raise TypeError( 

7758 "Expected list for 'dilations' argument to " 

7759 "'quantized_conv2d' Op, not %r." % dilations) 

7760 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

7761 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7762 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7763 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

7764 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

7765 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

7766 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

7767 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter] 

7768 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

7769 out_type, "strides", strides, "padding", padding, "dilations", dilations) 

7770 _result = _execute.execute(b"QuantizedConv2D", 3, inputs=_inputs_flat, 

7771 attrs=_attrs, ctx=ctx, name=name) 

7772 if _execute.must_record_gradient(): 

7773 _execute.record_gradient( 

7774 "QuantizedConv2D", _inputs_flat, _attrs, _result) 

7775 _result = _QuantizedConv2DOutput._make(_result) 

7776 return _result 

7777 

7778_QuantizedConv2DAndReluOutput = collections.namedtuple( 

7779 "QuantizedConv2DAndRelu", 

7780 ["output", "min_output", "max_output"]) 

7781 

7782 

7783def quantized_conv2d_and_relu(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

7784 r"""TODO: add doc. 

7785 

7786 Args: 

7787 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7788 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7789 min_input: A `Tensor` of type `float32`. 

7790 max_input: A `Tensor` of type `float32`. 

7791 min_filter: A `Tensor` of type `float32`. 

7792 max_filter: A `Tensor` of type `float32`. 

7793 strides: A list of `ints`. 

7794 padding: A `string` from: `"SAME", "VALID"`. 

7795 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

7796 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

7797 padding_list: An optional list of `ints`. Defaults to `[]`. 

7798 name: A name for the operation (optional). 

7799 

7800 Returns: 

7801 A tuple of `Tensor` objects (output, min_output, max_output). 

7802 

7803 output: A `Tensor` of type `out_type`. 

7804 min_output: A `Tensor` of type `float32`. 

7805 max_output: A `Tensor` of type `float32`. 

7806 """ 

7807 _ctx = _context._context or _context.context() 

7808 tld = _ctx._thread_local_data 

7809 if tld.is_eager: 

7810 try: 

7811 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7812 _ctx, "QuantizedConv2DAndRelu", name, input, filter, min_input, 

7813 max_input, min_filter, max_filter, "out_type", out_type, "strides", 

7814 strides, "padding", padding, "dilations", dilations, "padding_list", 

7815 padding_list) 

7816 _result = _QuantizedConv2DAndReluOutput._make(_result) 

7817 return _result 

7818 except _core._NotOkStatusException as e: 

7819 _ops.raise_from_not_ok_status(e, name) 

7820 except _core._FallbackException: 

7821 pass 

7822 try: 

7823 return quantized_conv2d_and_relu_eager_fallback( 

7824 input, filter, min_input, max_input, min_filter, max_filter, 

7825 out_type=out_type, strides=strides, padding=padding, 

7826 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

7827 except _core._SymbolicException: 

7828 pass # Add nodes to the TensorFlow graph. 

7829 # Add nodes to the TensorFlow graph. 

7830 if not isinstance(strides, (list, tuple)): 

7831 raise TypeError( 

7832 "Expected list for 'strides' argument to " 

7833 "'quantized_conv2d_and_relu' Op, not %r." % strides) 

7834 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7835 padding = _execute.make_str(padding, "padding") 

7836 if out_type is None: 

7837 out_type = _dtypes.qint32 

7838 out_type = _execute.make_type(out_type, "out_type") 

7839 if dilations is None: 

7840 dilations = [1, 1, 1, 1] 

7841 if not isinstance(dilations, (list, tuple)): 

7842 raise TypeError( 

7843 "Expected list for 'dilations' argument to " 

7844 "'quantized_conv2d_and_relu' Op, not %r." % dilations) 

7845 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

7846 if padding_list is None: 

7847 padding_list = [] 

7848 if not isinstance(padding_list, (list, tuple)): 

7849 raise TypeError( 

7850 "Expected list for 'padding_list' argument to " 

7851 "'quantized_conv2d_and_relu' Op, not %r." % padding_list) 

7852 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

7853 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

7854 "QuantizedConv2DAndRelu", input=input, filter=filter, 

7855 min_input=min_input, max_input=max_input, 

7856 min_filter=min_filter, 

7857 max_filter=max_filter, strides=strides, 

7858 padding=padding, out_type=out_type, 

7859 dilations=dilations, 

7860 padding_list=padding_list, name=name) 

7861 _result = _outputs[:] 

7862 if _execute.must_record_gradient(): 

7863 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

7864 _op._get_attr_type("Tfilter"), "out_type", 

7865 _op._get_attr_type("out_type"), "strides", 

7866 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

7867 "dilations", _op.get_attr("dilations"), "padding_list", 

7868 _op.get_attr("padding_list")) 

7869 _inputs_flat = _op.inputs 

7870 _execute.record_gradient( 

7871 "QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result) 

7872 _result = _QuantizedConv2DAndReluOutput._make(_result) 

7873 return _result 

7874 

7875QuantizedConv2DAndRelu = tf_export("raw_ops.QuantizedConv2DAndRelu")(_ops.to_raw_op(quantized_conv2d_and_relu)) 

7876 

7877 

7878def quantized_conv2d_and_relu_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx): 

7879 if not isinstance(strides, (list, tuple)): 

7880 raise TypeError( 

7881 "Expected list for 'strides' argument to " 

7882 "'quantized_conv2d_and_relu' Op, not %r." % strides) 

7883 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7884 padding = _execute.make_str(padding, "padding") 

7885 if out_type is None: 

7886 out_type = _dtypes.qint32 

7887 out_type = _execute.make_type(out_type, "out_type") 

7888 if dilations is None: 

7889 dilations = [1, 1, 1, 1] 

7890 if not isinstance(dilations, (list, tuple)): 

7891 raise TypeError( 

7892 "Expected list for 'dilations' argument to " 

7893 "'quantized_conv2d_and_relu' Op, not %r." % dilations) 

7894 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

7895 if padding_list is None: 

7896 padding_list = [] 

7897 if not isinstance(padding_list, (list, tuple)): 

7898 raise TypeError( 

7899 "Expected list for 'padding_list' argument to " 

7900 "'quantized_conv2d_and_relu' Op, not %r." % padding_list) 

7901 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

7902 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7903 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

7904 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

7905 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

7906 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

7907 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

7908 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter] 

7909 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

7910 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

7911 "padding_list", padding_list) 

7912 _result = _execute.execute(b"QuantizedConv2DAndRelu", 3, 

7913 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

7914 name=name) 

7915 if _execute.must_record_gradient(): 

7916 _execute.record_gradient( 

7917 "QuantizedConv2DAndRelu", _inputs_flat, _attrs, _result) 

7918 _result = _QuantizedConv2DAndReluOutput._make(_result) 

7919 return _result 

7920 

7921_QuantizedConv2DAndReluAndRequantizeOutput = collections.namedtuple( 

7922 "QuantizedConv2DAndReluAndRequantize", 

7923 ["output", "min_output", "max_output"]) 

7924 

7925 

7926def quantized_conv2d_and_relu_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

7927 r"""TODO: add doc. 

7928 

7929 Args: 

7930 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7931 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

7932 min_input: A `Tensor` of type `float32`. 

7933 max_input: A `Tensor` of type `float32`. 

7934 min_filter: A `Tensor` of type `float32`. 

7935 max_filter: A `Tensor` of type `float32`. 

7936 min_freezed_output: A `Tensor` of type `float32`. 

7937 max_freezed_output: A `Tensor` of type `float32`. 

7938 strides: A list of `ints`. 

7939 padding: A `string` from: `"SAME", "VALID"`. 

7940 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

7941 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

7942 padding_list: An optional list of `ints`. Defaults to `[]`. 

7943 name: A name for the operation (optional). 

7944 

7945 Returns: 

7946 A tuple of `Tensor` objects (output, min_output, max_output). 

7947 

7948 output: A `Tensor` of type `out_type`. 

7949 min_output: A `Tensor` of type `float32`. 

7950 max_output: A `Tensor` of type `float32`. 

7951 """ 

7952 _ctx = _context._context or _context.context() 

7953 tld = _ctx._thread_local_data 

7954 if tld.is_eager: 

7955 try: 

7956 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

7957 _ctx, "QuantizedConv2DAndReluAndRequantize", name, input, filter, 

7958 min_input, max_input, min_filter, max_filter, min_freezed_output, 

7959 max_freezed_output, "out_type", out_type, "strides", strides, 

7960 "padding", padding, "dilations", dilations, "padding_list", 

7961 padding_list) 

7962 _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result) 

7963 return _result 

7964 except _core._NotOkStatusException as e: 

7965 _ops.raise_from_not_ok_status(e, name) 

7966 except _core._FallbackException: 

7967 pass 

7968 try: 

7969 return quantized_conv2d_and_relu_and_requantize_eager_fallback( 

7970 input, filter, min_input, max_input, min_filter, max_filter, 

7971 min_freezed_output, max_freezed_output, out_type=out_type, 

7972 strides=strides, padding=padding, dilations=dilations, 

7973 padding_list=padding_list, name=name, ctx=_ctx) 

7974 except _core._SymbolicException: 

7975 pass # Add nodes to the TensorFlow graph. 

7976 # Add nodes to the TensorFlow graph. 

7977 if not isinstance(strides, (list, tuple)): 

7978 raise TypeError( 

7979 "Expected list for 'strides' argument to " 

7980 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides) 

7981 strides = [_execute.make_int(_i, "strides") for _i in strides] 

7982 padding = _execute.make_str(padding, "padding") 

7983 if out_type is None: 

7984 out_type = _dtypes.quint8 

7985 out_type = _execute.make_type(out_type, "out_type") 

7986 if dilations is None: 

7987 dilations = [1, 1, 1, 1] 

7988 if not isinstance(dilations, (list, tuple)): 

7989 raise TypeError( 

7990 "Expected list for 'dilations' argument to " 

7991 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations) 

7992 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

7993 if padding_list is None: 

7994 padding_list = [] 

7995 if not isinstance(padding_list, (list, tuple)): 

7996 raise TypeError( 

7997 "Expected list for 'padding_list' argument to " 

7998 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list) 

7999 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8000 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8001 "QuantizedConv2DAndReluAndRequantize", input=input, filter=filter, 

8002 min_input=min_input, 

8003 max_input=max_input, 

8004 min_filter=min_filter, 

8005 max_filter=max_filter, 

8006 min_freezed_output=min_freezed_output, 

8007 max_freezed_output=max_freezed_output, 

8008 strides=strides, 

8009 padding=padding, 

8010 out_type=out_type, 

8011 dilations=dilations, 

8012 padding_list=padding_list, 

8013 name=name) 

8014 _result = _outputs[:] 

8015 if _execute.must_record_gradient(): 

8016 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8017 _op._get_attr_type("Tfilter"), "out_type", 

8018 _op._get_attr_type("out_type"), "strides", 

8019 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8020 "dilations", _op.get_attr("dilations"), "padding_list", 

8021 _op.get_attr("padding_list")) 

8022 _inputs_flat = _op.inputs 

8023 _execute.record_gradient( 

8024 "QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result) 

8025 _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result) 

8026 return _result 

8027 

8028QuantizedConv2DAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_and_relu_and_requantize)) 

8029 

8030 

8031def quantized_conv2d_and_relu_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx): 

8032 if not isinstance(strides, (list, tuple)): 

8033 raise TypeError( 

8034 "Expected list for 'strides' argument to " 

8035 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % strides) 

8036 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8037 padding = _execute.make_str(padding, "padding") 

8038 if out_type is None: 

8039 out_type = _dtypes.quint8 

8040 out_type = _execute.make_type(out_type, "out_type") 

8041 if dilations is None: 

8042 dilations = [1, 1, 1, 1] 

8043 if not isinstance(dilations, (list, tuple)): 

8044 raise TypeError( 

8045 "Expected list for 'dilations' argument to " 

8046 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % dilations) 

8047 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8048 if padding_list is None: 

8049 padding_list = [] 

8050 if not isinstance(padding_list, (list, tuple)): 

8051 raise TypeError( 

8052 "Expected list for 'padding_list' argument to " 

8053 "'quantized_conv2d_and_relu_and_requantize' Op, not %r." % padding_list) 

8054 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8055 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8056 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8057 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8058 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8059 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8060 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8061 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

8062 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

8063 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output] 

8064 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

8065 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

8066 "padding_list", padding_list) 

8067 _result = _execute.execute(b"QuantizedConv2DAndReluAndRequantize", 3, 

8068 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8069 name=name) 

8070 if _execute.must_record_gradient(): 

8071 _execute.record_gradient( 

8072 "QuantizedConv2DAndReluAndRequantize", _inputs_flat, _attrs, _result) 

8073 _result = _QuantizedConv2DAndReluAndRequantizeOutput._make(_result) 

8074 return _result 

8075 

8076_QuantizedConv2DAndRequantizeOutput = collections.namedtuple( 

8077 "QuantizedConv2DAndRequantize", 

8078 ["output", "min_output", "max_output"]) 

8079 

8080 

8081def quantized_conv2d_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8082 r"""TODO: add doc. 

8083 

8084 Args: 

8085 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8086 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8087 min_input: A `Tensor` of type `float32`. 

8088 max_input: A `Tensor` of type `float32`. 

8089 min_filter: A `Tensor` of type `float32`. 

8090 max_filter: A `Tensor` of type `float32`. 

8091 min_freezed_output: A `Tensor` of type `float32`. 

8092 max_freezed_output: A `Tensor` of type `float32`. 

8093 strides: A list of `ints`. 

8094 padding: A `string` from: `"SAME", "VALID"`. 

8095 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`. 

8096 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8097 padding_list: An optional list of `ints`. Defaults to `[]`. 

8098 name: A name for the operation (optional). 

8099 

8100 Returns: 

8101 A tuple of `Tensor` objects (output, min_output, max_output). 

8102 

8103 output: A `Tensor` of type `out_type`. 

8104 min_output: A `Tensor` of type `float32`. 

8105 max_output: A `Tensor` of type `float32`. 

8106 """ 

8107 _ctx = _context._context or _context.context() 

8108 tld = _ctx._thread_local_data 

8109 if tld.is_eager: 

8110 try: 

8111 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8112 _ctx, "QuantizedConv2DAndRequantize", name, input, filter, min_input, 

8113 max_input, min_filter, max_filter, min_freezed_output, 

8114 max_freezed_output, "out_type", out_type, "strides", strides, 

8115 "padding", padding, "dilations", dilations, "padding_list", 

8116 padding_list) 

8117 _result = _QuantizedConv2DAndRequantizeOutput._make(_result) 

8118 return _result 

8119 except _core._NotOkStatusException as e: 

8120 _ops.raise_from_not_ok_status(e, name) 

8121 except _core._FallbackException: 

8122 pass 

8123 try: 

8124 return quantized_conv2d_and_requantize_eager_fallback( 

8125 input, filter, min_input, max_input, min_filter, max_filter, 

8126 min_freezed_output, max_freezed_output, out_type=out_type, 

8127 strides=strides, padding=padding, dilations=dilations, 

8128 padding_list=padding_list, name=name, ctx=_ctx) 

8129 except _core._SymbolicException: 

8130 pass # Add nodes to the TensorFlow graph. 

8131 # Add nodes to the TensorFlow graph. 

8132 if not isinstance(strides, (list, tuple)): 

8133 raise TypeError( 

8134 "Expected list for 'strides' argument to " 

8135 "'quantized_conv2d_and_requantize' Op, not %r." % strides) 

8136 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8137 padding = _execute.make_str(padding, "padding") 

8138 if out_type is None: 

8139 out_type = _dtypes.qint8 

8140 out_type = _execute.make_type(out_type, "out_type") 

8141 if dilations is None: 

8142 dilations = [1, 1, 1, 1] 

8143 if not isinstance(dilations, (list, tuple)): 

8144 raise TypeError( 

8145 "Expected list for 'dilations' argument to " 

8146 "'quantized_conv2d_and_requantize' Op, not %r." % dilations) 

8147 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8148 if padding_list is None: 

8149 padding_list = [] 

8150 if not isinstance(padding_list, (list, tuple)): 

8151 raise TypeError( 

8152 "Expected list for 'padding_list' argument to " 

8153 "'quantized_conv2d_and_requantize' Op, not %r." % padding_list) 

8154 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8155 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8156 "QuantizedConv2DAndRequantize", input=input, filter=filter, 

8157 min_input=min_input, 

8158 max_input=max_input, 

8159 min_filter=min_filter, 

8160 max_filter=max_filter, 

8161 min_freezed_output=min_freezed_output, 

8162 max_freezed_output=max_freezed_output, 

8163 strides=strides, padding=padding, 

8164 out_type=out_type, 

8165 dilations=dilations, 

8166 padding_list=padding_list, name=name) 

8167 _result = _outputs[:] 

8168 if _execute.must_record_gradient(): 

8169 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8170 _op._get_attr_type("Tfilter"), "out_type", 

8171 _op._get_attr_type("out_type"), "strides", 

8172 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8173 "dilations", _op.get_attr("dilations"), "padding_list", 

8174 _op.get_attr("padding_list")) 

8175 _inputs_flat = _op.inputs 

8176 _execute.record_gradient( 

8177 "QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result) 

8178 _result = _QuantizedConv2DAndRequantizeOutput._make(_result) 

8179 return _result 

8180 

8181QuantizedConv2DAndRequantize = tf_export("raw_ops.QuantizedConv2DAndRequantize")(_ops.to_raw_op(quantized_conv2d_and_requantize)) 

8182 

8183 

8184def quantized_conv2d_and_requantize_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx): 

8185 if not isinstance(strides, (list, tuple)): 

8186 raise TypeError( 

8187 "Expected list for 'strides' argument to " 

8188 "'quantized_conv2d_and_requantize' Op, not %r." % strides) 

8189 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8190 padding = _execute.make_str(padding, "padding") 

8191 if out_type is None: 

8192 out_type = _dtypes.qint8 

8193 out_type = _execute.make_type(out_type, "out_type") 

8194 if dilations is None: 

8195 dilations = [1, 1, 1, 1] 

8196 if not isinstance(dilations, (list, tuple)): 

8197 raise TypeError( 

8198 "Expected list for 'dilations' argument to " 

8199 "'quantized_conv2d_and_requantize' Op, not %r." % dilations) 

8200 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8201 if padding_list is None: 

8202 padding_list = [] 

8203 if not isinstance(padding_list, (list, tuple)): 

8204 raise TypeError( 

8205 "Expected list for 'padding_list' argument to " 

8206 "'quantized_conv2d_and_requantize' Op, not %r." % padding_list) 

8207 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8208 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8209 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8210 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8211 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8212 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8213 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8214 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

8215 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

8216 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output] 

8217 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

8218 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

8219 "padding_list", padding_list) 

8220 _result = _execute.execute(b"QuantizedConv2DAndRequantize", 3, 

8221 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8222 name=name) 

8223 if _execute.must_record_gradient(): 

8224 _execute.record_gradient( 

8225 "QuantizedConv2DAndRequantize", _inputs_flat, _attrs, _result) 

8226 _result = _QuantizedConv2DAndRequantizeOutput._make(_result) 

8227 return _result 

8228 

8229_QuantizedConv2DPerChannelOutput = collections.namedtuple( 

8230 "QuantizedConv2DPerChannel", 

8231 ["output", "min_output", "max_output"]) 

8232 

8233 

8234def quantized_conv2d_per_channel(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None): 

8235 r"""Computes QuantizedConv2D per channel. 

8236 

8237 Args: 

8238 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8239 The original input tensor. 

8240 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8241 The original filter tensor. 

8242 min_input: A `Tensor` of type `float32`. 

8243 The minimum value of the input tensor 

8244 max_input: A `Tensor` of type `float32`. 

8245 The maximum value of the input tensor. 

8246 min_filter: A `Tensor` of type `float32`. 

8247 The minimum value of the filter tensor. 

8248 max_filter: A `Tensor` of type `float32`. 

8249 The maximum value of the filter tensor. 

8250 strides: A list of `ints`. list of stride values. 

8251 padding: A `string` from: `"SAME", "VALID"`. 

8252 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

8253 The quantized type of output tensor that needs to be converted. 

8254 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8255 list of dilation values. 

8256 name: A name for the operation (optional). 

8257 

8258 Returns: 

8259 A tuple of `Tensor` objects (output, min_output, max_output). 

8260 

8261 output: A `Tensor` of type `out_type`. 

8262 min_output: A `Tensor` of type `float32`. 

8263 max_output: A `Tensor` of type `float32`. 

8264 """ 

8265 _ctx = _context._context or _context.context() 

8266 tld = _ctx._thread_local_data 

8267 if tld.is_eager: 

8268 try: 

8269 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8270 _ctx, "QuantizedConv2DPerChannel", name, input, filter, min_input, 

8271 max_input, min_filter, max_filter, "out_type", out_type, "strides", 

8272 strides, "padding", padding, "dilations", dilations) 

8273 _result = _QuantizedConv2DPerChannelOutput._make(_result) 

8274 return _result 

8275 except _core._NotOkStatusException as e: 

8276 _ops.raise_from_not_ok_status(e, name) 

8277 except _core._FallbackException: 

8278 pass 

8279 try: 

8280 return quantized_conv2d_per_channel_eager_fallback( 

8281 input, filter, min_input, max_input, min_filter, max_filter, 

8282 out_type=out_type, strides=strides, padding=padding, 

8283 dilations=dilations, name=name, ctx=_ctx) 

8284 except _core._SymbolicException: 

8285 pass # Add nodes to the TensorFlow graph. 

8286 # Add nodes to the TensorFlow graph. 

8287 if not isinstance(strides, (list, tuple)): 

8288 raise TypeError( 

8289 "Expected list for 'strides' argument to " 

8290 "'quantized_conv2d_per_channel' Op, not %r." % strides) 

8291 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8292 padding = _execute.make_str(padding, "padding") 

8293 if out_type is None: 

8294 out_type = _dtypes.qint32 

8295 out_type = _execute.make_type(out_type, "out_type") 

8296 if dilations is None: 

8297 dilations = [1, 1, 1, 1] 

8298 if not isinstance(dilations, (list, tuple)): 

8299 raise TypeError( 

8300 "Expected list for 'dilations' argument to " 

8301 "'quantized_conv2d_per_channel' Op, not %r." % dilations) 

8302 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8303 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8304 "QuantizedConv2DPerChannel", input=input, filter=filter, 

8305 min_input=min_input, max_input=max_input, 

8306 min_filter=min_filter, 

8307 max_filter=max_filter, strides=strides, 

8308 padding=padding, out_type=out_type, 

8309 dilations=dilations, name=name) 

8310 _result = _outputs[:] 

8311 if _execute.must_record_gradient(): 

8312 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8313 _op._get_attr_type("Tfilter"), "out_type", 

8314 _op._get_attr_type("out_type"), "strides", 

8315 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8316 "dilations", _op.get_attr("dilations")) 

8317 _inputs_flat = _op.inputs 

8318 _execute.record_gradient( 

8319 "QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result) 

8320 _result = _QuantizedConv2DPerChannelOutput._make(_result) 

8321 return _result 

8322 

8323QuantizedConv2DPerChannel = tf_export("raw_ops.QuantizedConv2DPerChannel")(_ops.to_raw_op(quantized_conv2d_per_channel)) 

8324 

8325 

8326def quantized_conv2d_per_channel_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx): 

8327 if not isinstance(strides, (list, tuple)): 

8328 raise TypeError( 

8329 "Expected list for 'strides' argument to " 

8330 "'quantized_conv2d_per_channel' Op, not %r." % strides) 

8331 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8332 padding = _execute.make_str(padding, "padding") 

8333 if out_type is None: 

8334 out_type = _dtypes.qint32 

8335 out_type = _execute.make_type(out_type, "out_type") 

8336 if dilations is None: 

8337 dilations = [1, 1, 1, 1] 

8338 if not isinstance(dilations, (list, tuple)): 

8339 raise TypeError( 

8340 "Expected list for 'dilations' argument to " 

8341 "'quantized_conv2d_per_channel' Op, not %r." % dilations) 

8342 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8343 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8344 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8345 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8346 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8347 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8348 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8349 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter] 

8350 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

8351 out_type, "strides", strides, "padding", padding, "dilations", dilations) 

8352 _result = _execute.execute(b"QuantizedConv2DPerChannel", 3, 

8353 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8354 name=name) 

8355 if _execute.must_record_gradient(): 

8356 _execute.record_gradient( 

8357 "QuantizedConv2DPerChannel", _inputs_flat, _attrs, _result) 

8358 _result = _QuantizedConv2DPerChannelOutput._make(_result) 

8359 return _result 

8360 

8361_QuantizedConv2DWithBiasOutput = collections.namedtuple( 

8362 "QuantizedConv2DWithBias", 

8363 ["output", "min_output", "max_output"]) 

8364 

8365 

8366def quantized_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8367 r"""TODO: add doc. 

8368 

8369 Args: 

8370 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8371 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8372 bias: A `Tensor` of type `float32`. 

8373 min_input: A `Tensor` of type `float32`. 

8374 max_input: A `Tensor` of type `float32`. 

8375 min_filter: A `Tensor` of type `float32`. 

8376 max_filter: A `Tensor` of type `float32`. 

8377 strides: A list of `ints`. 

8378 padding: A `string` from: `"SAME", "VALID"`. 

8379 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

8380 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8381 padding_list: An optional list of `ints`. Defaults to `[]`. 

8382 name: A name for the operation (optional). 

8383 

8384 Returns: 

8385 A tuple of `Tensor` objects (output, min_output, max_output). 

8386 

8387 output: A `Tensor` of type `out_type`. 

8388 min_output: A `Tensor` of type `float32`. 

8389 max_output: A `Tensor` of type `float32`. 

8390 """ 

8391 _ctx = _context._context or _context.context() 

8392 tld = _ctx._thread_local_data 

8393 if tld.is_eager: 

8394 try: 

8395 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8396 _ctx, "QuantizedConv2DWithBias", name, input, filter, bias, min_input, 

8397 max_input, min_filter, max_filter, "out_type", out_type, "strides", 

8398 strides, "padding", padding, "dilations", dilations, "padding_list", 

8399 padding_list) 

8400 _result = _QuantizedConv2DWithBiasOutput._make(_result) 

8401 return _result 

8402 except _core._NotOkStatusException as e: 

8403 _ops.raise_from_not_ok_status(e, name) 

8404 except _core._FallbackException: 

8405 pass 

8406 try: 

8407 return quantized_conv2d_with_bias_eager_fallback( 

8408 input, filter, bias, min_input, max_input, min_filter, max_filter, 

8409 out_type=out_type, strides=strides, padding=padding, 

8410 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

8411 except _core._SymbolicException: 

8412 pass # Add nodes to the TensorFlow graph. 

8413 # Add nodes to the TensorFlow graph. 

8414 if not isinstance(strides, (list, tuple)): 

8415 raise TypeError( 

8416 "Expected list for 'strides' argument to " 

8417 "'quantized_conv2d_with_bias' Op, not %r." % strides) 

8418 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8419 padding = _execute.make_str(padding, "padding") 

8420 if out_type is None: 

8421 out_type = _dtypes.qint32 

8422 out_type = _execute.make_type(out_type, "out_type") 

8423 if dilations is None: 

8424 dilations = [1, 1, 1, 1] 

8425 if not isinstance(dilations, (list, tuple)): 

8426 raise TypeError( 

8427 "Expected list for 'dilations' argument to " 

8428 "'quantized_conv2d_with_bias' Op, not %r." % dilations) 

8429 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8430 if padding_list is None: 

8431 padding_list = [] 

8432 if not isinstance(padding_list, (list, tuple)): 

8433 raise TypeError( 

8434 "Expected list for 'padding_list' argument to " 

8435 "'quantized_conv2d_with_bias' Op, not %r." % padding_list) 

8436 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8437 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8438 "QuantizedConv2DWithBias", input=input, filter=filter, bias=bias, 

8439 min_input=min_input, max_input=max_input, 

8440 min_filter=min_filter, 

8441 max_filter=max_filter, strides=strides, 

8442 padding=padding, out_type=out_type, 

8443 dilations=dilations, 

8444 padding_list=padding_list, name=name) 

8445 _result = _outputs[:] 

8446 if _execute.must_record_gradient(): 

8447 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8448 _op._get_attr_type("Tfilter"), "out_type", 

8449 _op._get_attr_type("out_type"), "strides", 

8450 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8451 "dilations", _op.get_attr("dilations"), "padding_list", 

8452 _op.get_attr("padding_list")) 

8453 _inputs_flat = _op.inputs 

8454 _execute.record_gradient( 

8455 "QuantizedConv2DWithBias", _inputs_flat, _attrs, _result) 

8456 _result = _QuantizedConv2DWithBiasOutput._make(_result) 

8457 return _result 

8458 

8459QuantizedConv2DWithBias = tf_export("raw_ops.QuantizedConv2DWithBias")(_ops.to_raw_op(quantized_conv2d_with_bias)) 

8460 

8461 

8462def quantized_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx): 

8463 if not isinstance(strides, (list, tuple)): 

8464 raise TypeError( 

8465 "Expected list for 'strides' argument to " 

8466 "'quantized_conv2d_with_bias' Op, not %r." % strides) 

8467 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8468 padding = _execute.make_str(padding, "padding") 

8469 if out_type is None: 

8470 out_type = _dtypes.qint32 

8471 out_type = _execute.make_type(out_type, "out_type") 

8472 if dilations is None: 

8473 dilations = [1, 1, 1, 1] 

8474 if not isinstance(dilations, (list, tuple)): 

8475 raise TypeError( 

8476 "Expected list for 'dilations' argument to " 

8477 "'quantized_conv2d_with_bias' Op, not %r." % dilations) 

8478 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8479 if padding_list is None: 

8480 padding_list = [] 

8481 if not isinstance(padding_list, (list, tuple)): 

8482 raise TypeError( 

8483 "Expected list for 'padding_list' argument to " 

8484 "'quantized_conv2d_with_bias' Op, not %r." % padding_list) 

8485 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8486 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8487 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8488 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

8489 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8490 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8491 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8492 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8493 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter] 

8494 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

8495 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

8496 "padding_list", padding_list) 

8497 _result = _execute.execute(b"QuantizedConv2DWithBias", 3, 

8498 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8499 name=name) 

8500 if _execute.must_record_gradient(): 

8501 _execute.record_gradient( 

8502 "QuantizedConv2DWithBias", _inputs_flat, _attrs, _result) 

8503 _result = _QuantizedConv2DWithBiasOutput._make(_result) 

8504 return _result 

8505 

8506_QuantizedConv2DWithBiasAndReluOutput = collections.namedtuple( 

8507 "QuantizedConv2DWithBiasAndRelu", 

8508 ["output", "min_output", "max_output"]) 

8509 

8510 

8511def quantized_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8512 r"""TODO: add doc. 

8513 

8514 Args: 

8515 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8516 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8517 bias: A `Tensor` of type `float32`. 

8518 min_input: A `Tensor` of type `float32`. 

8519 max_input: A `Tensor` of type `float32`. 

8520 min_filter: A `Tensor` of type `float32`. 

8521 max_filter: A `Tensor` of type `float32`. 

8522 strides: A list of `ints`. 

8523 padding: A `string` from: `"SAME", "VALID"`. 

8524 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

8525 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8526 padding_list: An optional list of `ints`. Defaults to `[]`. 

8527 name: A name for the operation (optional). 

8528 

8529 Returns: 

8530 A tuple of `Tensor` objects (output, min_output, max_output). 

8531 

8532 output: A `Tensor` of type `out_type`. 

8533 min_output: A `Tensor` of type `float32`. 

8534 max_output: A `Tensor` of type `float32`. 

8535 """ 

8536 _ctx = _context._context or _context.context() 

8537 tld = _ctx._thread_local_data 

8538 if tld.is_eager: 

8539 try: 

8540 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8541 _ctx, "QuantizedConv2DWithBiasAndRelu", name, input, filter, bias, 

8542 min_input, max_input, min_filter, max_filter, "out_type", out_type, 

8543 "strides", strides, "padding", padding, "dilations", dilations, 

8544 "padding_list", padding_list) 

8545 _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result) 

8546 return _result 

8547 except _core._NotOkStatusException as e: 

8548 _ops.raise_from_not_ok_status(e, name) 

8549 except _core._FallbackException: 

8550 pass 

8551 try: 

8552 return quantized_conv2d_with_bias_and_relu_eager_fallback( 

8553 input, filter, bias, min_input, max_input, min_filter, max_filter, 

8554 out_type=out_type, strides=strides, padding=padding, 

8555 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

8556 except _core._SymbolicException: 

8557 pass # Add nodes to the TensorFlow graph. 

8558 # Add nodes to the TensorFlow graph. 

8559 if not isinstance(strides, (list, tuple)): 

8560 raise TypeError( 

8561 "Expected list for 'strides' argument to " 

8562 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides) 

8563 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8564 padding = _execute.make_str(padding, "padding") 

8565 if out_type is None: 

8566 out_type = _dtypes.qint32 

8567 out_type = _execute.make_type(out_type, "out_type") 

8568 if dilations is None: 

8569 dilations = [1, 1, 1, 1] 

8570 if not isinstance(dilations, (list, tuple)): 

8571 raise TypeError( 

8572 "Expected list for 'dilations' argument to " 

8573 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations) 

8574 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8575 if padding_list is None: 

8576 padding_list = [] 

8577 if not isinstance(padding_list, (list, tuple)): 

8578 raise TypeError( 

8579 "Expected list for 'padding_list' argument to " 

8580 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list) 

8581 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8582 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8583 "QuantizedConv2DWithBiasAndRelu", input=input, filter=filter, 

8584 bias=bias, min_input=min_input, 

8585 max_input=max_input, 

8586 min_filter=min_filter, 

8587 max_filter=max_filter, 

8588 strides=strides, padding=padding, 

8589 out_type=out_type, 

8590 dilations=dilations, 

8591 padding_list=padding_list, 

8592 name=name) 

8593 _result = _outputs[:] 

8594 if _execute.must_record_gradient(): 

8595 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8596 _op._get_attr_type("Tfilter"), "out_type", 

8597 _op._get_attr_type("out_type"), "strides", 

8598 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8599 "dilations", _op.get_attr("dilations"), "padding_list", 

8600 _op.get_attr("padding_list")) 

8601 _inputs_flat = _op.inputs 

8602 _execute.record_gradient( 

8603 "QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result) 

8604 _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result) 

8605 return _result 

8606 

8607QuantizedConv2DWithBiasAndRelu = tf_export("raw_ops.QuantizedConv2DWithBiasAndRelu")(_ops.to_raw_op(quantized_conv2d_with_bias_and_relu)) 

8608 

8609 

8610def quantized_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx): 

8611 if not isinstance(strides, (list, tuple)): 

8612 raise TypeError( 

8613 "Expected list for 'strides' argument to " 

8614 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % strides) 

8615 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8616 padding = _execute.make_str(padding, "padding") 

8617 if out_type is None: 

8618 out_type = _dtypes.qint32 

8619 out_type = _execute.make_type(out_type, "out_type") 

8620 if dilations is None: 

8621 dilations = [1, 1, 1, 1] 

8622 if not isinstance(dilations, (list, tuple)): 

8623 raise TypeError( 

8624 "Expected list for 'dilations' argument to " 

8625 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % dilations) 

8626 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8627 if padding_list is None: 

8628 padding_list = [] 

8629 if not isinstance(padding_list, (list, tuple)): 

8630 raise TypeError( 

8631 "Expected list for 'padding_list' argument to " 

8632 "'quantized_conv2d_with_bias_and_relu' Op, not %r." % padding_list) 

8633 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8634 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8635 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8636 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

8637 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8638 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8639 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8640 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8641 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter] 

8642 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

8643 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

8644 "padding_list", padding_list) 

8645 _result = _execute.execute(b"QuantizedConv2DWithBiasAndRelu", 3, 

8646 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8647 name=name) 

8648 if _execute.must_record_gradient(): 

8649 _execute.record_gradient( 

8650 "QuantizedConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result) 

8651 _result = _QuantizedConv2DWithBiasAndReluOutput._make(_result) 

8652 return _result 

8653 

8654_QuantizedConv2DWithBiasAndReluAndRequantizeOutput = collections.namedtuple( 

8655 "QuantizedConv2DWithBiasAndReluAndRequantize", 

8656 ["output", "min_output", "max_output"]) 

8657 

8658 

8659def quantized_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8660 r"""TODO: add doc. 

8661 

8662 Args: 

8663 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8664 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8665 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

8666 min_input: A `Tensor` of type `float32`. 

8667 max_input: A `Tensor` of type `float32`. 

8668 min_filter: A `Tensor` of type `float32`. 

8669 max_filter: A `Tensor` of type `float32`. 

8670 min_freezed_output: A `Tensor` of type `float32`. 

8671 max_freezed_output: A `Tensor` of type `float32`. 

8672 strides: A list of `ints`. 

8673 padding: A `string` from: `"SAME", "VALID"`. 

8674 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

8675 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8676 padding_list: An optional list of `ints`. Defaults to `[]`. 

8677 name: A name for the operation (optional). 

8678 

8679 Returns: 

8680 A tuple of `Tensor` objects (output, min_output, max_output). 

8681 

8682 output: A `Tensor` of type `out_type`. 

8683 min_output: A `Tensor` of type `float32`. 

8684 max_output: A `Tensor` of type `float32`. 

8685 """ 

8686 _ctx = _context._context or _context.context() 

8687 tld = _ctx._thread_local_data 

8688 if tld.is_eager: 

8689 try: 

8690 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8691 _ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name, input, 

8692 filter, bias, min_input, max_input, min_filter, max_filter, 

8693 min_freezed_output, max_freezed_output, "out_type", out_type, 

8694 "strides", strides, "padding", padding, "dilations", dilations, 

8695 "padding_list", padding_list) 

8696 _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

8697 return _result 

8698 except _core._NotOkStatusException as e: 

8699 _ops.raise_from_not_ok_status(e, name) 

8700 except _core._FallbackException: 

8701 pass 

8702 try: 

8703 return quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback( 

8704 input, filter, bias, min_input, max_input, min_filter, max_filter, 

8705 min_freezed_output, max_freezed_output, out_type=out_type, 

8706 strides=strides, padding=padding, dilations=dilations, 

8707 padding_list=padding_list, name=name, ctx=_ctx) 

8708 except _core._SymbolicException: 

8709 pass # Add nodes to the TensorFlow graph. 

8710 # Add nodes to the TensorFlow graph. 

8711 if not isinstance(strides, (list, tuple)): 

8712 raise TypeError( 

8713 "Expected list for 'strides' argument to " 

8714 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides) 

8715 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8716 padding = _execute.make_str(padding, "padding") 

8717 if out_type is None: 

8718 out_type = _dtypes.quint8 

8719 out_type = _execute.make_type(out_type, "out_type") 

8720 if dilations is None: 

8721 dilations = [1, 1, 1, 1] 

8722 if not isinstance(dilations, (list, tuple)): 

8723 raise TypeError( 

8724 "Expected list for 'dilations' argument to " 

8725 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations) 

8726 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8727 if padding_list is None: 

8728 padding_list = [] 

8729 if not isinstance(padding_list, (list, tuple)): 

8730 raise TypeError( 

8731 "Expected list for 'padding_list' argument to " 

8732 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list) 

8733 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8734 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8735 "QuantizedConv2DWithBiasAndReluAndRequantize", input=input, 

8736 filter=filter, 

8737 bias=bias, 

8738 min_input=min_input, 

8739 max_input=max_input, 

8740 min_filter=min_filter, 

8741 max_filter=max_filter, 

8742 min_freezed_output=min_freezed_output, 

8743 max_freezed_output=max_freezed_output, 

8744 strides=strides, 

8745 padding=padding, 

8746 out_type=out_type, 

8747 dilations=dilations, 

8748 padding_list=padding_list, 

8749 name=name) 

8750 _result = _outputs[:] 

8751 if _execute.must_record_gradient(): 

8752 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8753 _op._get_attr_type("Tfilter"), "Tbias", 

8754 _op._get_attr_type("Tbias"), "out_type", 

8755 _op._get_attr_type("out_type"), "strides", 

8756 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8757 "dilations", _op.get_attr("dilations"), "padding_list", 

8758 _op.get_attr("padding_list")) 

8759 _inputs_flat = _op.inputs 

8760 _execute.record_gradient( 

8761 "QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

8762 _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

8763 return _result 

8764 

8765QuantizedConv2DWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_and_relu_and_requantize)) 

8766 

8767 

8768def quantized_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx): 

8769 if not isinstance(strides, (list, tuple)): 

8770 raise TypeError( 

8771 "Expected list for 'strides' argument to " 

8772 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides) 

8773 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8774 padding = _execute.make_str(padding, "padding") 

8775 if out_type is None: 

8776 out_type = _dtypes.quint8 

8777 out_type = _execute.make_type(out_type, "out_type") 

8778 if dilations is None: 

8779 dilations = [1, 1, 1, 1] 

8780 if not isinstance(dilations, (list, tuple)): 

8781 raise TypeError( 

8782 "Expected list for 'dilations' argument to " 

8783 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations) 

8784 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8785 if padding_list is None: 

8786 padding_list = [] 

8787 if not isinstance(padding_list, (list, tuple)): 

8788 raise TypeError( 

8789 "Expected list for 'padding_list' argument to " 

8790 "'quantized_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list) 

8791 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8792 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8793 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8794 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

8795 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8796 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8797 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8798 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8799 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

8800 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

8801 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output] 

8802 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias", 

8803 _attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding, 

8804 "dilations", dilations, "padding_list", padding_list) 

8805 _result = _execute.execute(b"QuantizedConv2DWithBiasAndReluAndRequantize", 

8806 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8807 name=name) 

8808 if _execute.must_record_gradient(): 

8809 _execute.record_gradient( 

8810 "QuantizedConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

8811 _result = _QuantizedConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

8812 return _result 

8813 

8814_QuantizedConv2DWithBiasAndRequantizeOutput = collections.namedtuple( 

8815 "QuantizedConv2DWithBiasAndRequantize", 

8816 ["output", "min_output", "max_output"]) 

8817 

8818 

8819def quantized_conv2d_with_bias_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.qint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8820 r"""TODO: add doc. 

8821 

8822 Args: 

8823 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8824 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8825 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

8826 min_input: A `Tensor` of type `float32`. 

8827 max_input: A `Tensor` of type `float32`. 

8828 min_filter: A `Tensor` of type `float32`. 

8829 max_filter: A `Tensor` of type `float32`. 

8830 min_freezed_output: A `Tensor` of type `float32`. 

8831 max_freezed_output: A `Tensor` of type `float32`. 

8832 strides: A list of `ints`. 

8833 padding: A `string` from: `"SAME", "VALID"`. 

8834 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint8`. 

8835 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8836 padding_list: An optional list of `ints`. Defaults to `[]`. 

8837 name: A name for the operation (optional). 

8838 

8839 Returns: 

8840 A tuple of `Tensor` objects (output, min_output, max_output). 

8841 

8842 output: A `Tensor` of type `out_type`. 

8843 min_output: A `Tensor` of type `float32`. 

8844 max_output: A `Tensor` of type `float32`. 

8845 """ 

8846 _ctx = _context._context or _context.context() 

8847 tld = _ctx._thread_local_data 

8848 if tld.is_eager: 

8849 try: 

8850 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

8851 _ctx, "QuantizedConv2DWithBiasAndRequantize", name, input, filter, 

8852 bias, min_input, max_input, min_filter, max_filter, 

8853 min_freezed_output, max_freezed_output, "out_type", out_type, 

8854 "strides", strides, "padding", padding, "dilations", dilations, 

8855 "padding_list", padding_list) 

8856 _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result) 

8857 return _result 

8858 except _core._NotOkStatusException as e: 

8859 _ops.raise_from_not_ok_status(e, name) 

8860 except _core._FallbackException: 

8861 pass 

8862 try: 

8863 return quantized_conv2d_with_bias_and_requantize_eager_fallback( 

8864 input, filter, bias, min_input, max_input, min_filter, max_filter, 

8865 min_freezed_output, max_freezed_output, out_type=out_type, 

8866 strides=strides, padding=padding, dilations=dilations, 

8867 padding_list=padding_list, name=name, ctx=_ctx) 

8868 except _core._SymbolicException: 

8869 pass # Add nodes to the TensorFlow graph. 

8870 # Add nodes to the TensorFlow graph. 

8871 if not isinstance(strides, (list, tuple)): 

8872 raise TypeError( 

8873 "Expected list for 'strides' argument to " 

8874 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides) 

8875 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8876 padding = _execute.make_str(padding, "padding") 

8877 if out_type is None: 

8878 out_type = _dtypes.qint8 

8879 out_type = _execute.make_type(out_type, "out_type") 

8880 if dilations is None: 

8881 dilations = [1, 1, 1, 1] 

8882 if not isinstance(dilations, (list, tuple)): 

8883 raise TypeError( 

8884 "Expected list for 'dilations' argument to " 

8885 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations) 

8886 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8887 if padding_list is None: 

8888 padding_list = [] 

8889 if not isinstance(padding_list, (list, tuple)): 

8890 raise TypeError( 

8891 "Expected list for 'padding_list' argument to " 

8892 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list) 

8893 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8894 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

8895 "QuantizedConv2DWithBiasAndRequantize", input=input, filter=filter, 

8896 bias=bias, 

8897 min_input=min_input, 

8898 max_input=max_input, 

8899 min_filter=min_filter, 

8900 max_filter=max_filter, 

8901 min_freezed_output=min_freezed_output, 

8902 max_freezed_output=max_freezed_output, 

8903 strides=strides, 

8904 padding=padding, 

8905 out_type=out_type, 

8906 dilations=dilations, 

8907 padding_list=padding_list, 

8908 name=name) 

8909 _result = _outputs[:] 

8910 if _execute.must_record_gradient(): 

8911 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

8912 _op._get_attr_type("Tfilter"), "Tbias", 

8913 _op._get_attr_type("Tbias"), "out_type", 

8914 _op._get_attr_type("out_type"), "strides", 

8915 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

8916 "dilations", _op.get_attr("dilations"), "padding_list", 

8917 _op.get_attr("padding_list")) 

8918 _inputs_flat = _op.inputs 

8919 _execute.record_gradient( 

8920 "QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result) 

8921 _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result) 

8922 return _result 

8923 

8924QuantizedConv2DWithBiasAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_and_requantize)) 

8925 

8926 

8927def quantized_conv2d_with_bias_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx): 

8928 if not isinstance(strides, (list, tuple)): 

8929 raise TypeError( 

8930 "Expected list for 'strides' argument to " 

8931 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % strides) 

8932 strides = [_execute.make_int(_i, "strides") for _i in strides] 

8933 padding = _execute.make_str(padding, "padding") 

8934 if out_type is None: 

8935 out_type = _dtypes.qint8 

8936 out_type = _execute.make_type(out_type, "out_type") 

8937 if dilations is None: 

8938 dilations = [1, 1, 1, 1] 

8939 if not isinstance(dilations, (list, tuple)): 

8940 raise TypeError( 

8941 "Expected list for 'dilations' argument to " 

8942 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % dilations) 

8943 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

8944 if padding_list is None: 

8945 padding_list = [] 

8946 if not isinstance(padding_list, (list, tuple)): 

8947 raise TypeError( 

8948 "Expected list for 'padding_list' argument to " 

8949 "'quantized_conv2d_with_bias_and_requantize' Op, not %r." % padding_list) 

8950 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

8951 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8952 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

8953 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

8954 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

8955 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

8956 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

8957 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

8958 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

8959 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

8960 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output] 

8961 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias", 

8962 _attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding, 

8963 "dilations", dilations, "padding_list", padding_list) 

8964 _result = _execute.execute(b"QuantizedConv2DWithBiasAndRequantize", 3, 

8965 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

8966 name=name) 

8967 if _execute.must_record_gradient(): 

8968 _execute.record_gradient( 

8969 "QuantizedConv2DWithBiasAndRequantize", _inputs_flat, _attrs, _result) 

8970 _result = _QuantizedConv2DWithBiasAndRequantizeOutput._make(_result) 

8971 return _result 

8972 

8973_QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput = collections.namedtuple( 

8974 "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", 

8975 ["output", "min_output", "max_output"]) 

8976 

8977 

8978def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

8979 r"""TODO: add doc. 

8980 

8981 Args: 

8982 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8983 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8984 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

8985 min_input: A `Tensor` of type `float32`. 

8986 max_input: A `Tensor` of type `float32`. 

8987 min_filter: A `Tensor` of type `float32`. 

8988 max_filter: A `Tensor` of type `float32`. 

8989 min_freezed_output: A `Tensor` of type `float32`. 

8990 max_freezed_output: A `Tensor` of type `float32`. 

8991 summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

8992 min_summand: A `Tensor` of type `float32`. 

8993 max_summand: A `Tensor` of type `float32`. 

8994 strides: A list of `ints`. 

8995 padding: A `string` from: `"SAME", "VALID"`. 

8996 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

8997 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

8998 padding_list: An optional list of `ints`. Defaults to `[]`. 

8999 name: A name for the operation (optional). 

9000 

9001 Returns: 

9002 A tuple of `Tensor` objects (output, min_output, max_output). 

9003 

9004 output: A `Tensor` of type `out_type`. 

9005 min_output: A `Tensor` of type `float32`. 

9006 max_output: A `Tensor` of type `float32`. 

9007 """ 

9008 _ctx = _context._context or _context.context() 

9009 tld = _ctx._thread_local_data 

9010 if tld.is_eager: 

9011 try: 

9012 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9013 _ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name, 

9014 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9015 min_freezed_output, max_freezed_output, summand, min_summand, 

9016 max_summand, "out_type", out_type, "strides", strides, "padding", 

9017 padding, "dilations", dilations, "padding_list", padding_list) 

9018 _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result) 

9019 return _result 

9020 except _core._NotOkStatusException as e: 

9021 _ops.raise_from_not_ok_status(e, name) 

9022 except _core._FallbackException: 

9023 pass 

9024 try: 

9025 return quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback( 

9026 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9027 min_freezed_output, max_freezed_output, summand, min_summand, 

9028 max_summand, out_type=out_type, strides=strides, padding=padding, 

9029 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

9030 except _core._SymbolicException: 

9031 pass # Add nodes to the TensorFlow graph. 

9032 # Add nodes to the TensorFlow graph. 

9033 if not isinstance(strides, (list, tuple)): 

9034 raise TypeError( 

9035 "Expected list for 'strides' argument to " 

9036 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides) 

9037 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9038 padding = _execute.make_str(padding, "padding") 

9039 if out_type is None: 

9040 out_type = _dtypes.quint8 

9041 out_type = _execute.make_type(out_type, "out_type") 

9042 if dilations is None: 

9043 dilations = [1, 1, 1, 1] 

9044 if not isinstance(dilations, (list, tuple)): 

9045 raise TypeError( 

9046 "Expected list for 'dilations' argument to " 

9047 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations) 

9048 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9049 if padding_list is None: 

9050 padding_list = [] 

9051 if not isinstance(padding_list, (list, tuple)): 

9052 raise TypeError( 

9053 "Expected list for 'padding_list' argument to " 

9054 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list) 

9055 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9056 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9057 "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", input=input, 

9058 filter=filter, 

9059 bias=bias, 

9060 min_input=min_input, 

9061 max_input=max_input, 

9062 min_filter=min_filter, 

9063 max_filter=max_filter, 

9064 min_freezed_output=min_freezed_output, 

9065 max_freezed_output=max_freezed_output, 

9066 summand=summand, 

9067 min_summand=min_summand, 

9068 max_summand=max_summand, 

9069 strides=strides, 

9070 padding=padding, 

9071 out_type=out_type, 

9072 dilations=dilations, 

9073 padding_list=padding_list, 

9074 name=name) 

9075 _result = _outputs[:] 

9076 if _execute.must_record_gradient(): 

9077 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9078 _op._get_attr_type("Tfilter"), "Tbias", 

9079 _op._get_attr_type("Tbias"), "Tsummand", 

9080 _op._get_attr_type("Tsummand"), "out_type", 

9081 _op._get_attr_type("out_type"), "strides", 

9082 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9083 "dilations", _op.get_attr("dilations"), "padding_list", 

9084 _op.get_attr("padding_list")) 

9085 _inputs_flat = _op.inputs 

9086 _execute.record_gradient( 

9087 "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result) 

9088 _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result) 

9089 return _result 

9090 

9091QuantizedConv2DWithBiasSignedSumAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasSignedSumAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize)) 

9092 

9093 

9094def quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type, dilations, padding_list, name, ctx): 

9095 if not isinstance(strides, (list, tuple)): 

9096 raise TypeError( 

9097 "Expected list for 'strides' argument to " 

9098 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % strides) 

9099 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9100 padding = _execute.make_str(padding, "padding") 

9101 if out_type is None: 

9102 out_type = _dtypes.quint8 

9103 out_type = _execute.make_type(out_type, "out_type") 

9104 if dilations is None: 

9105 dilations = [1, 1, 1, 1] 

9106 if not isinstance(dilations, (list, tuple)): 

9107 raise TypeError( 

9108 "Expected list for 'dilations' argument to " 

9109 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % dilations) 

9110 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9111 if padding_list is None: 

9112 padding_list = [] 

9113 if not isinstance(padding_list, (list, tuple)): 

9114 raise TypeError( 

9115 "Expected list for 'padding_list' argument to " 

9116 "'quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize' Op, not %r." % padding_list) 

9117 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9118 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9119 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9120 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

9121 _attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9122 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9123 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9124 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9125 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9126 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

9127 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

9128 min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32) 

9129 max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32) 

9130 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand] 

9131 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias", 

9132 _attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type, "strides", 

9133 strides, "padding", padding, "dilations", dilations, "padding_list", 

9134 padding_list) 

9135 _result = _execute.execute(b"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", 

9136 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9137 name=name) 

9138 if _execute.must_record_gradient(): 

9139 _execute.record_gradient( 

9140 "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", _inputs_flat, _attrs, _result) 

9141 _result = _QuantizedConv2DWithBiasSignedSumAndReluAndRequantizeOutput._make(_result) 

9142 return _result 

9143 

9144_QuantizedConv2DWithBiasSumAndReluOutput = collections.namedtuple( 

9145 "QuantizedConv2DWithBiasSumAndRelu", 

9146 ["output", "min_output", "max_output"]) 

9147 

9148 

9149def quantized_conv2d_with_bias_sum_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

9150 r"""TODO: add doc. 

9151 

9152 Args: 

9153 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9154 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9155 bias: A `Tensor` of type `float32`. 

9156 min_input: A `Tensor` of type `float32`. 

9157 max_input: A `Tensor` of type `float32`. 

9158 min_filter: A `Tensor` of type `float32`. 

9159 max_filter: A `Tensor` of type `float32`. 

9160 summand: A `Tensor` of type `float32`. 

9161 strides: A list of `ints`. 

9162 padding: A `string` from: `"SAME", "VALID"`. 

9163 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

9164 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9165 padding_list: An optional list of `ints`. Defaults to `[]`. 

9166 name: A name for the operation (optional). 

9167 

9168 Returns: 

9169 A tuple of `Tensor` objects (output, min_output, max_output). 

9170 

9171 output: A `Tensor` of type `out_type`. 

9172 min_output: A `Tensor` of type `float32`. 

9173 max_output: A `Tensor` of type `float32`. 

9174 """ 

9175 _ctx = _context._context or _context.context() 

9176 tld = _ctx._thread_local_data 

9177 if tld.is_eager: 

9178 try: 

9179 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9180 _ctx, "QuantizedConv2DWithBiasSumAndRelu", name, input, filter, bias, 

9181 min_input, max_input, min_filter, max_filter, summand, "out_type", 

9182 out_type, "strides", strides, "padding", padding, "dilations", 

9183 dilations, "padding_list", padding_list) 

9184 _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result) 

9185 return _result 

9186 except _core._NotOkStatusException as e: 

9187 _ops.raise_from_not_ok_status(e, name) 

9188 except _core._FallbackException: 

9189 pass 

9190 try: 

9191 return quantized_conv2d_with_bias_sum_and_relu_eager_fallback( 

9192 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9193 summand, out_type=out_type, strides=strides, padding=padding, 

9194 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

9195 except _core._SymbolicException: 

9196 pass # Add nodes to the TensorFlow graph. 

9197 # Add nodes to the TensorFlow graph. 

9198 if not isinstance(strides, (list, tuple)): 

9199 raise TypeError( 

9200 "Expected list for 'strides' argument to " 

9201 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides) 

9202 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9203 padding = _execute.make_str(padding, "padding") 

9204 if out_type is None: 

9205 out_type = _dtypes.qint32 

9206 out_type = _execute.make_type(out_type, "out_type") 

9207 if dilations is None: 

9208 dilations = [1, 1, 1, 1] 

9209 if not isinstance(dilations, (list, tuple)): 

9210 raise TypeError( 

9211 "Expected list for 'dilations' argument to " 

9212 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations) 

9213 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9214 if padding_list is None: 

9215 padding_list = [] 

9216 if not isinstance(padding_list, (list, tuple)): 

9217 raise TypeError( 

9218 "Expected list for 'padding_list' argument to " 

9219 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list) 

9220 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9221 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9222 "QuantizedConv2DWithBiasSumAndRelu", input=input, filter=filter, 

9223 bias=bias, min_input=min_input, 

9224 max_input=max_input, 

9225 min_filter=min_filter, 

9226 max_filter=max_filter, 

9227 summand=summand, strides=strides, 

9228 padding=padding, 

9229 out_type=out_type, 

9230 dilations=dilations, 

9231 padding_list=padding_list, 

9232 name=name) 

9233 _result = _outputs[:] 

9234 if _execute.must_record_gradient(): 

9235 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9236 _op._get_attr_type("Tfilter"), "out_type", 

9237 _op._get_attr_type("out_type"), "strides", 

9238 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9239 "dilations", _op.get_attr("dilations"), "padding_list", 

9240 _op.get_attr("padding_list")) 

9241 _inputs_flat = _op.inputs 

9242 _execute.record_gradient( 

9243 "QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result) 

9244 _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result) 

9245 return _result 

9246 

9247QuantizedConv2DWithBiasSumAndRelu = tf_export("raw_ops.QuantizedConv2DWithBiasSumAndRelu")(_ops.to_raw_op(quantized_conv2d_with_bias_sum_and_relu)) 

9248 

9249 

9250def quantized_conv2d_with_bias_sum_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, summand, strides, padding, out_type, dilations, padding_list, name, ctx): 

9251 if not isinstance(strides, (list, tuple)): 

9252 raise TypeError( 

9253 "Expected list for 'strides' argument to " 

9254 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % strides) 

9255 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9256 padding = _execute.make_str(padding, "padding") 

9257 if out_type is None: 

9258 out_type = _dtypes.qint32 

9259 out_type = _execute.make_type(out_type, "out_type") 

9260 if dilations is None: 

9261 dilations = [1, 1, 1, 1] 

9262 if not isinstance(dilations, (list, tuple)): 

9263 raise TypeError( 

9264 "Expected list for 'dilations' argument to " 

9265 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % dilations) 

9266 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9267 if padding_list is None: 

9268 padding_list = [] 

9269 if not isinstance(padding_list, (list, tuple)): 

9270 raise TypeError( 

9271 "Expected list for 'padding_list' argument to " 

9272 "'quantized_conv2d_with_bias_sum_and_relu' Op, not %r." % padding_list) 

9273 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9274 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9275 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9276 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

9277 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9278 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9279 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9280 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9281 summand = _ops.convert_to_tensor(summand, _dtypes.float32) 

9282 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, summand] 

9283 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

9284 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

9285 "padding_list", padding_list) 

9286 _result = _execute.execute(b"QuantizedConv2DWithBiasSumAndRelu", 3, 

9287 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9288 name=name) 

9289 if _execute.must_record_gradient(): 

9290 _execute.record_gradient( 

9291 "QuantizedConv2DWithBiasSumAndRelu", _inputs_flat, _attrs, _result) 

9292 _result = _QuantizedConv2DWithBiasSumAndReluOutput._make(_result) 

9293 return _result 

9294 

9295_QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput = collections.namedtuple( 

9296 "QuantizedConv2DWithBiasSumAndReluAndRequantize", 

9297 ["output", "min_output", "max_output"]) 

9298 

9299 

9300def quantized_conv2d_with_bias_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

9301 r"""TODO: add doc. 

9302 

9303 Args: 

9304 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9305 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9306 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

9307 min_input: A `Tensor` of type `float32`. 

9308 max_input: A `Tensor` of type `float32`. 

9309 min_filter: A `Tensor` of type `float32`. 

9310 max_filter: A `Tensor` of type `float32`. 

9311 min_freezed_output: A `Tensor` of type `float32`. 

9312 max_freezed_output: A `Tensor` of type `float32`. 

9313 summand: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9314 min_summand: A `Tensor` of type `float32`. 

9315 max_summand: A `Tensor` of type `float32`. 

9316 strides: A list of `ints`. 

9317 padding: A `string` from: `"SAME", "VALID"`. 

9318 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

9319 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9320 padding_list: An optional list of `ints`. Defaults to `[]`. 

9321 name: A name for the operation (optional). 

9322 

9323 Returns: 

9324 A tuple of `Tensor` objects (output, min_output, max_output). 

9325 

9326 output: A `Tensor` of type `out_type`. 

9327 min_output: A `Tensor` of type `float32`. 

9328 max_output: A `Tensor` of type `float32`. 

9329 """ 

9330 _ctx = _context._context or _context.context() 

9331 tld = _ctx._thread_local_data 

9332 if tld.is_eager: 

9333 try: 

9334 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9335 _ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name, input, 

9336 filter, bias, min_input, max_input, min_filter, max_filter, 

9337 min_freezed_output, max_freezed_output, summand, min_summand, 

9338 max_summand, "out_type", out_type, "strides", strides, "padding", 

9339 padding, "dilations", dilations, "padding_list", padding_list) 

9340 _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result) 

9341 return _result 

9342 except _core._NotOkStatusException as e: 

9343 _ops.raise_from_not_ok_status(e, name) 

9344 except _core._FallbackException: 

9345 pass 

9346 try: 

9347 return quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback( 

9348 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9349 min_freezed_output, max_freezed_output, summand, min_summand, 

9350 max_summand, out_type=out_type, strides=strides, padding=padding, 

9351 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

9352 except _core._SymbolicException: 

9353 pass # Add nodes to the TensorFlow graph. 

9354 # Add nodes to the TensorFlow graph. 

9355 if not isinstance(strides, (list, tuple)): 

9356 raise TypeError( 

9357 "Expected list for 'strides' argument to " 

9358 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides) 

9359 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9360 padding = _execute.make_str(padding, "padding") 

9361 if out_type is None: 

9362 out_type = _dtypes.quint8 

9363 out_type = _execute.make_type(out_type, "out_type") 

9364 if dilations is None: 

9365 dilations = [1, 1, 1, 1] 

9366 if not isinstance(dilations, (list, tuple)): 

9367 raise TypeError( 

9368 "Expected list for 'dilations' argument to " 

9369 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations) 

9370 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9371 if padding_list is None: 

9372 padding_list = [] 

9373 if not isinstance(padding_list, (list, tuple)): 

9374 raise TypeError( 

9375 "Expected list for 'padding_list' argument to " 

9376 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list) 

9377 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9378 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9379 "QuantizedConv2DWithBiasSumAndReluAndRequantize", input=input, 

9380 filter=filter, 

9381 bias=bias, 

9382 min_input=min_input, 

9383 max_input=max_input, 

9384 min_filter=min_filter, 

9385 max_filter=max_filter, 

9386 min_freezed_output=min_freezed_output, 

9387 max_freezed_output=max_freezed_output, 

9388 summand=summand, 

9389 min_summand=min_summand, 

9390 max_summand=max_summand, 

9391 strides=strides, 

9392 padding=padding, 

9393 out_type=out_type, 

9394 dilations=dilations, 

9395 padding_list=padding_list, 

9396 name=name) 

9397 _result = _outputs[:] 

9398 if _execute.must_record_gradient(): 

9399 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9400 _op._get_attr_type("Tfilter"), "Tbias", 

9401 _op._get_attr_type("Tbias"), "Tsummand", 

9402 _op._get_attr_type("Tsummand"), "out_type", 

9403 _op._get_attr_type("out_type"), "strides", 

9404 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9405 "dilations", _op.get_attr("dilations"), "padding_list", 

9406 _op.get_attr("padding_list")) 

9407 _inputs_flat = _op.inputs 

9408 _execute.record_gradient( 

9409 "QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result) 

9410 _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result) 

9411 return _result 

9412 

9413QuantizedConv2DWithBiasSumAndReluAndRequantize = tf_export("raw_ops.QuantizedConv2DWithBiasSumAndReluAndRequantize")(_ops.to_raw_op(quantized_conv2d_with_bias_sum_and_relu_and_requantize)) 

9414 

9415 

9416def quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand, strides, padding, out_type, dilations, padding_list, name, ctx): 

9417 if not isinstance(strides, (list, tuple)): 

9418 raise TypeError( 

9419 "Expected list for 'strides' argument to " 

9420 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % strides) 

9421 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9422 padding = _execute.make_str(padding, "padding") 

9423 if out_type is None: 

9424 out_type = _dtypes.quint8 

9425 out_type = _execute.make_type(out_type, "out_type") 

9426 if dilations is None: 

9427 dilations = [1, 1, 1, 1] 

9428 if not isinstance(dilations, (list, tuple)): 

9429 raise TypeError( 

9430 "Expected list for 'dilations' argument to " 

9431 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % dilations) 

9432 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9433 if padding_list is None: 

9434 padding_list = [] 

9435 if not isinstance(padding_list, (list, tuple)): 

9436 raise TypeError( 

9437 "Expected list for 'padding_list' argument to " 

9438 "'quantized_conv2d_with_bias_sum_and_relu_and_requantize' Op, not %r." % padding_list) 

9439 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9440 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9441 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9442 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

9443 _attr_Tsummand, (summand,) = _execute.args_to_matching_eager([summand], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9444 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9445 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9446 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9447 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9448 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

9449 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

9450 min_summand = _ops.convert_to_tensor(min_summand, _dtypes.float32) 

9451 max_summand = _ops.convert_to_tensor(max_summand, _dtypes.float32) 

9452 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand] 

9453 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias", 

9454 _attr_Tbias, "Tsummand", _attr_Tsummand, "out_type", out_type, "strides", 

9455 strides, "padding", padding, "dilations", dilations, "padding_list", 

9456 padding_list) 

9457 _result = _execute.execute(b"QuantizedConv2DWithBiasSumAndReluAndRequantize", 

9458 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9459 name=name) 

9460 if _execute.must_record_gradient(): 

9461 _execute.record_gradient( 

9462 "QuantizedConv2DWithBiasSumAndReluAndRequantize", _inputs_flat, _attrs, _result) 

9463 _result = _QuantizedConv2DWithBiasSumAndReluAndRequantizeOutput._make(_result) 

9464 return _result 

9465 

9466_QuantizedDepthwiseConv2DOutput = collections.namedtuple( 

9467 "QuantizedDepthwiseConv2D", 

9468 ["output", "min_output", "max_output"]) 

9469 

9470 

9471def quantized_depthwise_conv2d(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None): 

9472 r"""Computes quantized depthwise Conv2D. 

9473 

9474 Args: 

9475 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9476 The original input tensor. 

9477 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9478 The original filter tensor. 

9479 min_input: A `Tensor` of type `float32`. 

9480 The float value that the minimum quantized input value represents. 

9481 max_input: A `Tensor` of type `float32`. 

9482 The float value that the maximum quantized input value represents. 

9483 min_filter: A `Tensor` of type `float32`. 

9484 The float value that the minimum quantized filter value represents. 

9485 max_filter: A `Tensor` of type `float32`. 

9486 The float value that the maximum quantized filter value represents. 

9487 strides: A list of `ints`. List of stride values. 

9488 padding: A `string` from: `"SAME", "VALID"`. 

9489 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

9490 The type of the output. 

9491 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9492 List of dilation values. 

9493 name: A name for the operation (optional). 

9494 

9495 Returns: 

9496 A tuple of `Tensor` objects (output, min_output, max_output). 

9497 

9498 output: A `Tensor` of type `out_type`. 

9499 min_output: A `Tensor` of type `float32`. 

9500 max_output: A `Tensor` of type `float32`. 

9501 """ 

9502 _ctx = _context._context or _context.context() 

9503 tld = _ctx._thread_local_data 

9504 if tld.is_eager: 

9505 try: 

9506 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9507 _ctx, "QuantizedDepthwiseConv2D", name, input, filter, min_input, 

9508 max_input, min_filter, max_filter, "out_type", out_type, "strides", 

9509 strides, "padding", padding, "dilations", dilations) 

9510 _result = _QuantizedDepthwiseConv2DOutput._make(_result) 

9511 return _result 

9512 except _core._NotOkStatusException as e: 

9513 _ops.raise_from_not_ok_status(e, name) 

9514 except _core._FallbackException: 

9515 pass 

9516 try: 

9517 return quantized_depthwise_conv2d_eager_fallback( 

9518 input, filter, min_input, max_input, min_filter, max_filter, 

9519 out_type=out_type, strides=strides, padding=padding, 

9520 dilations=dilations, name=name, ctx=_ctx) 

9521 except _core._SymbolicException: 

9522 pass # Add nodes to the TensorFlow graph. 

9523 # Add nodes to the TensorFlow graph. 

9524 if not isinstance(strides, (list, tuple)): 

9525 raise TypeError( 

9526 "Expected list for 'strides' argument to " 

9527 "'quantized_depthwise_conv2d' Op, not %r." % strides) 

9528 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9529 padding = _execute.make_str(padding, "padding") 

9530 if out_type is None: 

9531 out_type = _dtypes.qint32 

9532 out_type = _execute.make_type(out_type, "out_type") 

9533 if dilations is None: 

9534 dilations = [1, 1, 1, 1] 

9535 if not isinstance(dilations, (list, tuple)): 

9536 raise TypeError( 

9537 "Expected list for 'dilations' argument to " 

9538 "'quantized_depthwise_conv2d' Op, not %r." % dilations) 

9539 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9540 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9541 "QuantizedDepthwiseConv2D", input=input, filter=filter, 

9542 min_input=min_input, max_input=max_input, 

9543 min_filter=min_filter, 

9544 max_filter=max_filter, strides=strides, 

9545 padding=padding, out_type=out_type, 

9546 dilations=dilations, name=name) 

9547 _result = _outputs[:] 

9548 if _execute.must_record_gradient(): 

9549 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9550 _op._get_attr_type("Tfilter"), "out_type", 

9551 _op._get_attr_type("out_type"), "strides", 

9552 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9553 "dilations", _op.get_attr("dilations")) 

9554 _inputs_flat = _op.inputs 

9555 _execute.record_gradient( 

9556 "QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result) 

9557 _result = _QuantizedDepthwiseConv2DOutput._make(_result) 

9558 return _result 

9559 

9560QuantizedDepthwiseConv2D = tf_export("raw_ops.QuantizedDepthwiseConv2D")(_ops.to_raw_op(quantized_depthwise_conv2d)) 

9561 

9562 

9563def quantized_depthwise_conv2d_eager_fallback(input, filter, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx): 

9564 if not isinstance(strides, (list, tuple)): 

9565 raise TypeError( 

9566 "Expected list for 'strides' argument to " 

9567 "'quantized_depthwise_conv2d' Op, not %r." % strides) 

9568 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9569 padding = _execute.make_str(padding, "padding") 

9570 if out_type is None: 

9571 out_type = _dtypes.qint32 

9572 out_type = _execute.make_type(out_type, "out_type") 

9573 if dilations is None: 

9574 dilations = [1, 1, 1, 1] 

9575 if not isinstance(dilations, (list, tuple)): 

9576 raise TypeError( 

9577 "Expected list for 'dilations' argument to " 

9578 "'quantized_depthwise_conv2d' Op, not %r." % dilations) 

9579 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9580 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9581 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9582 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9583 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9584 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9585 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9586 _inputs_flat = [input, filter, min_input, max_input, min_filter, max_filter] 

9587 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

9588 out_type, "strides", strides, "padding", padding, "dilations", dilations) 

9589 _result = _execute.execute(b"QuantizedDepthwiseConv2D", 3, 

9590 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9591 name=name) 

9592 if _execute.must_record_gradient(): 

9593 _execute.record_gradient( 

9594 "QuantizedDepthwiseConv2D", _inputs_flat, _attrs, _result) 

9595 _result = _QuantizedDepthwiseConv2DOutput._make(_result) 

9596 return _result 

9597 

9598_QuantizedDepthwiseConv2DWithBiasOutput = collections.namedtuple( 

9599 "QuantizedDepthwiseConv2DWithBias", 

9600 ["output", "min_output", "max_output"]) 

9601 

9602 

9603def quantized_depthwise_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], name=None): 

9604 r"""Computes quantized depthwise Conv2D with Bias. 

9605 

9606 Args: 

9607 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9608 The original input tensor. 

9609 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9610 The original filter tensor. 

9611 bias: A `Tensor` of type `float32`. The original bias tensor. 

9612 min_input: A `Tensor` of type `float32`. 

9613 The float value that the minimum quantized input value represents. 

9614 max_input: A `Tensor` of type `float32`. 

9615 The float value that the maximum quantized input value represents. 

9616 min_filter: A `Tensor` of type `float32`. 

9617 The float value that the minimum quantized filter value represents. 

9618 max_filter: A `Tensor` of type `float32`. 

9619 The float value that the maximum quantized filter value represents. 

9620 strides: A list of `ints`. List of stride values. 

9621 padding: A `string` from: `"SAME", "VALID"`. 

9622 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

9623 The type of the output. 

9624 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9625 List of dilation values. 

9626 name: A name for the operation (optional). 

9627 

9628 Returns: 

9629 A tuple of `Tensor` objects (output, min_output, max_output). 

9630 

9631 output: A `Tensor` of type `out_type`. 

9632 min_output: A `Tensor` of type `float32`. 

9633 max_output: A `Tensor` of type `float32`. 

9634 """ 

9635 _ctx = _context._context or _context.context() 

9636 tld = _ctx._thread_local_data 

9637 if tld.is_eager: 

9638 try: 

9639 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9640 _ctx, "QuantizedDepthwiseConv2DWithBias", name, input, filter, bias, 

9641 min_input, max_input, min_filter, max_filter, "out_type", out_type, 

9642 "strides", strides, "padding", padding, "dilations", dilations) 

9643 _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result) 

9644 return _result 

9645 except _core._NotOkStatusException as e: 

9646 _ops.raise_from_not_ok_status(e, name) 

9647 except _core._FallbackException: 

9648 pass 

9649 try: 

9650 return quantized_depthwise_conv2d_with_bias_eager_fallback( 

9651 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9652 out_type=out_type, strides=strides, padding=padding, 

9653 dilations=dilations, name=name, ctx=_ctx) 

9654 except _core._SymbolicException: 

9655 pass # Add nodes to the TensorFlow graph. 

9656 # Add nodes to the TensorFlow graph. 

9657 if not isinstance(strides, (list, tuple)): 

9658 raise TypeError( 

9659 "Expected list for 'strides' argument to " 

9660 "'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides) 

9661 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9662 padding = _execute.make_str(padding, "padding") 

9663 if out_type is None: 

9664 out_type = _dtypes.qint32 

9665 out_type = _execute.make_type(out_type, "out_type") 

9666 if dilations is None: 

9667 dilations = [1, 1, 1, 1] 

9668 if not isinstance(dilations, (list, tuple)): 

9669 raise TypeError( 

9670 "Expected list for 'dilations' argument to " 

9671 "'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations) 

9672 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9673 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9674 "QuantizedDepthwiseConv2DWithBias", input=input, filter=filter, 

9675 bias=bias, min_input=min_input, 

9676 max_input=max_input, 

9677 min_filter=min_filter, 

9678 max_filter=max_filter, 

9679 strides=strides, padding=padding, 

9680 out_type=out_type, 

9681 dilations=dilations, name=name) 

9682 _result = _outputs[:] 

9683 if _execute.must_record_gradient(): 

9684 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9685 _op._get_attr_type("Tfilter"), "out_type", 

9686 _op._get_attr_type("out_type"), "strides", 

9687 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9688 "dilations", _op.get_attr("dilations")) 

9689 _inputs_flat = _op.inputs 

9690 _execute.record_gradient( 

9691 "QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result) 

9692 _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result) 

9693 return _result 

9694 

9695QuantizedDepthwiseConv2DWithBias = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBias")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias)) 

9696 

9697 

9698def quantized_depthwise_conv2d_with_bias_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, name, ctx): 

9699 if not isinstance(strides, (list, tuple)): 

9700 raise TypeError( 

9701 "Expected list for 'strides' argument to " 

9702 "'quantized_depthwise_conv2d_with_bias' Op, not %r." % strides) 

9703 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9704 padding = _execute.make_str(padding, "padding") 

9705 if out_type is None: 

9706 out_type = _dtypes.qint32 

9707 out_type = _execute.make_type(out_type, "out_type") 

9708 if dilations is None: 

9709 dilations = [1, 1, 1, 1] 

9710 if not isinstance(dilations, (list, tuple)): 

9711 raise TypeError( 

9712 "Expected list for 'dilations' argument to " 

9713 "'quantized_depthwise_conv2d_with_bias' Op, not %r." % dilations) 

9714 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9715 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9716 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9717 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

9718 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9719 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9720 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9721 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9722 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter] 

9723 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

9724 out_type, "strides", strides, "padding", padding, "dilations", dilations) 

9725 _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBias", 3, 

9726 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9727 name=name) 

9728 if _execute.must_record_gradient(): 

9729 _execute.record_gradient( 

9730 "QuantizedDepthwiseConv2DWithBias", _inputs_flat, _attrs, _result) 

9731 _result = _QuantizedDepthwiseConv2DWithBiasOutput._make(_result) 

9732 return _result 

9733 

9734_QuantizedDepthwiseConv2DWithBiasAndReluOutput = collections.namedtuple( 

9735 "QuantizedDepthwiseConv2DWithBiasAndRelu", 

9736 ["output", "min_output", "max_output"]) 

9737 

9738 

9739def quantized_depthwise_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type=_dtypes.qint32, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

9740 r"""Computes quantized depthwise Conv2D with Bias and Relu. 

9741 

9742 Args: 

9743 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9744 The original input tensor. 

9745 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9746 The original filter tensor. 

9747 bias: A `Tensor` of type `float32`. The original bias tensor. 

9748 min_input: A `Tensor` of type `float32`. 

9749 The float value that the minimum quantized input value represents. 

9750 max_input: A `Tensor` of type `float32`. 

9751 The float value that the maximum quantized input value represents. 

9752 min_filter: A `Tensor` of type `float32`. 

9753 The float value that the minimum quantized filter value represents. 

9754 max_filter: A `Tensor` of type `float32`. 

9755 The float value that the maximum quantized filter value represents. 

9756 strides: A list of `ints`. List of stride values. 

9757 padding: A `string` from: `"SAME", "VALID"`. 

9758 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

9759 The type of the output. 

9760 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9761 List of dilation values. 

9762 padding_list: An optional list of `ints`. Defaults to `[]`. 

9763 name: A name for the operation (optional). 

9764 

9765 Returns: 

9766 A tuple of `Tensor` objects (output, min_output, max_output). 

9767 

9768 output: A `Tensor` of type `out_type`. 

9769 min_output: A `Tensor` of type `float32`. 

9770 max_output: A `Tensor` of type `float32`. 

9771 """ 

9772 _ctx = _context._context or _context.context() 

9773 tld = _ctx._thread_local_data 

9774 if tld.is_eager: 

9775 try: 

9776 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9777 _ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name, input, filter, 

9778 bias, min_input, max_input, min_filter, max_filter, "out_type", 

9779 out_type, "strides", strides, "padding", padding, "dilations", 

9780 dilations, "padding_list", padding_list) 

9781 _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result) 

9782 return _result 

9783 except _core._NotOkStatusException as e: 

9784 _ops.raise_from_not_ok_status(e, name) 

9785 except _core._FallbackException: 

9786 pass 

9787 try: 

9788 return quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback( 

9789 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9790 out_type=out_type, strides=strides, padding=padding, 

9791 dilations=dilations, padding_list=padding_list, name=name, ctx=_ctx) 

9792 except _core._SymbolicException: 

9793 pass # Add nodes to the TensorFlow graph. 

9794 # Add nodes to the TensorFlow graph. 

9795 if not isinstance(strides, (list, tuple)): 

9796 raise TypeError( 

9797 "Expected list for 'strides' argument to " 

9798 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides) 

9799 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9800 padding = _execute.make_str(padding, "padding") 

9801 if out_type is None: 

9802 out_type = _dtypes.qint32 

9803 out_type = _execute.make_type(out_type, "out_type") 

9804 if dilations is None: 

9805 dilations = [1, 1, 1, 1] 

9806 if not isinstance(dilations, (list, tuple)): 

9807 raise TypeError( 

9808 "Expected list for 'dilations' argument to " 

9809 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations) 

9810 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9811 if padding_list is None: 

9812 padding_list = [] 

9813 if not isinstance(padding_list, (list, tuple)): 

9814 raise TypeError( 

9815 "Expected list for 'padding_list' argument to " 

9816 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % padding_list) 

9817 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9818 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9819 "QuantizedDepthwiseConv2DWithBiasAndRelu", input=input, filter=filter, 

9820 bias=bias, 

9821 min_input=min_input, 

9822 max_input=max_input, 

9823 min_filter=min_filter, 

9824 max_filter=max_filter, 

9825 strides=strides, 

9826 padding=padding, 

9827 out_type=out_type, 

9828 dilations=dilations, 

9829 padding_list=padding_list, 

9830 name=name) 

9831 _result = _outputs[:] 

9832 if _execute.must_record_gradient(): 

9833 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

9834 _op._get_attr_type("Tfilter"), "out_type", 

9835 _op._get_attr_type("out_type"), "strides", 

9836 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

9837 "dilations", _op.get_attr("dilations"), "padding_list", 

9838 _op.get_attr("padding_list")) 

9839 _inputs_flat = _op.inputs 

9840 _execute.record_gradient( 

9841 "QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result) 

9842 _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result) 

9843 return _result 

9844 

9845QuantizedDepthwiseConv2DWithBiasAndRelu = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndRelu")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias_and_relu)) 

9846 

9847 

9848def quantized_depthwise_conv2d_with_bias_and_relu_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, strides, padding, out_type, dilations, padding_list, name, ctx): 

9849 if not isinstance(strides, (list, tuple)): 

9850 raise TypeError( 

9851 "Expected list for 'strides' argument to " 

9852 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % strides) 

9853 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9854 padding = _execute.make_str(padding, "padding") 

9855 if out_type is None: 

9856 out_type = _dtypes.qint32 

9857 out_type = _execute.make_type(out_type, "out_type") 

9858 if dilations is None: 

9859 dilations = [1, 1, 1, 1] 

9860 if not isinstance(dilations, (list, tuple)): 

9861 raise TypeError( 

9862 "Expected list for 'dilations' argument to " 

9863 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % dilations) 

9864 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9865 if padding_list is None: 

9866 padding_list = [] 

9867 if not isinstance(padding_list, (list, tuple)): 

9868 raise TypeError( 

9869 "Expected list for 'padding_list' argument to " 

9870 "'quantized_depthwise_conv2d_with_bias_and_relu' Op, not %r." % padding_list) 

9871 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9872 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9873 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

9874 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

9875 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

9876 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

9877 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

9878 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

9879 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter] 

9880 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "out_type", 

9881 out_type, "strides", strides, "padding", padding, "dilations", dilations, 

9882 "padding_list", padding_list) 

9883 _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndRelu", 3, 

9884 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

9885 name=name) 

9886 if _execute.must_record_gradient(): 

9887 _execute.record_gradient( 

9888 "QuantizedDepthwiseConv2DWithBiasAndRelu", _inputs_flat, _attrs, _result) 

9889 _result = _QuantizedDepthwiseConv2DWithBiasAndReluOutput._make(_result) 

9890 return _result 

9891 

9892_QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput = collections.namedtuple( 

9893 "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", 

9894 ["output", "min_output", "max_output"]) 

9895 

9896 

9897def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type=_dtypes.quint8, dilations=[1, 1, 1, 1], padding_list=[], name=None): 

9898 r"""Computes quantized depthwise Conv2D with Bias, Relu and Requantize. 

9899 

9900 Args: 

9901 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9902 The original input tensor. 

9903 filter: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

9904 The original filter tensor. 

9905 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

9906 The original bias tensor. 

9907 min_input: A `Tensor` of type `float32`. 

9908 The float value that the minimum quantized input value represents. 

9909 max_input: A `Tensor` of type `float32`. 

9910 The float value that the maximum quantized input value represents. 

9911 min_filter: A `Tensor` of type `float32`. 

9912 The float value that the minimum quantized filter value represents. 

9913 max_filter: A `Tensor` of type `float32`. 

9914 The float value that the maximum quantized filter value represents. 

9915 min_freezed_output: A `Tensor` of type `float32`. 

9916 The minimum float value of the output tensor. 

9917 max_freezed_output: A `Tensor` of type `float32`. 

9918 The maximum float value of the output tensor. 

9919 strides: A list of `ints`. List of stride values. 

9920 padding: A `string` from: `"SAME", "VALID"`. 

9921 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

9922 The type of the output. 

9923 dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 

9924 List of dilation values. 

9925 padding_list: An optional list of `ints`. Defaults to `[]`. 

9926 name: A name for the operation (optional). 

9927 

9928 Returns: 

9929 A tuple of `Tensor` objects (output, min_output, max_output). 

9930 

9931 output: A `Tensor` of type `out_type`. 

9932 min_output: A `Tensor` of type `float32`. 

9933 max_output: A `Tensor` of type `float32`. 

9934 """ 

9935 _ctx = _context._context or _context.context() 

9936 tld = _ctx._thread_local_data 

9937 if tld.is_eager: 

9938 try: 

9939 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

9940 _ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name, 

9941 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9942 min_freezed_output, max_freezed_output, "out_type", out_type, 

9943 "strides", strides, "padding", padding, "dilations", dilations, 

9944 "padding_list", padding_list) 

9945 _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

9946 return _result 

9947 except _core._NotOkStatusException as e: 

9948 _ops.raise_from_not_ok_status(e, name) 

9949 except _core._FallbackException: 

9950 pass 

9951 try: 

9952 return quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback( 

9953 input, filter, bias, min_input, max_input, min_filter, max_filter, 

9954 min_freezed_output, max_freezed_output, out_type=out_type, 

9955 strides=strides, padding=padding, dilations=dilations, 

9956 padding_list=padding_list, name=name, ctx=_ctx) 

9957 except _core._SymbolicException: 

9958 pass # Add nodes to the TensorFlow graph. 

9959 # Add nodes to the TensorFlow graph. 

9960 if not isinstance(strides, (list, tuple)): 

9961 raise TypeError( 

9962 "Expected list for 'strides' argument to " 

9963 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides) 

9964 strides = [_execute.make_int(_i, "strides") for _i in strides] 

9965 padding = _execute.make_str(padding, "padding") 

9966 if out_type is None: 

9967 out_type = _dtypes.quint8 

9968 out_type = _execute.make_type(out_type, "out_type") 

9969 if dilations is None: 

9970 dilations = [1, 1, 1, 1] 

9971 if not isinstance(dilations, (list, tuple)): 

9972 raise TypeError( 

9973 "Expected list for 'dilations' argument to " 

9974 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations) 

9975 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

9976 if padding_list is None: 

9977 padding_list = [] 

9978 if not isinstance(padding_list, (list, tuple)): 

9979 raise TypeError( 

9980 "Expected list for 'padding_list' argument to " 

9981 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list) 

9982 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

9983 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

9984 "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", input=input, 

9985 filter=filter, 

9986 bias=bias, 

9987 min_input=min_input, 

9988 max_input=max_input, 

9989 min_filter=min_filter, 

9990 max_filter=max_filter, 

9991 min_freezed_output=min_freezed_output, 

9992 max_freezed_output=max_freezed_output, 

9993 strides=strides, 

9994 padding=padding, 

9995 out_type=out_type, 

9996 dilations=dilations, 

9997 padding_list=padding_list, 

9998 name=name) 

9999 _result = _outputs[:] 

10000 if _execute.must_record_gradient(): 

10001 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "Tfilter", 

10002 _op._get_attr_type("Tfilter"), "Tbias", 

10003 _op._get_attr_type("Tbias"), "out_type", 

10004 _op._get_attr_type("out_type"), "strides", 

10005 _op.get_attr("strides"), "padding", _op.get_attr("padding"), 

10006 "dilations", _op.get_attr("dilations"), "padding_list", 

10007 _op.get_attr("padding_list")) 

10008 _inputs_flat = _op.inputs 

10009 _execute.record_gradient( 

10010 "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

10011 _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

10012 return _result 

10013 

10014QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_depthwise_conv2d_with_bias_and_relu_and_requantize)) 

10015 

10016 

10017def quantized_depthwise_conv2d_with_bias_and_relu_and_requantize_eager_fallback(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, strides, padding, out_type, dilations, padding_list, name, ctx): 

10018 if not isinstance(strides, (list, tuple)): 

10019 raise TypeError( 

10020 "Expected list for 'strides' argument to " 

10021 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % strides) 

10022 strides = [_execute.make_int(_i, "strides") for _i in strides] 

10023 padding = _execute.make_str(padding, "padding") 

10024 if out_type is None: 

10025 out_type = _dtypes.quint8 

10026 out_type = _execute.make_type(out_type, "out_type") 

10027 if dilations is None: 

10028 dilations = [1, 1, 1, 1] 

10029 if not isinstance(dilations, (list, tuple)): 

10030 raise TypeError( 

10031 "Expected list for 'dilations' argument to " 

10032 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % dilations) 

10033 dilations = [_execute.make_int(_i, "dilations") for _i in dilations] 

10034 if padding_list is None: 

10035 padding_list = [] 

10036 if not isinstance(padding_list, (list, tuple)): 

10037 raise TypeError( 

10038 "Expected list for 'padding_list' argument to " 

10039 "'quantized_depthwise_conv2d_with_bias_and_relu_and_requantize' Op, not %r." % padding_list) 

10040 padding_list = [_execute.make_int(_i, "padding_list") for _i in padding_list] 

10041 _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10042 _attr_Tfilter, (filter,) = _execute.args_to_matching_eager([filter], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10043 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

10044 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

10045 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

10046 min_filter = _ops.convert_to_tensor(min_filter, _dtypes.float32) 

10047 max_filter = _ops.convert_to_tensor(max_filter, _dtypes.float32) 

10048 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

10049 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

10050 _inputs_flat = [input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output] 

10051 _attrs = ("Tinput", _attr_Tinput, "Tfilter", _attr_Tfilter, "Tbias", 

10052 _attr_Tbias, "out_type", out_type, "strides", strides, "padding", padding, 

10053 "dilations", dilations, "padding_list", padding_list) 

10054 _result = _execute.execute(b"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", 

10055 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10056 name=name) 

10057 if _execute.must_record_gradient(): 

10058 _execute.record_gradient( 

10059 "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

10060 _result = _QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutput._make(_result) 

10061 return _result 

10062 

10063_QuantizedMatMulWithBiasOutput = collections.namedtuple( 

10064 "QuantizedMatMulWithBias", 

10065 ["out", "min_out", "max_out"]) 

10066 

10067 

10068def quantized_mat_mul_with_bias(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None): 

10069 r"""Performs a quantized matrix multiplication of `a` by the matrix `b` with bias 

10070add. 

10071 

10072 The inputs must be two-dimensional matrices and 1D bias vector. And the inner 

10073 dimension of `a` (after being transposed if `transpose_a` is non-zero) must 

10074 match the outer dimension of `b` (after being transposed if `transposed_b` is 

10075 non-zero). Then do broadcast add operation with bias values on the matrix 

10076 multiplication result. The bias size must match inner dimension of `b`. 

10077 

10078 Args: 

10079 a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10080 A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. 

10081 b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10082 A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. 

10083 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

10084 A 1D bias tensor with size matching inner dimension of `b` (after being 

10085 transposed if `transposed_b` is non-zero). 

10086 min_a: A `Tensor` of type `float32`. 

10087 The float value that the lowest quantized `a` value represents. 

10088 max_a: A `Tensor` of type `float32`. 

10089 The float value that the highest quantized `a` value represents. 

10090 min_b: A `Tensor` of type `float32`. 

10091 The float value that the lowest quantized `b` value represents. 

10092 max_b: A `Tensor` of type `float32`. 

10093 The float value that the highest quantized `b` value represents. 

10094 Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

10095 transpose_a: An optional `bool`. Defaults to `False`. 

10096 If true, `a` is transposed before multiplication. 

10097 transpose_b: An optional `bool`. Defaults to `False`. 

10098 If true, `b` is transposed before multiplication. 

10099 input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`. 

10100 Input data quantization mode. Either MIN_FIRST(default) or SCALED. 

10101 name: A name for the operation (optional). 

10102 

10103 Returns: 

10104 A tuple of `Tensor` objects (out, min_out, max_out). 

10105 

10106 out: A `Tensor` of type `Toutput`. 

10107 min_out: A `Tensor` of type `float32`. 

10108 max_out: A `Tensor` of type `float32`. 

10109 """ 

10110 _ctx = _context._context or _context.context() 

10111 tld = _ctx._thread_local_data 

10112 if tld.is_eager: 

10113 try: 

10114 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10115 _ctx, "QuantizedMatMulWithBias", name, a, b, bias, min_a, max_a, 

10116 min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a, 

10117 "transpose_b", transpose_b, "input_quant_mode", input_quant_mode) 

10118 _result = _QuantizedMatMulWithBiasOutput._make(_result) 

10119 return _result 

10120 except _core._NotOkStatusException as e: 

10121 _ops.raise_from_not_ok_status(e, name) 

10122 except _core._FallbackException: 

10123 pass 

10124 try: 

10125 return quantized_mat_mul_with_bias_eager_fallback( 

10126 a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput, 

10127 transpose_a=transpose_a, transpose_b=transpose_b, 

10128 input_quant_mode=input_quant_mode, name=name, ctx=_ctx) 

10129 except _core._SymbolicException: 

10130 pass # Add nodes to the TensorFlow graph. 

10131 # Add nodes to the TensorFlow graph. 

10132 if Toutput is None: 

10133 Toutput = _dtypes.qint32 

10134 Toutput = _execute.make_type(Toutput, "Toutput") 

10135 if transpose_a is None: 

10136 transpose_a = False 

10137 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10138 if transpose_b is None: 

10139 transpose_b = False 

10140 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10141 if input_quant_mode is None: 

10142 input_quant_mode = "MIN_FIRST" 

10143 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10144 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10145 "QuantizedMatMulWithBias", a=a, b=b, bias=bias, min_a=min_a, 

10146 max_a=max_a, min_b=min_b, max_b=max_b, 

10147 Toutput=Toutput, transpose_a=transpose_a, 

10148 transpose_b=transpose_b, 

10149 input_quant_mode=input_quant_mode, 

10150 name=name) 

10151 _result = _outputs[:] 

10152 if _execute.must_record_gradient(): 

10153 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

10154 "Tbias", _op._get_attr_type("Tbias"), "Toutput", 

10155 _op._get_attr_type("Toutput"), "transpose_a", 

10156 _op._get_attr_bool("transpose_a"), "transpose_b", 

10157 _op._get_attr_bool("transpose_b"), "input_quant_mode", 

10158 _op.get_attr("input_quant_mode")) 

10159 _inputs_flat = _op.inputs 

10160 _execute.record_gradient( 

10161 "QuantizedMatMulWithBias", _inputs_flat, _attrs, _result) 

10162 _result = _QuantizedMatMulWithBiasOutput._make(_result) 

10163 return _result 

10164 

10165QuantizedMatMulWithBias = tf_export("raw_ops.QuantizedMatMulWithBias")(_ops.to_raw_op(quantized_mat_mul_with_bias)) 

10166 

10167 

10168def quantized_mat_mul_with_bias_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx): 

10169 if Toutput is None: 

10170 Toutput = _dtypes.qint32 

10171 Toutput = _execute.make_type(Toutput, "Toutput") 

10172 if transpose_a is None: 

10173 transpose_a = False 

10174 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10175 if transpose_b is None: 

10176 transpose_b = False 

10177 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10178 if input_quant_mode is None: 

10179 input_quant_mode = "MIN_FIRST" 

10180 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10181 _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10182 _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10183 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

10184 min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) 

10185 max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) 

10186 min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) 

10187 max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) 

10188 _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b] 

10189 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput", 

10190 Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, 

10191 "input_quant_mode", input_quant_mode) 

10192 _result = _execute.execute(b"QuantizedMatMulWithBias", 3, 

10193 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10194 name=name) 

10195 if _execute.must_record_gradient(): 

10196 _execute.record_gradient( 

10197 "QuantizedMatMulWithBias", _inputs_flat, _attrs, _result) 

10198 _result = _QuantizedMatMulWithBiasOutput._make(_result) 

10199 return _result 

10200 

10201 

10202def quantized_mat_mul_with_bias_and_dequantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None): 

10203 r"""TODO: add doc. 

10204 

10205 Args: 

10206 a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10207 b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10208 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

10209 min_a: A `Tensor` of type `float32`. 

10210 max_a: A `Tensor` of type `float32`. 

10211 min_b: A `Tensor` of type `float32`. 

10212 max_b: A `Tensor` of type `float32`. 

10213 min_freezed_output: A `Tensor` of type `float32`. 

10214 max_freezed_output: A `Tensor` of type `float32`. 

10215 Toutput: A `tf.DType` from: `tf.float32`. 

10216 transpose_a: An optional `bool`. Defaults to `False`. 

10217 transpose_b: An optional `bool`. Defaults to `False`. 

10218 input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`. 

10219 name: A name for the operation (optional). 

10220 

10221 Returns: 

10222 A `Tensor` of type `Toutput`. 

10223 """ 

10224 _ctx = _context._context or _context.context() 

10225 tld = _ctx._thread_local_data 

10226 if tld.is_eager: 

10227 try: 

10228 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10229 _ctx, "QuantizedMatMulWithBiasAndDequantize", name, a, b, bias, min_a, 

10230 max_a, min_b, max_b, min_freezed_output, max_freezed_output, 

10231 "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", 

10232 transpose_b, "input_quant_mode", input_quant_mode) 

10233 return _result 

10234 except _core._NotOkStatusException as e: 

10235 _ops.raise_from_not_ok_status(e, name) 

10236 except _core._FallbackException: 

10237 pass 

10238 try: 

10239 return quantized_mat_mul_with_bias_and_dequantize_eager_fallback( 

10240 a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, 

10241 max_freezed_output, Toutput=Toutput, transpose_a=transpose_a, 

10242 transpose_b=transpose_b, input_quant_mode=input_quant_mode, 

10243 name=name, ctx=_ctx) 

10244 except _core._SymbolicException: 

10245 pass # Add nodes to the TensorFlow graph. 

10246 # Add nodes to the TensorFlow graph. 

10247 Toutput = _execute.make_type(Toutput, "Toutput") 

10248 if transpose_a is None: 

10249 transpose_a = False 

10250 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10251 if transpose_b is None: 

10252 transpose_b = False 

10253 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10254 if input_quant_mode is None: 

10255 input_quant_mode = "MIN_FIRST" 

10256 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10257 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10258 "QuantizedMatMulWithBiasAndDequantize", a=a, b=b, bias=bias, 

10259 min_a=min_a, max_a=max_a, 

10260 min_b=min_b, max_b=max_b, 

10261 min_freezed_output=min_freezed_output, 

10262 max_freezed_output=max_freezed_output, 

10263 Toutput=Toutput, 

10264 transpose_a=transpose_a, 

10265 transpose_b=transpose_b, 

10266 input_quant_mode=input_quant_mode, 

10267 name=name) 

10268 _result = _outputs[:] 

10269 if _execute.must_record_gradient(): 

10270 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

10271 "Tbias", _op._get_attr_type("Tbias"), "Toutput", 

10272 _op._get_attr_type("Toutput"), "transpose_a", 

10273 _op._get_attr_bool("transpose_a"), "transpose_b", 

10274 _op._get_attr_bool("transpose_b"), "input_quant_mode", 

10275 _op.get_attr("input_quant_mode")) 

10276 _inputs_flat = _op.inputs 

10277 _execute.record_gradient( 

10278 "QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result) 

10279 _result, = _result 

10280 return _result 

10281 

10282QuantizedMatMulWithBiasAndDequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndDequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_dequantize)) 

10283 

10284 

10285def quantized_mat_mul_with_bias_and_dequantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx): 

10286 Toutput = _execute.make_type(Toutput, "Toutput") 

10287 if transpose_a is None: 

10288 transpose_a = False 

10289 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10290 if transpose_b is None: 

10291 transpose_b = False 

10292 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10293 if input_quant_mode is None: 

10294 input_quant_mode = "MIN_FIRST" 

10295 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10296 _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10297 _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10298 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

10299 min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) 

10300 max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) 

10301 min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) 

10302 max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) 

10303 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

10304 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

10305 _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output] 

10306 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput", 

10307 Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, 

10308 "input_quant_mode", input_quant_mode) 

10309 _result = _execute.execute(b"QuantizedMatMulWithBiasAndDequantize", 1, 

10310 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10311 name=name) 

10312 if _execute.must_record_gradient(): 

10313 _execute.record_gradient( 

10314 "QuantizedMatMulWithBiasAndDequantize", _inputs_flat, _attrs, _result) 

10315 _result, = _result 

10316 return _result 

10317 

10318_QuantizedMatMulWithBiasAndReluOutput = collections.namedtuple( 

10319 "QuantizedMatMulWithBiasAndRelu", 

10320 ["out", "min_out", "max_out"]) 

10321 

10322 

10323def quantized_mat_mul_with_bias_and_relu(a, b, bias, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None): 

10324 r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias 

10325add and relu fusion. 

10326 

10327 The inputs must be two-dimensional matrices and 1D bias vector. And the inner 

10328 dimension of `a` (after being transposed if `transpose_a` is non-zero) must 

10329 match the outer dimension of `b` (after being transposed if `transposed_b` is 

10330 non-zero). Then do broadcast add operation with bias values on the matrix 

10331 multiplication result. The bias size must match inner dimension of `b`. Then do 

10332 relu activation to get non-negative result. 

10333 

10334 Args: 

10335 a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10336 A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. 

10337 b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10338 A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. 

10339 bias: A `Tensor` of type `float32`. 

10340 A 1D bias tensor with size matching with inner dimension of `b` (after being 

10341 transposed if `transposed_b` is non-zero). 

10342 min_a: A `Tensor` of type `float32`. 

10343 The float value that the lowest quantized `a` value represents. 

10344 max_a: A `Tensor` of type `float32`. 

10345 The float value that the highest quantized `a` value represents. 

10346 min_b: A `Tensor` of type `float32`. 

10347 The float value that the lowest quantized `b` value represents. 

10348 max_b: A `Tensor` of type `float32`. 

10349 The float value that the highest quantized `b` value represents. 

10350 Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. 

10351 transpose_a: An optional `bool`. Defaults to `False`. 

10352 If true, `a` is transposed before multiplication. 

10353 transpose_b: An optional `bool`. Defaults to `False`. 

10354 If true, `b` is transposed before multiplication. 

10355 input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`. 

10356 Input data quantization mode. Either MIN_FIRST(default) or SCALED. 

10357 name: A name for the operation (optional). 

10358 

10359 Returns: 

10360 A tuple of `Tensor` objects (out, min_out, max_out). 

10361 

10362 out: A `Tensor` of type `Toutput`. 

10363 min_out: A `Tensor` of type `float32`. 

10364 max_out: A `Tensor` of type `float32`. 

10365 """ 

10366 _ctx = _context._context or _context.context() 

10367 tld = _ctx._thread_local_data 

10368 if tld.is_eager: 

10369 try: 

10370 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10371 _ctx, "QuantizedMatMulWithBiasAndRelu", name, a, b, bias, min_a, 

10372 max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a, 

10373 "transpose_b", transpose_b, "input_quant_mode", input_quant_mode) 

10374 _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result) 

10375 return _result 

10376 except _core._NotOkStatusException as e: 

10377 _ops.raise_from_not_ok_status(e, name) 

10378 except _core._FallbackException: 

10379 pass 

10380 try: 

10381 return quantized_mat_mul_with_bias_and_relu_eager_fallback( 

10382 a, b, bias, min_a, max_a, min_b, max_b, Toutput=Toutput, 

10383 transpose_a=transpose_a, transpose_b=transpose_b, 

10384 input_quant_mode=input_quant_mode, name=name, ctx=_ctx) 

10385 except _core._SymbolicException: 

10386 pass # Add nodes to the TensorFlow graph. 

10387 # Add nodes to the TensorFlow graph. 

10388 if Toutput is None: 

10389 Toutput = _dtypes.qint32 

10390 Toutput = _execute.make_type(Toutput, "Toutput") 

10391 if transpose_a is None: 

10392 transpose_a = False 

10393 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10394 if transpose_b is None: 

10395 transpose_b = False 

10396 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10397 if input_quant_mode is None: 

10398 input_quant_mode = "MIN_FIRST" 

10399 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10400 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10401 "QuantizedMatMulWithBiasAndRelu", a=a, b=b, bias=bias, min_a=min_a, 

10402 max_a=max_a, min_b=min_b, 

10403 max_b=max_b, Toutput=Toutput, 

10404 transpose_a=transpose_a, 

10405 transpose_b=transpose_b, 

10406 input_quant_mode=input_quant_mode, 

10407 name=name) 

10408 _result = _outputs[:] 

10409 if _execute.must_record_gradient(): 

10410 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

10411 "Toutput", _op._get_attr_type("Toutput"), "transpose_a", 

10412 _op._get_attr_bool("transpose_a"), "transpose_b", 

10413 _op._get_attr_bool("transpose_b"), "input_quant_mode", 

10414 _op.get_attr("input_quant_mode")) 

10415 _inputs_flat = _op.inputs 

10416 _execute.record_gradient( 

10417 "QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result) 

10418 _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result) 

10419 return _result 

10420 

10421QuantizedMatMulWithBiasAndRelu = tf_export("raw_ops.QuantizedMatMulWithBiasAndRelu")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_relu)) 

10422 

10423 

10424def quantized_mat_mul_with_bias_and_relu_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx): 

10425 if Toutput is None: 

10426 Toutput = _dtypes.qint32 

10427 Toutput = _execute.make_type(Toutput, "Toutput") 

10428 if transpose_a is None: 

10429 transpose_a = False 

10430 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10431 if transpose_b is None: 

10432 transpose_b = False 

10433 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10434 if input_quant_mode is None: 

10435 input_quant_mode = "MIN_FIRST" 

10436 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10437 _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10438 _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10439 bias = _ops.convert_to_tensor(bias, _dtypes.float32) 

10440 min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) 

10441 max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) 

10442 min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) 

10443 max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) 

10444 _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b] 

10445 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a", 

10446 transpose_a, "transpose_b", transpose_b, "input_quant_mode", 

10447 input_quant_mode) 

10448 _result = _execute.execute(b"QuantizedMatMulWithBiasAndRelu", 3, 

10449 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10450 name=name) 

10451 if _execute.must_record_gradient(): 

10452 _execute.record_gradient( 

10453 "QuantizedMatMulWithBiasAndRelu", _inputs_flat, _attrs, _result) 

10454 _result = _QuantizedMatMulWithBiasAndReluOutput._make(_result) 

10455 return _result 

10456 

10457_QuantizedMatMulWithBiasAndReluAndRequantizeOutput = collections.namedtuple( 

10458 "QuantizedMatMulWithBiasAndReluAndRequantize", 

10459 ["out", "min_out", "max_out"]) 

10460 

10461 

10462def quantized_mat_mul_with_bias_and_relu_and_requantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None): 

10463 r"""Perform a quantized matrix multiplication of `a` by the matrix `b` with bias 

10464add and relu and requantize fusion. 

10465 

10466 The inputs must be two-dimensional matrices and 1D bias vector. And the inner 

10467 dimension of `a` (after being transposed if `transpose_a` is non-zero) must 

10468 match the outer dimension of `b` (after being transposed if `transposed_b` is 

10469 non-zero). Then do broadcast add operation with bias values on the matrix 

10470 multiplication result. The bias size must match inner dimension of `b`. Then do 

10471 relu activation to get non-negative result. Then do requantize operation to get 

10472 final uint8 result. 

10473 

10474 Args: 

10475 a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10476 A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. 

10477 b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10478 A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. 

10479 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

10480 A 1D bias tensor with size matching with inner dimension of `b` (after being 

10481 transposed if `transposed_b` is non-zero). 

10482 min_a: A `Tensor` of type `float32`. 

10483 The float value that the lowest quantized `a` value represents. 

10484 max_a: A `Tensor` of type `float32`. 

10485 The float value that the highest quantized `a` value represents. 

10486 min_b: A `Tensor` of type `float32`. 

10487 The float value that the lowest quantized `b` value represents. 

10488 max_b: A `Tensor` of type `float32`. 

10489 The float value that the highest quantized `b` value represents. 

10490 min_freezed_output: A `Tensor` of type `float32`. 

10491 The float value that the highest quantized output value after requantize. 

10492 max_freezed_output: A `Tensor` of type `float32`. 

10493 Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

10494 transpose_a: An optional `bool`. Defaults to `False`. 

10495 If true, `a` is transposed before multiplication. 

10496 transpose_b: An optional `bool`. Defaults to `False`. 

10497 If true, `b` is transposed before multiplication. 

10498 input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`. 

10499 Input data quantization mode. Either MIN_FIRST(default) or SCALED. 

10500 name: A name for the operation (optional). 

10501 

10502 Returns: 

10503 A tuple of `Tensor` objects (out, min_out, max_out). 

10504 

10505 out: A `Tensor` of type `Toutput`. 

10506 min_out: A `Tensor` of type `float32`. 

10507 max_out: A `Tensor` of type `float32`. 

10508 """ 

10509 _ctx = _context._context or _context.context() 

10510 tld = _ctx._thread_local_data 

10511 if tld.is_eager: 

10512 try: 

10513 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10514 _ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name, a, b, bias, 

10515 min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, 

10516 "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", 

10517 transpose_b, "input_quant_mode", input_quant_mode) 

10518 _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result) 

10519 return _result 

10520 except _core._NotOkStatusException as e: 

10521 _ops.raise_from_not_ok_status(e, name) 

10522 except _core._FallbackException: 

10523 pass 

10524 try: 

10525 return quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback( 

10526 a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, 

10527 max_freezed_output, Toutput=Toutput, transpose_a=transpose_a, 

10528 transpose_b=transpose_b, input_quant_mode=input_quant_mode, 

10529 name=name, ctx=_ctx) 

10530 except _core._SymbolicException: 

10531 pass # Add nodes to the TensorFlow graph. 

10532 # Add nodes to the TensorFlow graph. 

10533 if Toutput is None: 

10534 Toutput = _dtypes.quint8 

10535 Toutput = _execute.make_type(Toutput, "Toutput") 

10536 if transpose_a is None: 

10537 transpose_a = False 

10538 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10539 if transpose_b is None: 

10540 transpose_b = False 

10541 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10542 if input_quant_mode is None: 

10543 input_quant_mode = "MIN_FIRST" 

10544 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10545 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10546 "QuantizedMatMulWithBiasAndReluAndRequantize", a=a, b=b, bias=bias, 

10547 min_a=min_a, 

10548 max_a=max_a, 

10549 min_b=min_b, 

10550 max_b=max_b, 

10551 min_freezed_output=min_freezed_output, 

10552 max_freezed_output=max_freezed_output, 

10553 Toutput=Toutput, 

10554 transpose_a=transpose_a, 

10555 transpose_b=transpose_b, 

10556 input_quant_mode=input_quant_mode, 

10557 name=name) 

10558 _result = _outputs[:] 

10559 if _execute.must_record_gradient(): 

10560 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

10561 "Tbias", _op._get_attr_type("Tbias"), "Toutput", 

10562 _op._get_attr_type("Toutput"), "transpose_a", 

10563 _op._get_attr_bool("transpose_a"), "transpose_b", 

10564 _op._get_attr_bool("transpose_b"), "input_quant_mode", 

10565 _op.get_attr("input_quant_mode")) 

10566 _inputs_flat = _op.inputs 

10567 _execute.record_gradient( 

10568 "QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

10569 _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result) 

10570 return _result 

10571 

10572QuantizedMatMulWithBiasAndReluAndRequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndReluAndRequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_relu_and_requantize)) 

10573 

10574 

10575def quantized_mat_mul_with_bias_and_relu_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx): 

10576 if Toutput is None: 

10577 Toutput = _dtypes.quint8 

10578 Toutput = _execute.make_type(Toutput, "Toutput") 

10579 if transpose_a is None: 

10580 transpose_a = False 

10581 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10582 if transpose_b is None: 

10583 transpose_b = False 

10584 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10585 if input_quant_mode is None: 

10586 input_quant_mode = "MIN_FIRST" 

10587 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10588 _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10589 _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10590 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

10591 min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) 

10592 max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) 

10593 min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) 

10594 max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) 

10595 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

10596 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

10597 _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output] 

10598 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput", 

10599 Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, 

10600 "input_quant_mode", input_quant_mode) 

10601 _result = _execute.execute(b"QuantizedMatMulWithBiasAndReluAndRequantize", 

10602 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10603 name=name) 

10604 if _execute.must_record_gradient(): 

10605 _execute.record_gradient( 

10606 "QuantizedMatMulWithBiasAndReluAndRequantize", _inputs_flat, _attrs, _result) 

10607 _result = _QuantizedMatMulWithBiasAndReluAndRequantizeOutput._make(_result) 

10608 return _result 

10609 

10610_QuantizedMatMulWithBiasAndRequantizeOutput = collections.namedtuple( 

10611 "QuantizedMatMulWithBiasAndRequantize", 

10612 ["out", "min_out", "max_out"]) 

10613 

10614 

10615def quantized_mat_mul_with_bias_and_requantize(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput=_dtypes.quint8, transpose_a=False, transpose_b=False, input_quant_mode="MIN_FIRST", name=None): 

10616 r"""TODO: add doc. 

10617 

10618 Args: 

10619 a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10620 b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10621 bias: A `Tensor`. Must be one of the following types: `float32`, `qint32`. 

10622 min_a: A `Tensor` of type `float32`. 

10623 max_a: A `Tensor` of type `float32`. 

10624 min_b: A `Tensor` of type `float32`. 

10625 max_b: A `Tensor` of type `float32`. 

10626 min_freezed_output: A `Tensor` of type `float32`. 

10627 max_freezed_output: A `Tensor` of type `float32`. 

10628 Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

10629 transpose_a: An optional `bool`. Defaults to `False`. 

10630 transpose_b: An optional `bool`. Defaults to `False`. 

10631 input_quant_mode: An optional `string` from: `"MIN_FIRST", "SCALED"`. Defaults to `"MIN_FIRST"`. 

10632 name: A name for the operation (optional). 

10633 

10634 Returns: 

10635 A tuple of `Tensor` objects (out, min_out, max_out). 

10636 

10637 out: A `Tensor` of type `Toutput`. 

10638 min_out: A `Tensor` of type `float32`. 

10639 max_out: A `Tensor` of type `float32`. 

10640 """ 

10641 _ctx = _context._context or _context.context() 

10642 tld = _ctx._thread_local_data 

10643 if tld.is_eager: 

10644 try: 

10645 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10646 _ctx, "QuantizedMatMulWithBiasAndRequantize", name, a, b, bias, min_a, 

10647 max_a, min_b, max_b, min_freezed_output, max_freezed_output, 

10648 "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", 

10649 transpose_b, "input_quant_mode", input_quant_mode) 

10650 _result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result) 

10651 return _result 

10652 except _core._NotOkStatusException as e: 

10653 _ops.raise_from_not_ok_status(e, name) 

10654 except _core._FallbackException: 

10655 pass 

10656 try: 

10657 return quantized_mat_mul_with_bias_and_requantize_eager_fallback( 

10658 a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, 

10659 max_freezed_output, Toutput=Toutput, transpose_a=transpose_a, 

10660 transpose_b=transpose_b, input_quant_mode=input_quant_mode, 

10661 name=name, ctx=_ctx) 

10662 except _core._SymbolicException: 

10663 pass # Add nodes to the TensorFlow graph. 

10664 # Add nodes to the TensorFlow graph. 

10665 if Toutput is None: 

10666 Toutput = _dtypes.quint8 

10667 Toutput = _execute.make_type(Toutput, "Toutput") 

10668 if transpose_a is None: 

10669 transpose_a = False 

10670 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10671 if transpose_b is None: 

10672 transpose_b = False 

10673 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10674 if input_quant_mode is None: 

10675 input_quant_mode = "MIN_FIRST" 

10676 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10677 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10678 "QuantizedMatMulWithBiasAndRequantize", a=a, b=b, bias=bias, 

10679 min_a=min_a, max_a=max_a, 

10680 min_b=min_b, max_b=max_b, 

10681 min_freezed_output=min_freezed_output, 

10682 max_freezed_output=max_freezed_output, 

10683 Toutput=Toutput, 

10684 transpose_a=transpose_a, 

10685 transpose_b=transpose_b, 

10686 input_quant_mode=input_quant_mode, 

10687 name=name) 

10688 _result = _outputs[:] 

10689 if _execute.must_record_gradient(): 

10690 _attrs = ("T1", _op._get_attr_type("T1"), "T2", _op._get_attr_type("T2"), 

10691 "Tbias", _op._get_attr_type("Tbias"), "Toutput", 

10692 _op._get_attr_type("Toutput"), "transpose_a", 

10693 _op._get_attr_bool("transpose_a"), "transpose_b", 

10694 _op._get_attr_bool("transpose_b"), "input_quant_mode", 

10695 _op.get_attr("input_quant_mode")) 

10696 _inputs_flat = _op.inputs 

10697 _execute.record_gradient( 

10698 "QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result) 

10699 _result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result) 

10700 return _result 

10701 

10702QuantizedMatMulWithBiasAndRequantize = tf_export("raw_ops.QuantizedMatMulWithBiasAndRequantize")(_ops.to_raw_op(quantized_mat_mul_with_bias_and_requantize)) 

10703 

10704 

10705def quantized_mat_mul_with_bias_and_requantize_eager_fallback(a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, Toutput, transpose_a, transpose_b, input_quant_mode, name, ctx): 

10706 if Toutput is None: 

10707 Toutput = _dtypes.quint8 

10708 Toutput = _execute.make_type(Toutput, "Toutput") 

10709 if transpose_a is None: 

10710 transpose_a = False 

10711 transpose_a = _execute.make_bool(transpose_a, "transpose_a") 

10712 if transpose_b is None: 

10713 transpose_b = False 

10714 transpose_b = _execute.make_bool(transpose_b, "transpose_b") 

10715 if input_quant_mode is None: 

10716 input_quant_mode = "MIN_FIRST" 

10717 input_quant_mode = _execute.make_str(input_quant_mode, "input_quant_mode") 

10718 _attr_T1, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10719 _attr_T2, (b,) = _execute.args_to_matching_eager([b], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10720 _attr_Tbias, (bias,) = _execute.args_to_matching_eager([bias], ctx, [_dtypes.float32, _dtypes.qint32, ]) 

10721 min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) 

10722 max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) 

10723 min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) 

10724 max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) 

10725 min_freezed_output = _ops.convert_to_tensor(min_freezed_output, _dtypes.float32) 

10726 max_freezed_output = _ops.convert_to_tensor(max_freezed_output, _dtypes.float32) 

10727 _inputs_flat = [a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output] 

10728 _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Tbias", _attr_Tbias, "Toutput", 

10729 Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, 

10730 "input_quant_mode", input_quant_mode) 

10731 _result = _execute.execute(b"QuantizedMatMulWithBiasAndRequantize", 3, 

10732 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

10733 name=name) 

10734 if _execute.must_record_gradient(): 

10735 _execute.record_gradient( 

10736 "QuantizedMatMulWithBiasAndRequantize", _inputs_flat, _attrs, _result) 

10737 _result = _QuantizedMatMulWithBiasAndRequantizeOutput._make(_result) 

10738 return _result 

10739 

10740_QuantizedMaxPoolOutput = collections.namedtuple( 

10741 "QuantizedMaxPool", 

10742 ["output", "min_output", "max_output"]) 

10743 

10744 

10745def quantized_max_pool(input, min_input, max_input, ksize, strides, padding, name=None): 

10746 r"""Produces the max pool of the input tensor for quantized types. 

10747 

10748 Args: 

10749 input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10750 The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. 

10751 min_input: A `Tensor` of type `float32`. 

10752 The float value that the lowest quantized input value represents. 

10753 max_input: A `Tensor` of type `float32`. 

10754 The float value that the highest quantized input value represents. 

10755 ksize: A list of `ints`. 

10756 The size of the window for each dimension of the input tensor. 

10757 The length must be 4 to match the number of dimensions of the input. 

10758 strides: A list of `ints`. 

10759 The stride of the sliding window for each dimension of the input 

10760 tensor. The length must be 4 to match the number of dimensions of the input. 

10761 padding: A `string` from: `"SAME", "VALID"`. 

10762 The type of padding algorithm to use. 

10763 name: A name for the operation (optional). 

10764 

10765 Returns: 

10766 A tuple of `Tensor` objects (output, min_output, max_output). 

10767 

10768 output: A `Tensor`. Has the same type as `input`. 

10769 min_output: A `Tensor` of type `float32`. 

10770 max_output: A `Tensor` of type `float32`. 

10771 """ 

10772 _ctx = _context._context or _context.context() 

10773 tld = _ctx._thread_local_data 

10774 if tld.is_eager: 

10775 try: 

10776 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10777 _ctx, "QuantizedMaxPool", name, input, min_input, max_input, "ksize", 

10778 ksize, "strides", strides, "padding", padding) 

10779 _result = _QuantizedMaxPoolOutput._make(_result) 

10780 return _result 

10781 except _core._NotOkStatusException as e: 

10782 _ops.raise_from_not_ok_status(e, name) 

10783 except _core._FallbackException: 

10784 pass 

10785 try: 

10786 return quantized_max_pool_eager_fallback( 

10787 input, min_input, max_input, ksize=ksize, strides=strides, 

10788 padding=padding, name=name, ctx=_ctx) 

10789 except _core._SymbolicException: 

10790 pass # Add nodes to the TensorFlow graph. 

10791 # Add nodes to the TensorFlow graph. 

10792 if not isinstance(ksize, (list, tuple)): 

10793 raise TypeError( 

10794 "Expected list for 'ksize' argument to " 

10795 "'quantized_max_pool' Op, not %r." % ksize) 

10796 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

10797 if not isinstance(strides, (list, tuple)): 

10798 raise TypeError( 

10799 "Expected list for 'strides' argument to " 

10800 "'quantized_max_pool' Op, not %r." % strides) 

10801 strides = [_execute.make_int(_i, "strides") for _i in strides] 

10802 padding = _execute.make_str(padding, "padding") 

10803 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10804 "QuantizedMaxPool", input=input, min_input=min_input, 

10805 max_input=max_input, ksize=ksize, strides=strides, 

10806 padding=padding, name=name) 

10807 _result = _outputs[:] 

10808 if _execute.must_record_gradient(): 

10809 _attrs = ("T", _op._get_attr_type("T"), "ksize", _op.get_attr("ksize"), 

10810 "strides", _op.get_attr("strides"), "padding", 

10811 _op.get_attr("padding")) 

10812 _inputs_flat = _op.inputs 

10813 _execute.record_gradient( 

10814 "QuantizedMaxPool", _inputs_flat, _attrs, _result) 

10815 _result = _QuantizedMaxPoolOutput._make(_result) 

10816 return _result 

10817 

10818QuantizedMaxPool = tf_export("raw_ops.QuantizedMaxPool")(_ops.to_raw_op(quantized_max_pool)) 

10819 

10820 

10821def quantized_max_pool_eager_fallback(input, min_input, max_input, ksize, strides, padding, name, ctx): 

10822 if not isinstance(ksize, (list, tuple)): 

10823 raise TypeError( 

10824 "Expected list for 'ksize' argument to " 

10825 "'quantized_max_pool' Op, not %r." % ksize) 

10826 ksize = [_execute.make_int(_i, "ksize") for _i in ksize] 

10827 if not isinstance(strides, (list, tuple)): 

10828 raise TypeError( 

10829 "Expected list for 'strides' argument to " 

10830 "'quantized_max_pool' Op, not %r." % strides) 

10831 strides = [_execute.make_int(_i, "strides") for _i in strides] 

10832 padding = _execute.make_str(padding, "padding") 

10833 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10834 min_input = _ops.convert_to_tensor(min_input, _dtypes.float32) 

10835 max_input = _ops.convert_to_tensor(max_input, _dtypes.float32) 

10836 _inputs_flat = [input, min_input, max_input] 

10837 _attrs = ("T", _attr_T, "ksize", ksize, "strides", strides, "padding", 

10838 padding) 

10839 _result = _execute.execute(b"QuantizedMaxPool", 3, inputs=_inputs_flat, 

10840 attrs=_attrs, ctx=ctx, name=name) 

10841 if _execute.must_record_gradient(): 

10842 _execute.record_gradient( 

10843 "QuantizedMaxPool", _inputs_flat, _attrs, _result) 

10844 _result = _QuantizedMaxPoolOutput._make(_result) 

10845 return _result 

10846 

10847_QuantizedReluOutput = collections.namedtuple( 

10848 "QuantizedRelu", 

10849 ["activations", "min_activations", "max_activations"]) 

10850 

10851 

10852def quantized_relu(features, min_features, max_features, out_type=_dtypes.quint8, name=None): 

10853 r"""Computes Quantized Rectified Linear: `max(features, 0)` 

10854 

10855 Args: 

10856 features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10857 min_features: A `Tensor` of type `float32`. 

10858 The float value that the lowest quantized value represents. 

10859 max_features: A `Tensor` of type `float32`. 

10860 The float value that the highest quantized value represents. 

10861 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

10862 name: A name for the operation (optional). 

10863 

10864 Returns: 

10865 A tuple of `Tensor` objects (activations, min_activations, max_activations). 

10866 

10867 activations: A `Tensor` of type `out_type`. 

10868 min_activations: A `Tensor` of type `float32`. 

10869 max_activations: A `Tensor` of type `float32`. 

10870 """ 

10871 _ctx = _context._context or _context.context() 

10872 tld = _ctx._thread_local_data 

10873 if tld.is_eager: 

10874 try: 

10875 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10876 _ctx, "QuantizedRelu", name, features, min_features, max_features, 

10877 "out_type", out_type) 

10878 _result = _QuantizedReluOutput._make(_result) 

10879 return _result 

10880 except _core._NotOkStatusException as e: 

10881 _ops.raise_from_not_ok_status(e, name) 

10882 except _core._FallbackException: 

10883 pass 

10884 try: 

10885 return quantized_relu_eager_fallback( 

10886 features, min_features, max_features, out_type=out_type, name=name, 

10887 ctx=_ctx) 

10888 except _core._SymbolicException: 

10889 pass # Add nodes to the TensorFlow graph. 

10890 # Add nodes to the TensorFlow graph. 

10891 if out_type is None: 

10892 out_type = _dtypes.quint8 

10893 out_type = _execute.make_type(out_type, "out_type") 

10894 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10895 "QuantizedRelu", features=features, min_features=min_features, 

10896 max_features=max_features, out_type=out_type, 

10897 name=name) 

10898 _result = _outputs[:] 

10899 if _execute.must_record_gradient(): 

10900 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", 

10901 _op._get_attr_type("out_type")) 

10902 _inputs_flat = _op.inputs 

10903 _execute.record_gradient( 

10904 "QuantizedRelu", _inputs_flat, _attrs, _result) 

10905 _result = _QuantizedReluOutput._make(_result) 

10906 return _result 

10907 

10908QuantizedRelu = tf_export("raw_ops.QuantizedRelu")(_ops.to_raw_op(quantized_relu)) 

10909 

10910 

10911def quantized_relu_eager_fallback(features, min_features, max_features, out_type, name, ctx): 

10912 if out_type is None: 

10913 out_type = _dtypes.quint8 

10914 out_type = _execute.make_type(out_type, "out_type") 

10915 _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10916 min_features = _ops.convert_to_tensor(min_features, _dtypes.float32) 

10917 max_features = _ops.convert_to_tensor(max_features, _dtypes.float32) 

10918 _inputs_flat = [features, min_features, max_features] 

10919 _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) 

10920 _result = _execute.execute(b"QuantizedRelu", 3, inputs=_inputs_flat, 

10921 attrs=_attrs, ctx=ctx, name=name) 

10922 if _execute.must_record_gradient(): 

10923 _execute.record_gradient( 

10924 "QuantizedRelu", _inputs_flat, _attrs, _result) 

10925 _result = _QuantizedReluOutput._make(_result) 

10926 return _result 

10927 

10928_QuantizedRelu6Output = collections.namedtuple( 

10929 "QuantizedRelu6", 

10930 ["activations", "min_activations", "max_activations"]) 

10931 

10932 

10933def quantized_relu6(features, min_features, max_features, out_type=_dtypes.quint8, name=None): 

10934 r"""Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` 

10935 

10936 Args: 

10937 features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

10938 min_features: A `Tensor` of type `float32`. 

10939 The float value that the lowest quantized value represents. 

10940 max_features: A `Tensor` of type `float32`. 

10941 The float value that the highest quantized value represents. 

10942 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

10943 name: A name for the operation (optional). 

10944 

10945 Returns: 

10946 A tuple of `Tensor` objects (activations, min_activations, max_activations). 

10947 

10948 activations: A `Tensor` of type `out_type`. 

10949 min_activations: A `Tensor` of type `float32`. 

10950 max_activations: A `Tensor` of type `float32`. 

10951 """ 

10952 _ctx = _context._context or _context.context() 

10953 tld = _ctx._thread_local_data 

10954 if tld.is_eager: 

10955 try: 

10956 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

10957 _ctx, "QuantizedRelu6", name, features, min_features, max_features, 

10958 "out_type", out_type) 

10959 _result = _QuantizedRelu6Output._make(_result) 

10960 return _result 

10961 except _core._NotOkStatusException as e: 

10962 _ops.raise_from_not_ok_status(e, name) 

10963 except _core._FallbackException: 

10964 pass 

10965 try: 

10966 return quantized_relu6_eager_fallback( 

10967 features, min_features, max_features, out_type=out_type, name=name, 

10968 ctx=_ctx) 

10969 except _core._SymbolicException: 

10970 pass # Add nodes to the TensorFlow graph. 

10971 # Add nodes to the TensorFlow graph. 

10972 if out_type is None: 

10973 out_type = _dtypes.quint8 

10974 out_type = _execute.make_type(out_type, "out_type") 

10975 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

10976 "QuantizedRelu6", features=features, min_features=min_features, 

10977 max_features=max_features, out_type=out_type, 

10978 name=name) 

10979 _result = _outputs[:] 

10980 if _execute.must_record_gradient(): 

10981 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", 

10982 _op._get_attr_type("out_type")) 

10983 _inputs_flat = _op.inputs 

10984 _execute.record_gradient( 

10985 "QuantizedRelu6", _inputs_flat, _attrs, _result) 

10986 _result = _QuantizedRelu6Output._make(_result) 

10987 return _result 

10988 

10989QuantizedRelu6 = tf_export("raw_ops.QuantizedRelu6")(_ops.to_raw_op(quantized_relu6)) 

10990 

10991 

10992def quantized_relu6_eager_fallback(features, min_features, max_features, out_type, name, ctx): 

10993 if out_type is None: 

10994 out_type = _dtypes.quint8 

10995 out_type = _execute.make_type(out_type, "out_type") 

10996 _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

10997 min_features = _ops.convert_to_tensor(min_features, _dtypes.float32) 

10998 max_features = _ops.convert_to_tensor(max_features, _dtypes.float32) 

10999 _inputs_flat = [features, min_features, max_features] 

11000 _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) 

11001 _result = _execute.execute(b"QuantizedRelu6", 3, inputs=_inputs_flat, 

11002 attrs=_attrs, ctx=ctx, name=name) 

11003 if _execute.must_record_gradient(): 

11004 _execute.record_gradient( 

11005 "QuantizedRelu6", _inputs_flat, _attrs, _result) 

11006 _result = _QuantizedRelu6Output._make(_result) 

11007 return _result 

11008 

11009_QuantizedReluXOutput = collections.namedtuple( 

11010 "QuantizedReluX", 

11011 ["activations", "min_activations", "max_activations"]) 

11012 

11013 

11014def quantized_relu_x(features, max_value, min_features, max_features, out_type=_dtypes.quint8, name=None): 

11015 r"""Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` 

11016 

11017 Args: 

11018 features: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. 

11019 max_value: A `Tensor` of type `float32`. 

11020 min_features: A `Tensor` of type `float32`. 

11021 The float value that the lowest quantized value represents. 

11022 max_features: A `Tensor` of type `float32`. 

11023 The float value that the highest quantized value represents. 

11024 out_type: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. 

11025 name: A name for the operation (optional). 

11026 

11027 Returns: 

11028 A tuple of `Tensor` objects (activations, min_activations, max_activations). 

11029 

11030 activations: A `Tensor` of type `out_type`. 

11031 min_activations: A `Tensor` of type `float32`. 

11032 max_activations: A `Tensor` of type `float32`. 

11033 """ 

11034 _ctx = _context._context or _context.context() 

11035 tld = _ctx._thread_local_data 

11036 if tld.is_eager: 

11037 try: 

11038 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11039 _ctx, "QuantizedReluX", name, features, max_value, min_features, 

11040 max_features, "out_type", out_type) 

11041 _result = _QuantizedReluXOutput._make(_result) 

11042 return _result 

11043 except _core._NotOkStatusException as e: 

11044 _ops.raise_from_not_ok_status(e, name) 

11045 except _core._FallbackException: 

11046 pass 

11047 try: 

11048 return quantized_relu_x_eager_fallback( 

11049 features, max_value, min_features, max_features, out_type=out_type, 

11050 name=name, ctx=_ctx) 

11051 except _core._SymbolicException: 

11052 pass # Add nodes to the TensorFlow graph. 

11053 # Add nodes to the TensorFlow graph. 

11054 if out_type is None: 

11055 out_type = _dtypes.quint8 

11056 out_type = _execute.make_type(out_type, "out_type") 

11057 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11058 "QuantizedReluX", features=features, max_value=max_value, 

11059 min_features=min_features, 

11060 max_features=max_features, out_type=out_type, 

11061 name=name) 

11062 _result = _outputs[:] 

11063 if _execute.must_record_gradient(): 

11064 _attrs = ("Tinput", _op._get_attr_type("Tinput"), "out_type", 

11065 _op._get_attr_type("out_type")) 

11066 _inputs_flat = _op.inputs 

11067 _execute.record_gradient( 

11068 "QuantizedReluX", _inputs_flat, _attrs, _result) 

11069 _result = _QuantizedReluXOutput._make(_result) 

11070 return _result 

11071 

11072QuantizedReluX = tf_export("raw_ops.QuantizedReluX")(_ops.to_raw_op(quantized_relu_x)) 

11073 

11074 

11075def quantized_relu_x_eager_fallback(features, max_value, min_features, max_features, out_type, name, ctx): 

11076 if out_type is None: 

11077 out_type = _dtypes.quint8 

11078 out_type = _execute.make_type(out_type, "out_type") 

11079 _attr_Tinput, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.qint16, _dtypes.quint16, ]) 

11080 max_value = _ops.convert_to_tensor(max_value, _dtypes.float32) 

11081 min_features = _ops.convert_to_tensor(min_features, _dtypes.float32) 

11082 max_features = _ops.convert_to_tensor(max_features, _dtypes.float32) 

11083 _inputs_flat = [features, max_value, min_features, max_features] 

11084 _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) 

11085 _result = _execute.execute(b"QuantizedReluX", 3, inputs=_inputs_flat, 

11086 attrs=_attrs, ctx=ctx, name=name) 

11087 if _execute.must_record_gradient(): 

11088 _execute.record_gradient( 

11089 "QuantizedReluX", _inputs_flat, _attrs, _result) 

11090 _result = _QuantizedReluXOutput._make(_result) 

11091 return _result 

11092 

11093 

11094@_dispatch.add_fallback_dispatch_list 

11095@_dispatch.add_type_based_api_dispatcher 

11096@tf_export('nn.relu') 

11097def relu(features, name=None): 

11098 r"""Computes rectified linear: `max(features, 0)`. 

11099 

11100 See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) 

11101 Example usage: 

11102 >>> tf.nn.relu([-2., 0., 3.]).numpy() 

11103 array([0., 0., 3.], dtype=float32) 

11104 

11105 Args: 

11106 features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`, `qint8`. 

11107 name: A name for the operation (optional). 

11108 

11109 Returns: 

11110 A `Tensor`. Has the same type as `features`. 

11111 """ 

11112 _ctx = _context._context or _context.context() 

11113 tld = _ctx._thread_local_data 

11114 if tld.is_eager: 

11115 try: 

11116 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11117 _ctx, "Relu", name, features) 

11118 return _result 

11119 except _core._NotOkStatusException as e: 

11120 _ops.raise_from_not_ok_status(e, name) 

11121 except _core._FallbackException: 

11122 pass 

11123 try: 

11124 _result = _dispatcher_for_relu( 

11125 (features, name,), None) 

11126 if _result is not NotImplemented: 

11127 return _result 

11128 return relu_eager_fallback( 

11129 features, name=name, ctx=_ctx) 

11130 except _core._SymbolicException: 

11131 pass # Add nodes to the TensorFlow graph. 

11132 except (TypeError, ValueError): 

11133 _result = _dispatch.dispatch( 

11134 relu, (), dict(features=features, name=name) 

11135 ) 

11136 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11137 return _result 

11138 raise 

11139 else: 

11140 _result = _dispatcher_for_relu( 

11141 (features, name,), None) 

11142 if _result is not NotImplemented: 

11143 return _result 

11144 # Add nodes to the TensorFlow graph. 

11145 try: 

11146 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11147 "Relu", features=features, name=name) 

11148 except (TypeError, ValueError): 

11149 _result = _dispatch.dispatch( 

11150 relu, (), dict(features=features, name=name) 

11151 ) 

11152 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11153 return _result 

11154 raise 

11155 _result = _outputs[:] 

11156 if _execute.must_record_gradient(): 

11157 _attrs = ("T", _op._get_attr_type("T")) 

11158 _inputs_flat = _op.inputs 

11159 _execute.record_gradient( 

11160 "Relu", _inputs_flat, _attrs, _result) 

11161 _result, = _result 

11162 return _result 

11163 

11164Relu = tf_export("raw_ops.Relu")(_ops.to_raw_op(relu)) 

11165_dispatcher_for_relu = relu._tf_type_based_dispatcher.Dispatch 

11166 

11167 

11168def relu_eager_fallback(features, name, ctx): 

11169 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.qint8, ]) 

11170 _inputs_flat = [features] 

11171 _attrs = ("T", _attr_T) 

11172 _result = _execute.execute(b"Relu", 1, inputs=_inputs_flat, attrs=_attrs, 

11173 ctx=ctx, name=name) 

11174 if _execute.must_record_gradient(): 

11175 _execute.record_gradient( 

11176 "Relu", _inputs_flat, _attrs, _result) 

11177 _result, = _result 

11178 return _result 

11179 

11180 

11181def relu6(features, name=None): 

11182 r"""Computes rectified linear 6: `min(max(features, 0), 6)`. 

11183 

11184 Args: 

11185 features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

11186 name: A name for the operation (optional). 

11187 

11188 Returns: 

11189 A `Tensor`. Has the same type as `features`. 

11190 """ 

11191 _ctx = _context._context or _context.context() 

11192 tld = _ctx._thread_local_data 

11193 if tld.is_eager: 

11194 try: 

11195 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11196 _ctx, "Relu6", name, features) 

11197 return _result 

11198 except _core._NotOkStatusException as e: 

11199 _ops.raise_from_not_ok_status(e, name) 

11200 except _core._FallbackException: 

11201 pass 

11202 try: 

11203 return relu6_eager_fallback( 

11204 features, name=name, ctx=_ctx) 

11205 except _core._SymbolicException: 

11206 pass # Add nodes to the TensorFlow graph. 

11207 # Add nodes to the TensorFlow graph. 

11208 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11209 "Relu6", features=features, name=name) 

11210 _result = _outputs[:] 

11211 if _execute.must_record_gradient(): 

11212 _attrs = ("T", _op._get_attr_type("T")) 

11213 _inputs_flat = _op.inputs 

11214 _execute.record_gradient( 

11215 "Relu6", _inputs_flat, _attrs, _result) 

11216 _result, = _result 

11217 return _result 

11218 

11219Relu6 = tf_export("raw_ops.Relu6")(_ops.to_raw_op(relu6)) 

11220 

11221 

11222def relu6_eager_fallback(features, name, ctx): 

11223 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

11224 _inputs_flat = [features] 

11225 _attrs = ("T", _attr_T) 

11226 _result = _execute.execute(b"Relu6", 1, inputs=_inputs_flat, attrs=_attrs, 

11227 ctx=ctx, name=name) 

11228 if _execute.must_record_gradient(): 

11229 _execute.record_gradient( 

11230 "Relu6", _inputs_flat, _attrs, _result) 

11231 _result, = _result 

11232 return _result 

11233 

11234 

11235def relu6_grad(gradients, features, name=None): 

11236 r"""Computes rectified linear 6 gradients for a Relu6 operation. 

11237 

11238 Args: 

11239 gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

11240 The backpropagated gradients to the corresponding Relu6 operation. 

11241 features: A `Tensor`. Must have the same type as `gradients`. 

11242 The features passed as input to the corresponding Relu6 operation, or 

11243 its output; using either one produces the same result. 

11244 name: A name for the operation (optional). 

11245 

11246 Returns: 

11247 A `Tensor`. Has the same type as `gradients`. 

11248 """ 

11249 _ctx = _context._context or _context.context() 

11250 tld = _ctx._thread_local_data 

11251 if tld.is_eager: 

11252 try: 

11253 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11254 _ctx, "Relu6Grad", name, gradients, features) 

11255 return _result 

11256 except _core._NotOkStatusException as e: 

11257 _ops.raise_from_not_ok_status(e, name) 

11258 except _core._FallbackException: 

11259 pass 

11260 try: 

11261 return relu6_grad_eager_fallback( 

11262 gradients, features, name=name, ctx=_ctx) 

11263 except _core._SymbolicException: 

11264 pass # Add nodes to the TensorFlow graph. 

11265 # Add nodes to the TensorFlow graph. 

11266 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11267 "Relu6Grad", gradients=gradients, features=features, name=name) 

11268 _result = _outputs[:] 

11269 if _execute.must_record_gradient(): 

11270 _attrs = ("T", _op._get_attr_type("T")) 

11271 _inputs_flat = _op.inputs 

11272 _execute.record_gradient( 

11273 "Relu6Grad", _inputs_flat, _attrs, _result) 

11274 _result, = _result 

11275 return _result 

11276 

11277Relu6Grad = tf_export("raw_ops.Relu6Grad")(_ops.to_raw_op(relu6_grad)) 

11278 

11279 

11280def relu6_grad_eager_fallback(gradients, features, name, ctx): 

11281 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

11282 (gradients, features) = _inputs_T 

11283 _inputs_flat = [gradients, features] 

11284 _attrs = ("T", _attr_T) 

11285 _result = _execute.execute(b"Relu6Grad", 1, inputs=_inputs_flat, 

11286 attrs=_attrs, ctx=ctx, name=name) 

11287 if _execute.must_record_gradient(): 

11288 _execute.record_gradient( 

11289 "Relu6Grad", _inputs_flat, _attrs, _result) 

11290 _result, = _result 

11291 return _result 

11292 

11293 

11294def relu_grad(gradients, features, name=None): 

11295 r"""Computes rectified linear gradients for a Relu operation. 

11296 

11297 Args: 

11298 gradients: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

11299 The backpropagated gradients to the corresponding Relu operation. 

11300 features: A `Tensor`. Must have the same type as `gradients`. 

11301 The features passed as input to the corresponding Relu operation, OR 

11302 the outputs of that operation (both work equivalently). 

11303 name: A name for the operation (optional). 

11304 

11305 Returns: 

11306 A `Tensor`. Has the same type as `gradients`. 

11307 """ 

11308 _ctx = _context._context or _context.context() 

11309 tld = _ctx._thread_local_data 

11310 if tld.is_eager: 

11311 try: 

11312 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11313 _ctx, "ReluGrad", name, gradients, features) 

11314 return _result 

11315 except _core._NotOkStatusException as e: 

11316 _ops.raise_from_not_ok_status(e, name) 

11317 except _core._FallbackException: 

11318 pass 

11319 try: 

11320 return relu_grad_eager_fallback( 

11321 gradients, features, name=name, ctx=_ctx) 

11322 except _core._SymbolicException: 

11323 pass # Add nodes to the TensorFlow graph. 

11324 # Add nodes to the TensorFlow graph. 

11325 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11326 "ReluGrad", gradients=gradients, features=features, name=name) 

11327 _result = _outputs[:] 

11328 if _execute.must_record_gradient(): 

11329 _attrs = ("T", _op._get_attr_type("T")) 

11330 _inputs_flat = _op.inputs 

11331 _execute.record_gradient( 

11332 "ReluGrad", _inputs_flat, _attrs, _result) 

11333 _result, = _result 

11334 return _result 

11335 

11336ReluGrad = tf_export("raw_ops.ReluGrad")(_ops.to_raw_op(relu_grad)) 

11337 

11338 

11339def relu_grad_eager_fallback(gradients, features, name, ctx): 

11340 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

11341 (gradients, features) = _inputs_T 

11342 _inputs_flat = [gradients, features] 

11343 _attrs = ("T", _attr_T) 

11344 _result = _execute.execute(b"ReluGrad", 1, inputs=_inputs_flat, 

11345 attrs=_attrs, ctx=ctx, name=name) 

11346 if _execute.must_record_gradient(): 

11347 _execute.record_gradient( 

11348 "ReluGrad", _inputs_flat, _attrs, _result) 

11349 _result, = _result 

11350 return _result 

11351 

11352 

11353@_dispatch.add_fallback_dispatch_list 

11354@_dispatch.add_type_based_api_dispatcher 

11355@tf_export('nn.selu') 

11356def selu(features, name=None): 

11357 r"""Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` 

11358 

11359 if < 0, `scale * features` otherwise. 

11360 

11361 To be used together with 

11362 `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. 

11363 For correct dropout, use `tf.contrib.nn.alpha_dropout`. 

11364 

11365 See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) 

11366 

11367 Args: 

11368 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11369 name: A name for the operation (optional). 

11370 

11371 Returns: 

11372 A `Tensor`. Has the same type as `features`. 

11373 """ 

11374 _ctx = _context._context or _context.context() 

11375 tld = _ctx._thread_local_data 

11376 if tld.is_eager: 

11377 try: 

11378 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11379 _ctx, "Selu", name, features) 

11380 return _result 

11381 except _core._NotOkStatusException as e: 

11382 _ops.raise_from_not_ok_status(e, name) 

11383 except _core._FallbackException: 

11384 pass 

11385 try: 

11386 _result = _dispatcher_for_selu( 

11387 (features, name,), None) 

11388 if _result is not NotImplemented: 

11389 return _result 

11390 return selu_eager_fallback( 

11391 features, name=name, ctx=_ctx) 

11392 except _core._SymbolicException: 

11393 pass # Add nodes to the TensorFlow graph. 

11394 except (TypeError, ValueError): 

11395 _result = _dispatch.dispatch( 

11396 selu, (), dict(features=features, name=name) 

11397 ) 

11398 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11399 return _result 

11400 raise 

11401 else: 

11402 _result = _dispatcher_for_selu( 

11403 (features, name,), None) 

11404 if _result is not NotImplemented: 

11405 return _result 

11406 # Add nodes to the TensorFlow graph. 

11407 try: 

11408 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11409 "Selu", features=features, name=name) 

11410 except (TypeError, ValueError): 

11411 _result = _dispatch.dispatch( 

11412 selu, (), dict(features=features, name=name) 

11413 ) 

11414 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11415 return _result 

11416 raise 

11417 _result = _outputs[:] 

11418 if _execute.must_record_gradient(): 

11419 _attrs = ("T", _op._get_attr_type("T")) 

11420 _inputs_flat = _op.inputs 

11421 _execute.record_gradient( 

11422 "Selu", _inputs_flat, _attrs, _result) 

11423 _result, = _result 

11424 return _result 

11425 

11426Selu = tf_export("raw_ops.Selu")(_ops.to_raw_op(selu)) 

11427_dispatcher_for_selu = selu._tf_type_based_dispatcher.Dispatch 

11428 

11429 

11430def selu_eager_fallback(features, name, ctx): 

11431 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11432 _inputs_flat = [features] 

11433 _attrs = ("T", _attr_T) 

11434 _result = _execute.execute(b"Selu", 1, inputs=_inputs_flat, attrs=_attrs, 

11435 ctx=ctx, name=name) 

11436 if _execute.must_record_gradient(): 

11437 _execute.record_gradient( 

11438 "Selu", _inputs_flat, _attrs, _result) 

11439 _result, = _result 

11440 return _result 

11441 

11442 

11443def selu_grad(gradients, outputs, name=None): 

11444 r"""Computes gradients for the scaled exponential linear (Selu) operation. 

11445 

11446 Args: 

11447 gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11448 The backpropagated gradients to the corresponding Selu operation. 

11449 outputs: A `Tensor`. Must have the same type as `gradients`. 

11450 The outputs of the corresponding Selu operation. 

11451 name: A name for the operation (optional). 

11452 

11453 Returns: 

11454 A `Tensor`. Has the same type as `gradients`. 

11455 """ 

11456 _ctx = _context._context or _context.context() 

11457 tld = _ctx._thread_local_data 

11458 if tld.is_eager: 

11459 try: 

11460 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11461 _ctx, "SeluGrad", name, gradients, outputs) 

11462 return _result 

11463 except _core._NotOkStatusException as e: 

11464 _ops.raise_from_not_ok_status(e, name) 

11465 except _core._FallbackException: 

11466 pass 

11467 try: 

11468 return selu_grad_eager_fallback( 

11469 gradients, outputs, name=name, ctx=_ctx) 

11470 except _core._SymbolicException: 

11471 pass # Add nodes to the TensorFlow graph. 

11472 # Add nodes to the TensorFlow graph. 

11473 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11474 "SeluGrad", gradients=gradients, outputs=outputs, name=name) 

11475 _result = _outputs[:] 

11476 if _execute.must_record_gradient(): 

11477 _attrs = ("T", _op._get_attr_type("T")) 

11478 _inputs_flat = _op.inputs 

11479 _execute.record_gradient( 

11480 "SeluGrad", _inputs_flat, _attrs, _result) 

11481 _result, = _result 

11482 return _result 

11483 

11484SeluGrad = tf_export("raw_ops.SeluGrad")(_ops.to_raw_op(selu_grad)) 

11485 

11486 

11487def selu_grad_eager_fallback(gradients, outputs, name, ctx): 

11488 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, outputs], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11489 (gradients, outputs) = _inputs_T 

11490 _inputs_flat = [gradients, outputs] 

11491 _attrs = ("T", _attr_T) 

11492 _result = _execute.execute(b"SeluGrad", 1, inputs=_inputs_flat, 

11493 attrs=_attrs, ctx=ctx, name=name) 

11494 if _execute.must_record_gradient(): 

11495 _execute.record_gradient( 

11496 "SeluGrad", _inputs_flat, _attrs, _result) 

11497 _result, = _result 

11498 return _result 

11499 

11500 

11501def softmax(logits, name=None): 

11502 r"""Computes softmax activations. 

11503 

11504 For each batch `i` and class `j` we have 

11505 

11506 $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ 

11507 

11508 Args: 

11509 logits: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11510 2-D with shape `[batch_size, num_classes]`. 

11511 name: A name for the operation (optional). 

11512 

11513 Returns: 

11514 A `Tensor`. Has the same type as `logits`. 

11515 """ 

11516 _ctx = _context._context or _context.context() 

11517 tld = _ctx._thread_local_data 

11518 if tld.is_eager: 

11519 try: 

11520 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11521 _ctx, "Softmax", name, logits) 

11522 return _result 

11523 except _core._NotOkStatusException as e: 

11524 _ops.raise_from_not_ok_status(e, name) 

11525 except _core._FallbackException: 

11526 pass 

11527 try: 

11528 return softmax_eager_fallback( 

11529 logits, name=name, ctx=_ctx) 

11530 except _core._SymbolicException: 

11531 pass # Add nodes to the TensorFlow graph. 

11532 # Add nodes to the TensorFlow graph. 

11533 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11534 "Softmax", logits=logits, name=name) 

11535 _result = _outputs[:] 

11536 if _execute.must_record_gradient(): 

11537 _attrs = ("T", _op._get_attr_type("T")) 

11538 _inputs_flat = _op.inputs 

11539 _execute.record_gradient( 

11540 "Softmax", _inputs_flat, _attrs, _result) 

11541 _result, = _result 

11542 return _result 

11543 

11544Softmax = tf_export("raw_ops.Softmax")(_ops.to_raw_op(softmax)) 

11545 

11546 

11547def softmax_eager_fallback(logits, name, ctx): 

11548 _attr_T, (logits,) = _execute.args_to_matching_eager([logits], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11549 _inputs_flat = [logits] 

11550 _attrs = ("T", _attr_T) 

11551 _result = _execute.execute(b"Softmax", 1, inputs=_inputs_flat, attrs=_attrs, 

11552 ctx=ctx, name=name) 

11553 if _execute.must_record_gradient(): 

11554 _execute.record_gradient( 

11555 "Softmax", _inputs_flat, _attrs, _result) 

11556 _result, = _result 

11557 return _result 

11558 

11559_SoftmaxCrossEntropyWithLogitsOutput = collections.namedtuple( 

11560 "SoftmaxCrossEntropyWithLogits", 

11561 ["loss", "backprop"]) 

11562 

11563 

11564def softmax_cross_entropy_with_logits(features, labels, name=None): 

11565 r"""Computes softmax cross entropy cost and gradients to backpropagate. 

11566 

11567 Inputs are the logits, not probabilities. 

11568 

11569 Args: 

11570 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11571 batch_size x num_classes matrix 

11572 labels: A `Tensor`. Must have the same type as `features`. 

11573 batch_size x num_classes matrix 

11574 The caller must ensure that each batch of labels represents a valid 

11575 probability distribution. 

11576 name: A name for the operation (optional). 

11577 

11578 Returns: 

11579 A tuple of `Tensor` objects (loss, backprop). 

11580 

11581 loss: A `Tensor`. Has the same type as `features`. 

11582 backprop: A `Tensor`. Has the same type as `features`. 

11583 """ 

11584 _ctx = _context._context or _context.context() 

11585 tld = _ctx._thread_local_data 

11586 if tld.is_eager: 

11587 try: 

11588 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11589 _ctx, "SoftmaxCrossEntropyWithLogits", name, features, labels) 

11590 _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11591 return _result 

11592 except _core._NotOkStatusException as e: 

11593 _ops.raise_from_not_ok_status(e, name) 

11594 except _core._FallbackException: 

11595 pass 

11596 try: 

11597 return softmax_cross_entropy_with_logits_eager_fallback( 

11598 features, labels, name=name, ctx=_ctx) 

11599 except _core._SymbolicException: 

11600 pass # Add nodes to the TensorFlow graph. 

11601 # Add nodes to the TensorFlow graph. 

11602 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11603 "SoftmaxCrossEntropyWithLogits", features=features, labels=labels, 

11604 name=name) 

11605 _result = _outputs[:] 

11606 if _execute.must_record_gradient(): 

11607 _attrs = ("T", _op._get_attr_type("T")) 

11608 _inputs_flat = _op.inputs 

11609 _execute.record_gradient( 

11610 "SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result) 

11611 _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11612 return _result 

11613 

11614SoftmaxCrossEntropyWithLogits = tf_export("raw_ops.SoftmaxCrossEntropyWithLogits")(_ops.to_raw_op(softmax_cross_entropy_with_logits)) 

11615 

11616 

11617def softmax_cross_entropy_with_logits_eager_fallback(features, labels, name, ctx): 

11618 _attr_T, _inputs_T = _execute.args_to_matching_eager([features, labels], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11619 (features, labels) = _inputs_T 

11620 _inputs_flat = [features, labels] 

11621 _attrs = ("T", _attr_T) 

11622 _result = _execute.execute(b"SoftmaxCrossEntropyWithLogits", 2, 

11623 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

11624 name=name) 

11625 if _execute.must_record_gradient(): 

11626 _execute.record_gradient( 

11627 "SoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result) 

11628 _result = _SoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11629 return _result 

11630 

11631 

11632def softplus(features, name=None): 

11633 r"""TODO: add doc. 

11634 

11635 Args: 

11636 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11637 name: A name for the operation (optional). 

11638 

11639 Returns: 

11640 A `Tensor`. Has the same type as `features`. 

11641 """ 

11642 _ctx = _context._context or _context.context() 

11643 tld = _ctx._thread_local_data 

11644 if tld.is_eager: 

11645 try: 

11646 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11647 _ctx, "Softplus", name, features) 

11648 return _result 

11649 except _core._NotOkStatusException as e: 

11650 _ops.raise_from_not_ok_status(e, name) 

11651 except _core._FallbackException: 

11652 pass 

11653 try: 

11654 return softplus_eager_fallback( 

11655 features, name=name, ctx=_ctx) 

11656 except _core._SymbolicException: 

11657 pass # Add nodes to the TensorFlow graph. 

11658 # Add nodes to the TensorFlow graph. 

11659 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11660 "Softplus", features=features, name=name) 

11661 _result = _outputs[:] 

11662 if _execute.must_record_gradient(): 

11663 _attrs = ("T", _op._get_attr_type("T")) 

11664 _inputs_flat = _op.inputs 

11665 _execute.record_gradient( 

11666 "Softplus", _inputs_flat, _attrs, _result) 

11667 _result, = _result 

11668 return _result 

11669 

11670Softplus = tf_export("raw_ops.Softplus")(_ops.to_raw_op(softplus)) 

11671 

11672 

11673def softplus_eager_fallback(features, name, ctx): 

11674 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11675 _inputs_flat = [features] 

11676 _attrs = ("T", _attr_T) 

11677 _result = _execute.execute(b"Softplus", 1, inputs=_inputs_flat, 

11678 attrs=_attrs, ctx=ctx, name=name) 

11679 if _execute.must_record_gradient(): 

11680 _execute.record_gradient( 

11681 "Softplus", _inputs_flat, _attrs, _result) 

11682 _result, = _result 

11683 return _result 

11684 

11685 

11686def softplus_grad(gradients, features, name=None): 

11687 r"""Computes softplus gradients for a softplus operation. 

11688 

11689 Args: 

11690 gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11691 The backpropagated gradients to the corresponding softplus operation. 

11692 features: A `Tensor`. Must have the same type as `gradients`. 

11693 The features passed as input to the corresponding softplus operation. 

11694 name: A name for the operation (optional). 

11695 

11696 Returns: 

11697 A `Tensor`. Has the same type as `gradients`. 

11698 """ 

11699 _ctx = _context._context or _context.context() 

11700 tld = _ctx._thread_local_data 

11701 if tld.is_eager: 

11702 try: 

11703 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11704 _ctx, "SoftplusGrad", name, gradients, features) 

11705 return _result 

11706 except _core._NotOkStatusException as e: 

11707 _ops.raise_from_not_ok_status(e, name) 

11708 except _core._FallbackException: 

11709 pass 

11710 try: 

11711 return softplus_grad_eager_fallback( 

11712 gradients, features, name=name, ctx=_ctx) 

11713 except _core._SymbolicException: 

11714 pass # Add nodes to the TensorFlow graph. 

11715 # Add nodes to the TensorFlow graph. 

11716 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11717 "SoftplusGrad", gradients=gradients, features=features, name=name) 

11718 _result = _outputs[:] 

11719 if _execute.must_record_gradient(): 

11720 _attrs = ("T", _op._get_attr_type("T")) 

11721 _inputs_flat = _op.inputs 

11722 _execute.record_gradient( 

11723 "SoftplusGrad", _inputs_flat, _attrs, _result) 

11724 _result, = _result 

11725 return _result 

11726 

11727SoftplusGrad = tf_export("raw_ops.SoftplusGrad")(_ops.to_raw_op(softplus_grad)) 

11728 

11729 

11730def softplus_grad_eager_fallback(gradients, features, name, ctx): 

11731 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11732 (gradients, features) = _inputs_T 

11733 _inputs_flat = [gradients, features] 

11734 _attrs = ("T", _attr_T) 

11735 _result = _execute.execute(b"SoftplusGrad", 1, inputs=_inputs_flat, 

11736 attrs=_attrs, ctx=ctx, name=name) 

11737 if _execute.must_record_gradient(): 

11738 _execute.record_gradient( 

11739 "SoftplusGrad", _inputs_flat, _attrs, _result) 

11740 _result, = _result 

11741 return _result 

11742 

11743 

11744@_dispatch.add_fallback_dispatch_list 

11745@_dispatch.add_type_based_api_dispatcher 

11746@tf_export('nn.softsign', 'math.softsign') 

11747def softsign(features, name=None): 

11748 r"""Computes softsign: `features / (abs(features) + 1)`. 

11749 

11750 Args: 

11751 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11752 name: A name for the operation (optional). 

11753 

11754 Returns: 

11755 A `Tensor`. Has the same type as `features`. 

11756 """ 

11757 _ctx = _context._context or _context.context() 

11758 tld = _ctx._thread_local_data 

11759 if tld.is_eager: 

11760 try: 

11761 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11762 _ctx, "Softsign", name, features) 

11763 return _result 

11764 except _core._NotOkStatusException as e: 

11765 _ops.raise_from_not_ok_status(e, name) 

11766 except _core._FallbackException: 

11767 pass 

11768 try: 

11769 _result = _dispatcher_for_softsign( 

11770 (features, name,), None) 

11771 if _result is not NotImplemented: 

11772 return _result 

11773 return softsign_eager_fallback( 

11774 features, name=name, ctx=_ctx) 

11775 except _core._SymbolicException: 

11776 pass # Add nodes to the TensorFlow graph. 

11777 except (TypeError, ValueError): 

11778 _result = _dispatch.dispatch( 

11779 softsign, (), dict(features=features, name=name) 

11780 ) 

11781 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11782 return _result 

11783 raise 

11784 else: 

11785 _result = _dispatcher_for_softsign( 

11786 (features, name,), None) 

11787 if _result is not NotImplemented: 

11788 return _result 

11789 # Add nodes to the TensorFlow graph. 

11790 try: 

11791 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11792 "Softsign", features=features, name=name) 

11793 except (TypeError, ValueError): 

11794 _result = _dispatch.dispatch( 

11795 softsign, (), dict(features=features, name=name) 

11796 ) 

11797 if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: 

11798 return _result 

11799 raise 

11800 _result = _outputs[:] 

11801 if _execute.must_record_gradient(): 

11802 _attrs = ("T", _op._get_attr_type("T")) 

11803 _inputs_flat = _op.inputs 

11804 _execute.record_gradient( 

11805 "Softsign", _inputs_flat, _attrs, _result) 

11806 _result, = _result 

11807 return _result 

11808 

11809Softsign = tf_export("raw_ops.Softsign")(_ops.to_raw_op(softsign)) 

11810_dispatcher_for_softsign = softsign._tf_type_based_dispatcher.Dispatch 

11811 

11812 

11813def softsign_eager_fallback(features, name, ctx): 

11814 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11815 _inputs_flat = [features] 

11816 _attrs = ("T", _attr_T) 

11817 _result = _execute.execute(b"Softsign", 1, inputs=_inputs_flat, 

11818 attrs=_attrs, ctx=ctx, name=name) 

11819 if _execute.must_record_gradient(): 

11820 _execute.record_gradient( 

11821 "Softsign", _inputs_flat, _attrs, _result) 

11822 _result, = _result 

11823 return _result 

11824 

11825 

11826def softsign_grad(gradients, features, name=None): 

11827 r"""Computes softsign gradients for a softsign operation. 

11828 

11829 Args: 

11830 gradients: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11831 The backpropagated gradients to the corresponding softsign operation. 

11832 features: A `Tensor`. Must have the same type as `gradients`. 

11833 The features passed as input to the corresponding softsign operation. 

11834 name: A name for the operation (optional). 

11835 

11836 Returns: 

11837 A `Tensor`. Has the same type as `gradients`. 

11838 """ 

11839 _ctx = _context._context or _context.context() 

11840 tld = _ctx._thread_local_data 

11841 if tld.is_eager: 

11842 try: 

11843 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11844 _ctx, "SoftsignGrad", name, gradients, features) 

11845 return _result 

11846 except _core._NotOkStatusException as e: 

11847 _ops.raise_from_not_ok_status(e, name) 

11848 except _core._FallbackException: 

11849 pass 

11850 try: 

11851 return softsign_grad_eager_fallback( 

11852 gradients, features, name=name, ctx=_ctx) 

11853 except _core._SymbolicException: 

11854 pass # Add nodes to the TensorFlow graph. 

11855 # Add nodes to the TensorFlow graph. 

11856 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11857 "SoftsignGrad", gradients=gradients, features=features, name=name) 

11858 _result = _outputs[:] 

11859 if _execute.must_record_gradient(): 

11860 _attrs = ("T", _op._get_attr_type("T")) 

11861 _inputs_flat = _op.inputs 

11862 _execute.record_gradient( 

11863 "SoftsignGrad", _inputs_flat, _attrs, _result) 

11864 _result, = _result 

11865 return _result 

11866 

11867SoftsignGrad = tf_export("raw_ops.SoftsignGrad")(_ops.to_raw_op(softsign_grad)) 

11868 

11869 

11870def softsign_grad_eager_fallback(gradients, features, name, ctx): 

11871 _attr_T, _inputs_T = _execute.args_to_matching_eager([gradients, features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11872 (gradients, features) = _inputs_T 

11873 _inputs_flat = [gradients, features] 

11874 _attrs = ("T", _attr_T) 

11875 _result = _execute.execute(b"SoftsignGrad", 1, inputs=_inputs_flat, 

11876 attrs=_attrs, ctx=ctx, name=name) 

11877 if _execute.must_record_gradient(): 

11878 _execute.record_gradient( 

11879 "SoftsignGrad", _inputs_flat, _attrs, _result) 

11880 _result, = _result 

11881 return _result 

11882 

11883_SparseSoftmaxCrossEntropyWithLogitsOutput = collections.namedtuple( 

11884 "SparseSoftmaxCrossEntropyWithLogits", 

11885 ["loss", "backprop"]) 

11886 

11887 

11888def sparse_softmax_cross_entropy_with_logits(features, labels, name=None): 

11889 r"""Computes softmax cross entropy cost and gradients to backpropagate. 

11890 

11891 Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept 

11892 a matrix of label probabilities, but rather a single label per row 

11893 of features. This label is considered to have probability 1.0 for the 

11894 given row. 

11895 

11896 Inputs are the logits, not probabilities. 

11897 

11898 Args: 

11899 features: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `float64`. 

11900 batch_size x num_classes matrix 

11901 labels: A `Tensor`. Must be one of the following types: `int32`, `int64`. 

11902 batch_size vector with values in [0, num_classes). 

11903 This is the label for the given minibatch entry. 

11904 name: A name for the operation (optional). 

11905 

11906 Returns: 

11907 A tuple of `Tensor` objects (loss, backprop). 

11908 

11909 loss: A `Tensor`. Has the same type as `features`. 

11910 backprop: A `Tensor`. Has the same type as `features`. 

11911 """ 

11912 _ctx = _context._context or _context.context() 

11913 tld = _ctx._thread_local_data 

11914 if tld.is_eager: 

11915 try: 

11916 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

11917 _ctx, "SparseSoftmaxCrossEntropyWithLogits", name, features, labels) 

11918 _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11919 return _result 

11920 except _core._NotOkStatusException as e: 

11921 _ops.raise_from_not_ok_status(e, name) 

11922 except _core._FallbackException: 

11923 pass 

11924 try: 

11925 return sparse_softmax_cross_entropy_with_logits_eager_fallback( 

11926 features, labels, name=name, ctx=_ctx) 

11927 except _core._SymbolicException: 

11928 pass # Add nodes to the TensorFlow graph. 

11929 # Add nodes to the TensorFlow graph. 

11930 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

11931 "SparseSoftmaxCrossEntropyWithLogits", features=features, 

11932 labels=labels, name=name) 

11933 _result = _outputs[:] 

11934 if _execute.must_record_gradient(): 

11935 _attrs = ("T", _op._get_attr_type("T"), "Tlabels", 

11936 _op._get_attr_type("Tlabels")) 

11937 _inputs_flat = _op.inputs 

11938 _execute.record_gradient( 

11939 "SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result) 

11940 _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11941 return _result 

11942 

11943SparseSoftmaxCrossEntropyWithLogits = tf_export("raw_ops.SparseSoftmaxCrossEntropyWithLogits")(_ops.to_raw_op(sparse_softmax_cross_entropy_with_logits)) 

11944 

11945 

11946def sparse_softmax_cross_entropy_with_logits_eager_fallback(features, labels, name, ctx): 

11947 _attr_T, (features,) = _execute.args_to_matching_eager([features], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.float64, ]) 

11948 _attr_Tlabels, (labels,) = _execute.args_to_matching_eager([labels], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64) 

11949 _inputs_flat = [features, labels] 

11950 _attrs = ("T", _attr_T, "Tlabels", _attr_Tlabels) 

11951 _result = _execute.execute(b"SparseSoftmaxCrossEntropyWithLogits", 2, 

11952 inputs=_inputs_flat, attrs=_attrs, ctx=ctx, 

11953 name=name) 

11954 if _execute.must_record_gradient(): 

11955 _execute.record_gradient( 

11956 "SparseSoftmaxCrossEntropyWithLogits", _inputs_flat, _attrs, _result) 

11957 _result = _SparseSoftmaxCrossEntropyWithLogitsOutput._make(_result) 

11958 return _result 

11959 

11960_TopKOutput = collections.namedtuple( 

11961 "TopK", 

11962 ["values", "indices"]) 

11963 

11964 

11965def top_k(input, k, sorted=True, name=None): 

11966 r"""Finds values and indices of the `k` largest elements for the last dimension. 

11967 

11968 If the input is a vector (rank-1), finds the `k` largest entries in the vector 

11969 and outputs their values and indices as vectors. Thus `values[j]` is the 

11970 `j`-th largest entry in `input`, and its index is `indices[j]`. 

11971 

11972 For matrices (resp. higher rank input), computes the top `k` entries in each 

11973 row (resp. vector along the last dimension). Thus, 

11974 

11975 values.shape = indices.shape = input.shape[:-1] + [k] 

11976 

11977 If two elements are equal, the lower-index element appears first. 

11978 

11979 If `k` varies dynamically, use `TopKV2` below. 

11980 

11981 Args: 

11982 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

11983 1-D or higher with last dimension at least `k`. 

11984 k: An `int` that is `>= 0`. 

11985 Number of top elements to look for along the last dimension (along each 

11986 row for matrices). 

11987 sorted: An optional `bool`. Defaults to `True`. 

11988 If true the resulting `k` elements will be sorted by the values in 

11989 descending order. 

11990 name: A name for the operation (optional). 

11991 

11992 Returns: 

11993 A tuple of `Tensor` objects (values, indices). 

11994 

11995 values: A `Tensor`. Has the same type as `input`. 

11996 indices: A `Tensor` of type `int32`. 

11997 """ 

11998 _ctx = _context._context or _context.context() 

11999 tld = _ctx._thread_local_data 

12000 if tld.is_eager: 

12001 try: 

12002 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

12003 _ctx, "TopK", name, input, "k", k, "sorted", sorted) 

12004 _result = _TopKOutput._make(_result) 

12005 return _result 

12006 except _core._NotOkStatusException as e: 

12007 _ops.raise_from_not_ok_status(e, name) 

12008 except _core._FallbackException: 

12009 pass 

12010 try: 

12011 return top_k_eager_fallback( 

12012 input, k=k, sorted=sorted, name=name, ctx=_ctx) 

12013 except _core._SymbolicException: 

12014 pass # Add nodes to the TensorFlow graph. 

12015 # Add nodes to the TensorFlow graph. 

12016 k = _execute.make_int(k, "k") 

12017 if sorted is None: 

12018 sorted = True 

12019 sorted = _execute.make_bool(sorted, "sorted") 

12020 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

12021 "TopK", input=input, k=k, sorted=sorted, name=name) 

12022 _result = _outputs[:] 

12023 if _execute.must_record_gradient(): 

12024 _attrs = ("k", _op._get_attr_int("k"), "sorted", 

12025 _op._get_attr_bool("sorted"), "T", _op._get_attr_type("T")) 

12026 _inputs_flat = _op.inputs 

12027 _execute.record_gradient( 

12028 "TopK", _inputs_flat, _attrs, _result) 

12029 _result = _TopKOutput._make(_result) 

12030 return _result 

12031 

12032TopK = tf_export("raw_ops.TopK")(_ops.to_raw_op(top_k)) 

12033 

12034 

12035def top_k_eager_fallback(input, k, sorted, name, ctx): 

12036 k = _execute.make_int(k, "k") 

12037 if sorted is None: 

12038 sorted = True 

12039 sorted = _execute.make_bool(sorted, "sorted") 

12040 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

12041 _inputs_flat = [input] 

12042 _attrs = ("k", k, "sorted", sorted, "T", _attr_T) 

12043 _result = _execute.execute(b"TopK", 2, inputs=_inputs_flat, attrs=_attrs, 

12044 ctx=ctx, name=name) 

12045 if _execute.must_record_gradient(): 

12046 _execute.record_gradient( 

12047 "TopK", _inputs_flat, _attrs, _result) 

12048 _result = _TopKOutput._make(_result) 

12049 return _result 

12050 

12051_TopKV2Output = collections.namedtuple( 

12052 "TopKV2", 

12053 ["values", "indices"]) 

12054 

12055 

12056def top_kv2(input, k, sorted=True, index_type=_dtypes.int32, name=None): 

12057 r"""Finds values and indices of the `k` largest elements for the last dimension. 

12058 

12059 If the input is a vector (rank-1), finds the `k` largest entries in the vector 

12060 and outputs their values and indices as vectors. Thus `values[j]` is the 

12061 `j`-th largest entry in `input`, and its index is `indices[j]`. 

12062 

12063 For matrices (resp. higher rank input), computes the top `k` entries in each 

12064 row (resp. vector along the last dimension). Thus, 

12065 

12066 values.shape = indices.shape = input.shape[:-1] + [k] 

12067 

12068 If two elements are equal, the lower-index element appears first. 

12069 

12070 Args: 

12071 input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 

12072 1-D or higher with last dimension at least `k`. 

12073 k: A `Tensor`. Must be one of the following types: `int16`, `int32`, `int64`. 

12074 0-D. Number of top elements to look for along the last dimension (along each 

12075 row for matrices). 

12076 sorted: An optional `bool`. Defaults to `True`. 

12077 If true the resulting `k` elements will be sorted by the values in 

12078 descending order. 

12079 index_type: An optional `tf.DType` from: `tf.int16, tf.int32, tf.int64`. Defaults to `tf.int32`. 

12080 name: A name for the operation (optional). 

12081 

12082 Returns: 

12083 A tuple of `Tensor` objects (values, indices). 

12084 

12085 values: A `Tensor`. Has the same type as `input`. 

12086 indices: A `Tensor` of type `index_type`. 

12087 """ 

12088 _ctx = _context._context or _context.context() 

12089 tld = _ctx._thread_local_data 

12090 if tld.is_eager: 

12091 try: 

12092 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

12093 _ctx, "TopKV2", name, input, k, "sorted", sorted, "index_type", 

12094 index_type) 

12095 _result = _TopKV2Output._make(_result) 

12096 return _result 

12097 except _core._NotOkStatusException as e: 

12098 _ops.raise_from_not_ok_status(e, name) 

12099 except _core._FallbackException: 

12100 pass 

12101 try: 

12102 return top_kv2_eager_fallback( 

12103 input, k, sorted=sorted, index_type=index_type, name=name, ctx=_ctx) 

12104 except _core._SymbolicException: 

12105 pass # Add nodes to the TensorFlow graph. 

12106 # Add nodes to the TensorFlow graph. 

12107 if sorted is None: 

12108 sorted = True 

12109 sorted = _execute.make_bool(sorted, "sorted") 

12110 if index_type is None: 

12111 index_type = _dtypes.int32 

12112 index_type = _execute.make_type(index_type, "index_type") 

12113 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

12114 "TopKV2", input=input, k=k, sorted=sorted, index_type=index_type, 

12115 name=name) 

12116 _result = _outputs[:] 

12117 if _execute.must_record_gradient(): 

12118 _attrs = ("sorted", _op._get_attr_bool("sorted"), "T", 

12119 _op._get_attr_type("T"), "Tk", _op._get_attr_type("Tk"), 

12120 "index_type", _op._get_attr_type("index_type")) 

12121 _inputs_flat = _op.inputs 

12122 _execute.record_gradient( 

12123 "TopKV2", _inputs_flat, _attrs, _result) 

12124 _result = _TopKV2Output._make(_result) 

12125 return _result 

12126 

12127TopKV2 = tf_export("raw_ops.TopKV2")(_ops.to_raw_op(top_kv2)) 

12128 

12129 

12130def top_kv2_eager_fallback(input, k, sorted, index_type, name, ctx): 

12131 if sorted is None: 

12132 sorted = True 

12133 sorted = _execute.make_bool(sorted, "sorted") 

12134 if index_type is None: 

12135 index_type = _dtypes.int32 

12136 index_type = _execute.make_type(index_type, "index_type") 

12137 _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) 

12138 _attr_Tk, (k,) = _execute.args_to_matching_eager([k], ctx, [_dtypes.int16, _dtypes.int32, _dtypes.int64, ], _dtypes.int32) 

12139 _inputs_flat = [input, k] 

12140 _attrs = ("sorted", sorted, "T", _attr_T, "Tk", _attr_Tk, "index_type", 

12141 index_type) 

12142 _result = _execute.execute(b"TopKV2", 2, inputs=_inputs_flat, attrs=_attrs, 

12143 ctx=ctx, name=name) 

12144 if _execute.must_record_gradient(): 

12145 _execute.record_gradient( 

12146 "TopKV2", _inputs_flat, _attrs, _result) 

12147 _result = _TopKV2Output._make(_result) 

12148 return _result 

12149