Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/logging_ops.py: 38%

154 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1# Copyright 2015 The TensorFlow Authors. All Rights Reserved. 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); 

4# you may not use this file except in compliance with the License. 

5# You may obtain a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, 

11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 

12# See the License for the specific language governing permissions and 

13# limitations under the License. 

14# ============================================================================== 

15"""Logging and Summary Operations.""" 

16# pylint: disable=protected-access 

17import collections as py_collections 

18import os 

19import pprint 

20import random 

21import sys 

22 

23from absl import logging 

24 

25from tensorflow.python import pywrap_tfe 

26from tensorflow.python.framework import dtypes 

27from tensorflow.python.framework import ops 

28from tensorflow.python.framework import sparse_tensor 

29from tensorflow.python.framework import tensor_util 

30from tensorflow.python.ops import gen_logging_ops 

31from tensorflow.python.ops import string_ops 

32# go/tf-wildcard-import 

33# pylint: disable=wildcard-import 

34from tensorflow.python.ops.gen_logging_ops import * 

35# pylint: enable=wildcard-import 

36from tensorflow.python.platform import tf_logging 

37from tensorflow.python.util import dispatch 

38from tensorflow.python.util import nest 

39from tensorflow.python.util.deprecation import deprecated 

40from tensorflow.python.util.tf_export import tf_export 

41 

42 

43def enable_interactive_logging(): 

44 pywrap_tfe.TFE_Py_EnableInteractivePythonLogging() 

45 

46# Register printing to the cell output if we are in a Colab or Jupyter Notebook. 

47try: 

48 get_ipython() # Exists in an ipython env like Jupyter or Colab 

49 enable_interactive_logging() 

50except NameError: 

51 pass 

52 
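
# Added illustration (not part of the original module): a hedged sketch of
# calling enable_interactive_logging() manually in a plain Python process,
# where the get_ipython() check above does not fire. After the call, tf.print
# output is routed through Python's streams rather than the C++ stderr.
def _example_manual_interactive_logging():  # pragma: no cover
  import tensorflow as tf  # local import, used only by this sketch
  enable_interactive_logging()
  tf.print("routed through Python's stderr")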

53# The python wrapper for Assert is in control_flow_ops, as the Assert 

54# call relies on certain conditionals for its dependencies. Use 

55# control_flow_ops.Assert. 

56 

57# Assert and Print are special symbols in Python 2, so we must 

58# have an upper-case version of them. When support for it is dropped, 

59# we can allow lowercase. 

60# See https://github.com/tensorflow/tensorflow/issues/18053 

61 

62 

63# pylint: disable=invalid-name 

64@deprecated("2018-08-20", "Use tf.print instead of tf.Print. Note that " 

65 "tf.print returns a no-output operator that directly " 

66 "prints the output. Outside of defuns or eager mode, " 

67 "this operator will not be executed unless it is " 

68 "directly specified in session.run or used as a " 

69 "control dependency for other operators. This is " 

70 "only a concern in graph mode. Below is an example " 

71 "of how to ensure tf.print executes in graph mode:\n") 

72@tf_export(v1=["Print"]) 

73@dispatch.add_dispatch_support 

74def Print(input_, data, message=None, first_n=None, summarize=None, name=None): 

75 """Prints a list of tensors. 

76 

77 This is an identity op (behaves like `tf.identity`) with the side effect 

78 of printing `data` when evaluating. 

79 

80 Note: This op prints to the standard error. It is not currently compatible 

81 with Jupyter notebooks (printing to the notebook *server's* output, not into

82 the notebook). 

83 

84 @compatibility(TF2) 

85 This API is deprecated. Use `tf.print` instead. `tf.print` does not need the 

86 `input_` argument. 

87 

88 `tf.print` works in TF2 when executing eagerly and inside a `tf.function`. 

89 

90 In TF1-styled sessions, an explicit control dependency declaration is needed 

91 to execute the `tf.print` operation. Refer to the documentation of 

92 `tf.print` for more details. 

93 @end_compatibility 

94 

95 Args: 

96 input_: A tensor passed through this op. 

97 data: A list of tensors to print out when op is evaluated. 

98 message: A string, prefix of the error message. 

99 first_n: Only log `first_n` number of times. Negative numbers log always; 

100 this is the default. 

101 summarize: Only print this many entries of each tensor. If None, then a 

102 maximum of 3 elements are printed per input tensor. 

103 name: A name for the operation (optional). 

104 

105 Returns: 

106 A `Tensor`. Has the same type and contents as `input_`. 

107 

108 ```python 

109 sess = tf.compat.v1.Session() 

110 with sess.as_default(): 

111 tensor = tf.range(10) 

112 print_op = tf.print(tensor) 

113 with tf.control_dependencies([print_op]): 

114 out = tf.add(tensor, tensor) 

115 sess.run(out) 

116 ``` 

117 """ 

118 return gen_logging_ops._print(input_, data, message, first_n, summarize, name) 
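
# Added illustration (not part of the original module): a hedged TF1-style
# sketch of the deprecated Print identity op defined above. The session setup
# and tensor values are illustrative only.
def _example_v1_print():  # pragma: no cover
  import tensorflow.compat.v1 as tf  # assumed TF1 compat entry point
  tf.disable_eager_execution()
  x = tf.range(5)
  # `Print` returns `x` unchanged; the listed tensors are written to stderr
  # whenever `y` is evaluated.
  y = tf.Print(x, [x, x * 2], message="x and 2x: ", summarize=5)
  with tf.Session() as sess:
    return sess.run(y)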

119 

120 

121# pylint: enable=invalid-name 

122 

123 

124def _generate_placeholder_string(x, default_placeholder="{}"): 

125 """Generate and return a string that does not appear in `x`.""" 

126 placeholder = default_placeholder 

127 rng = random.Random(5) 

128 while placeholder in x: 

129 placeholder = placeholder + str(rng.randint(0, 9)) 

130 return placeholder 
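
# Added illustration (not part of the original module): how the helper above
# avoids placeholder collisions. The appended digits are deterministic (the
# RNG is seeded with 5), but their exact values are not relied on here.
def _example_placeholder_generation():  # pragma: no cover
  assert _generate_placeholder_string("no braces here") == "{}"
  collided = _generate_placeholder_string("a template with {} inside")
  # Digits were appended until the placeholder no longer occurs in the input.
  assert collided != "{}" and collided not in "a template with {} inside"
  return collided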

131 

132 

133def _is_filepath(output_stream): 

134 """Returns True if output_stream is a file path.""" 

135 return isinstance(output_stream, str) and output_stream.startswith("file://") 
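
# Added illustration (not part of the original module): the "file://" form
# recognized by _is_filepath lets tf.print write to a file. The path below is
# illustrative only.
def _example_print_to_file():  # pragma: no cover
  import tensorflow as tf  # local import, used only by this sketch
  tf.print("written to a file", output_stream="file:///tmp/tf_print_demo.out")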

136 

137 

138# Temporarily disable pylint g-doc-args error to allow giving more context 

139# about what the kwargs are. 

140# Because we are using arbitrary-length positional arguments, python 2 

141# does not support explicitly specifying the keyword arguments in the 

142# function definition. 

143# pylint: disable=g-doc-args 

144@tf_export("print") 

145@dispatch.add_dispatch_support 

146def print_v2(*inputs, **kwargs): 

147 """Print the specified inputs. 

148 

149 A TensorFlow operator that prints the specified inputs to a desired 

150 output stream or logging level. The inputs may be dense or sparse Tensors, 

151 primitive python objects, data structures that contain tensors, and printable 

152 Python objects. Printed tensors will recursively show the first and last 

153 elements of each dimension to summarize. 

154 

155 Example: 

156 Single-input usage: 

157 

158 ```python 

159 tensor = tf.range(10) 

160 tf.print(tensor, output_stream=sys.stderr) 

161 ``` 

162 

163 (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) 

164 

165 Multi-input usage: 

166 

167 ```python 

168 tensor = tf.range(10) 

169 tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout) 

170 ``` 

171 

172 (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to 

173 sys.stdout) 

174 

175 Changing the input separator: 

176 ```python 

177 tensor_a = tf.range(2) 

178 tensor_b = tensor_a * 2 

179 tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',') 

180 ``` 

181 

182 (This prints "[0 1],[0 2]" to sys.stderr) 

183 

184 Usage in a `tf.function`: 

185 

186 ```python 

187 @tf.function 

188 def f(): 

189 tensor = tf.range(10) 

190 tf.print(tensor, output_stream=sys.stderr) 

191 return tensor 

192 

193 range_tensor = f() 

194 ``` 

195 

196 (This prints "[0 1 2 ... 7 8 9]" to sys.stderr) 

197 

198 *Compatibility usage in TF 1.x graphs*: 

199 

200 In graphs manually created outside of `tf.function`, this method returns 

201 the created TF operator that prints the data. To make sure the 

202 operator runs, users need to pass the produced op to 

203 `tf.compat.v1.Session`'s run method, or to use the op as a control 

204 dependency for executed ops by specifying 

205 `with tf.compat.v1.control_dependencies([print_op])`. 

206 

207 ```python 

208 tf.compat.v1.disable_v2_behavior() # for TF1 compatibility only 

209 

210 sess = tf.compat.v1.Session() 

211 with sess.as_default(): 

212 tensor = tf.range(10) 

213 print_op = tf.print("tensors:", tensor, {2: tensor * 2}, 

214 output_stream=sys.stdout) 

215 with tf.control_dependencies([print_op]): 

216 tripled_tensor = tensor * 3 

217 

218 sess.run(tripled_tensor) 

219 ``` 

220 

221 (This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to 

222 sys.stdout) 

223 

224 Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook 

225 cell outputs. It will not write to the notebook kernel's console logs. 

226 

227 Args: 

228 *inputs: Positional arguments that are the inputs to print. Inputs in the 

229 printed output will be separated by spaces. Inputs may be python 

230 primitives, tensors, data structures such as dicts and lists that may 

231 contain tensors (with the data structures possibly nested in arbitrary 

232 ways), and printable python objects. 

233 output_stream: The output stream, logging level, or file to print to. 

234 Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info, 

235 tf.compat.v1.logging.warning, tf.compat.v1.logging.error, 

236 absl.logging.info, absl.logging.warning and absl.logging.error are also 

237 supported. To print to a file, pass a string started with "file://" 

238 followed by the file path, e.g., "file:///tmp/foo.out". 

239 summarize: The first and last `summarize` elements within each dimension are 

240 recursively printed per Tensor. If None, then the first 3 and last 3 

241 elements of each dimension are printed for each tensor. If set to -1, it 

242 will print all elements of every tensor. 

243 sep: The string to use to separate the inputs. Defaults to " ". 

244 end: End character that is appended at the end of the printed string. Defaults

245 to the newline character. 

246 name: A name for the operation (optional). 

247 

248 Returns: 

249 None when executing eagerly. During graph tracing this returns 

250 a TF operator that prints the specified inputs in the specified output 

251 stream or logging level. This operator will be automatically executed 

252 except inside of `tf.compat.v1` graphs and sessions. 

253 

254 Raises: 

255 ValueError: If an unsupported output stream is specified. 

256 """ 

257 # Because we are using arbitrary-length positional arguments, python 2 

258 # does not support explicitly specifying the keyword arguments in the 

259 # function definition. So, we manually get the keyword arguments w/ default 

260 # values here. 

261 output_stream = kwargs.pop("output_stream", sys.stderr) 

262 name = kwargs.pop("name", None) 

263 summarize = kwargs.pop("summarize", 3) 

264 sep = kwargs.pop("sep", " ") 

265 end = kwargs.pop("end", os.linesep) 

266 if kwargs: 

267 raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs) 

268 format_name = None 

269 if name: 

270 format_name = name + "_format" 

271 

272 # Match the C++ string constants representing the different output streams. 

273 # Keep this updated! 

274 output_stream_to_constant = { 

275 sys.stdout: "stdout", 

276 sys.stderr: "stderr", 

277 tf_logging.INFO: "log(info)", 

278 tf_logging.info: "log(info)", 

279 tf_logging.WARN: "log(warning)", 

280 tf_logging.warning: "log(warning)", 

281 tf_logging.warn: "log(warning)", 

282 tf_logging.ERROR: "log(error)", 

283 tf_logging.error: "log(error)", 

284 logging.INFO: "log(info)", 

285 logging.info: "log(info)", 

287 logging.WARNING: "log(warning)", 

288 logging.WARN: "log(warning)", 

289 logging.warning: "log(warning)", 

290 logging.warn: "log(warning)", 

291 logging.ERROR: "log(error)", 

292 logging.error: "log(error)", 

293 } 

294 

295 if _is_filepath(output_stream): 

296 output_stream_string = output_stream 

297 else: 

298 output_stream_string = output_stream_to_constant.get(output_stream) 

299 if not output_stream_string: 

300 raise ValueError("Unsupported output stream, logging level, or file: " +

301 str(output_stream) + 

302 ". Supported streams are sys.stdout, " 

303 "sys.stderr, tf.logging.info, " 

304 "tf.logging.warning, tf.logging.error. " + 

305 "File needs to be in the form of 'file://<filepath>'.") 

306 

307 # If we are only printing a single string scalar, there is no need to format 

308 if (len(inputs) == 1 and tensor_util.is_tf_type(inputs[0]) and 

309 (not isinstance(inputs[0], sparse_tensor.SparseTensor)) and 

310 (inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)): 

311 formatted_string = inputs[0] 

312 # Otherwise, we construct an appropriate template for the tensors we are 

313 # printing, and format the template using those tensors. 

314 else: 

315 # For each input to this print function, we extract any nested tensors, 

316 # and construct an appropriate template to format representing the 

317 # printed input. 

318 templates = [] 

319 tensors = [] 

320 # If an input to the print function is of type `OrderedDict`, sort its 

321 # elements by the keys for consistency with the ordering of `nest.flatten`. 

322 # This is not needed for `dict` types because `pprint.pformat()` takes care 

323 # of printing the template in a sorted fashion. 

324 inputs_ordered_dicts_sorted = [] 

325 for input_ in inputs: 

326 if isinstance(input_, py_collections.OrderedDict): 

327 inputs_ordered_dicts_sorted.append( 

328 py_collections.OrderedDict(sorted(input_.items()))) 

329 else: 

330 inputs_ordered_dicts_sorted.append(input_) 

331 tensor_free_structure = nest.map_structure( 

332 lambda x: "" if tensor_util.is_tf_type(x) else x, 

333 inputs_ordered_dicts_sorted) 

334 

335 tensor_free_template = " ".join( 

336 pprint.pformat(x) for x in tensor_free_structure) 

337 placeholder = _generate_placeholder_string(tensor_free_template) 

338 

339 for input_ in inputs: 

340 placeholders = [] 

341 # Use the nest utilities to flatten & process any nested elements in this 

342 # input. The placeholder for a tensor in the template should be the 

343 # placeholder string, and the placeholder for a non-tensor can just be 

344 # the printed value of the non-tensor itself. 

345 for x in nest.flatten(input_): 

346 # support sparse tensors 

347 if isinstance(x, sparse_tensor.SparseTensor): 

348 tensors.extend([x.indices, x.values, x.dense_shape]) 

349 placeholders.append( 

350 "SparseTensor(indices={}, values={}, shape={})".format( 

351 placeholder, placeholder, placeholder)) 

352 elif tensor_util.is_tf_type(x): 

353 tensors.append(x) 

354 placeholders.append(placeholder) 

355 else: 

356 placeholders.append(x) 

357 

358 if isinstance(input_, str): 

359 # If the current input to format/print is a normal string, that string 

360 # can act as the template. 

361 cur_template = input_ 

362 else: 

363 # We pack the placeholders into a data structure that matches the 

364 # input data structure format, then format that data structure 

365 # into a string template. 

366 # 

367 # NOTE: We must use pprint.pformat here for building the template for 

368 # unordered data structures such as `dict`, because `str` doesn't 

369 # guarantee orderings, while pprint prints in sorted order. pprint 

370 # will match the ordering of `nest.flatten`. 

371 # This even works when nest.flatten reorders OrderedDicts, because 

372 # pprint is printing *after* the OrderedDicts have been reordered. 

373 cur_template = pprint.pformat( 

374 nest.pack_sequence_as(input_, placeholders)) 

375 templates.append(cur_template) 

376 

377 # We join the templates for the various inputs into a single larger 

378 # template. We also remove all quotes surrounding the placeholders, so that 

379 # the formatted/printed output will not contain quotes around tensors. 

380 # (example of where these quotes might appear: if we have added a 

381 # placeholder string into a list, then pretty-formatted that list) 

382 template = sep.join(templates) 

383 template = template.replace("'" + placeholder + "'", placeholder) 

384 formatted_string = string_ops.string_format( 

385 inputs=tensors, 

386 template=template, 

387 placeholder=placeholder, 

388 summarize=summarize, 

389 name=format_name) 

390 

391 return gen_logging_ops.print_v2( 

392 formatted_string, output_stream=output_stream_string, name=name, end=end) 
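
# Added illustration (not part of the original module): a hedged sketch of how
# the templating above treats a nested input. The dict key and values are
# illustrative only.
def _example_print_nested():  # pragma: no cover
  import tensorflow as tf  # local import, used only by this sketch
  t = tf.range(10)
  # The non-tensor structure becomes the template text (via pprint) and the
  # tensor fills a placeholder, printing roughly:
  #   weights {'layer0': [0 1 2 ... 7 8 9]}
  tf.print("weights", {"layer0": t}, summarize=3)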

393 

394 

395# pylint: enable=g-doc-args 

396 

397 

398@ops.RegisterGradient("Print") 

399def _PrintGrad(op, *grad): 

400 return list(grad) + [None] * (len(op.inputs) - 1) 
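
# Added note (not part of the original module): the gradient registered above
# passes the incoming gradient straight through for `input_` (Print behaves as
# an identity op) and returns None for each `data` input, which does not
# participate in differentiation.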

401 

402 

403def _Collect(val, collections, default_collections): 

404 if collections is None: 

405 collections = default_collections 

406 for key in collections: 

407 ops.add_to_collection(key, val) 
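
# Added illustration (not part of the original module): a hedged sketch of the
# collection bookkeeping performed by _Collect. The registered value and the
# retrieval below are illustrative only.
def _example_collect():  # pragma: no cover
  val = ops.convert_to_tensor("demo_summary")
  _Collect(val, None, [ops.GraphKeys.SUMMARIES])  # falls back to the defaults
  return ops.get_collection(ops.GraphKeys.SUMMARIES)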

408 

409 

410@deprecated( 

411 "2016-11-30", "Please switch to tf.summary.histogram. Note that " 

412 "tf.summary.histogram uses the node name instead of the tag. " 

413 "This means that TensorFlow will automatically de-duplicate summary " 

414 "names based on the scope they are created in.") 

415def histogram_summary(tag, values, collections=None, name=None): 

416 # pylint: disable=line-too-long 

417 """Outputs a `Summary` protocol buffer with a histogram. 

418 

419 This op is deprecated. Please switch to tf.summary.histogram.

420 

421 For an explanation of why this op was deprecated, and information on how to 

422 migrate, look 

423 ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) 

424 

425 The generated 

426 [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) 

427 has one summary value containing a histogram for `values`. 

428 

429 This op reports an `InvalidArgument` error if any value is not finite. 

430 

431 Args: 

432 tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. 

433 values: A real numeric `Tensor`. Any shape. Values to use to build the 

434 histogram. 

435 collections: Optional list of graph collections keys. The new summary op is 

436 added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. 

437 name: A name for the operation (optional). 

438 

439 Returns: 

440 A scalar `Tensor` of type `string`. The serialized `Summary` protocol 

441 buffer. 

442 """ 

443 with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope: 

444 val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope) 

445 _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) 

446 return val 
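
# Added illustration (not part of the original module): a hedged TF1-style
# usage sketch for the deprecated histogram_summary op above. The tensor and
# tag are illustrative.
def _example_histogram_summary():  # pragma: no cover
  from tensorflow.python.ops import random_ops  # local import for the sketch
  activations = random_ops.random_normal([1000])
  # Returns the serialized Summary proto and adds it to GraphKeys.SUMMARIES.
  return histogram_summary("activations", activations)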

447 

448 

449@deprecated( 

450 "2016-11-30", "Please switch to tf.summary.image. Note that " 

451 "tf.summary.image uses the node name instead of the tag. " 

452 "This means that TensorFlow will automatically de-duplicate summary " 

453 "names based on the scope they are created in. Also, the max_images " 

454 "argument was renamed to max_outputs.") 

455def image_summary(tag, tensor, max_images=3, collections=None, name=None): 

456 # pylint: disable=line-too-long 

457 """Outputs a `Summary` protocol buffer with images. 

458 

459 For an explanation of why this op was deprecated, and information on how to 

460 migrate, look 

461 ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) 

462 

463 The summary has up to `max_images` summary values containing images. The 

464 images are built from `tensor` which must be 4-D with shape `[batch_size, 

465 height, width, channels]` and where `channels` can be: 

466 

467 * 1: `tensor` is interpreted as Grayscale. 

468 * 3: `tensor` is interpreted as RGB. 

469 * 4: `tensor` is interpreted as RGBA. 

470 

471 The images have the same number of channels as the input tensor. For float 

472 input, the values are normalized one image at a time to fit in the range 

473 `[0, 255]`. `uint8` values are unchanged. The op uses two different 

474 normalization algorithms: 

475 

476 * If the input values are all positive, they are rescaled so the largest one 

477 is 255. 

478 

479 * If any input value is negative, the values are shifted so input value 0.0 

480 is at 127. They are then rescaled so that either the smallest value is 0, 

481 or the largest one is 255. 

482 

483 The `tag` argument is a scalar `Tensor` of type `string`. It is used to 

484 build the `tag` of the summary values: 

485 

486 * If `max_images` is 1, the summary value tag is '*tag*/image'. 

487 * If `max_images` is greater than 1, the summary value tags are 

488 generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. 

489 

490 Args: 

491 tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the 

492 summary values. 

493 tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height, 

494 width, channels]` where `channels` is 1, 3, or 4. 

495 max_images: Max number of batch elements to generate images for. 

496 collections: Optional list of ops.GraphKeys. The collections to add the 

497 summary to. Defaults to [ops.GraphKeys.SUMMARIES] 

498 name: A name for the operation (optional). 

499 

500 Returns: 

501 A scalar `Tensor` of type `string`. The serialized `Summary` protocol 

502 buffer. 

503 """ 

504 with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope: 

505 val = gen_logging_ops.image_summary( 

506 tag=tag, tensor=tensor, max_images=max_images, name=scope) 

507 _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) 

508 return val 
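
# Added illustration (not part of the original module): a hedged usage sketch
# for image_summary with a batch of single-channel images; the shapes are
# illustrative only.
def _example_image_summary():  # pragma: no cover
  from tensorflow.python.ops import array_ops  # local import for the sketch
  images = array_ops.zeros([4, 28, 28, 1])  # [batch, height, width, channels]
  # Emits up to max_images summary values, tagged 'digits/image/0', ...
  return image_summary("digits", images, max_images=2)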

509 

510 

511@deprecated( 

512 "2016-11-30", "Please switch to tf.summary.audio. Note that " 

513 "tf.summary.audio uses the node name instead of the tag. " 

514 "This means that TensorFlow will automatically de-duplicate summary " 

515 "names based on the scope they are created in.") 

516def audio_summary(tag, 

517 tensor, 

518 sample_rate, 

519 max_outputs=3, 

520 collections=None, 

521 name=None): 

522 # pylint: disable=line-too-long 

523 """Outputs a `Summary` protocol buffer with audio. 

524 

525 This op is deprecated. Please switch to tf.summary.audio. 

526 For an explanation of why this op was deprecated, and information on how to 

527 migrate, look 

528 ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) 

529 

530 The summary has up to `max_outputs` summary values containing audio. The 

531 audio is built from `tensor` which must be 3-D with shape `[batch_size, 

532 frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are 

533 assumed to be in the range of `[-1.0, 1.0]` with a sample rate of 

534 `sample_rate`. 

535 

536 The `tag` argument is a scalar `Tensor` of type `string`. It is used to 

537 build the `tag` of the summary values: 

538 

539 * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. 

540 * If `max_outputs` is greater than 1, the summary value tags are 

541 generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. 

542 

543 Args: 

544 tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the 

545 summary values. 

546 tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]` 

547 or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`. 

548 sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the 

549 signal in hertz. 

550 max_outputs: Max number of batch elements to generate audio for. 

551 collections: Optional list of ops.GraphKeys. The collections to add the 

552 summary to. Defaults to [ops.GraphKeys.SUMMARIES] 

553 name: A name for the operation (optional). 

554 

555 Returns: 

556 A scalar `Tensor` of type `string`. The serialized `Summary` protocol 

557 buffer. 

558 """ 

559 with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope: 

560 sample_rate = ops.convert_to_tensor( 

561 sample_rate, dtype=dtypes.float32, name="sample_rate") 

562 val = gen_logging_ops.audio_summary_v2( 

563 tag=tag, 

564 tensor=tensor, 

565 max_outputs=max_outputs, 

566 sample_rate=sample_rate, 

567 name=scope) 

568 _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) 

569 return val 
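
# Added illustration (not part of the original module): a hedged usage sketch
# for audio_summary with one second of silent two-clip audio at 16 kHz; the
# shapes and sample rate are illustrative only.
def _example_audio_summary():  # pragma: no cover
  from tensorflow.python.ops import array_ops  # local import for the sketch
  waveform = array_ops.zeros([2, 16000, 1])  # [batch, frames, channels]
  return audio_summary("waveform", waveform, sample_rate=16000.0, max_outputs=2)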

570 

571 

572@deprecated("2016-11-30", "Please switch to tf.summary.merge.") 

573def merge_summary(inputs, collections=None, name=None): 

574 # pylint: disable=line-too-long 

575 """Merges summaries. 

576 

577 This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has 

578 identical 

579 behavior. 

580 

581 This op creates a 

582 [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) 

583 protocol buffer that contains the union of all the values in the input 

584 summaries. 

585 

586 When the Op is run, it reports an `InvalidArgument` error if multiple values 

587 in the summaries to merge use the same tag. 

588 

589 Args: 

590 inputs: A list of `string` `Tensor` objects containing serialized `Summary` 

591 protocol buffers. 

592 collections: Optional list of graph collections keys. The new summary op is 

593 added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. 

594 name: A name for the operation (optional). 

595 

596 Returns: 

597 A scalar `Tensor` of type `string`. The serialized `Summary` protocol 

598 buffer resulting from the merging. 

599 """ 

600 with ops.name_scope(name, "MergeSummary", inputs): 

601 val = gen_logging_ops.merge_summary(inputs=inputs, name=name) 

602 _Collect(val, collections, []) 

603 return val 
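
# Added illustration (not part of the original module): a hedged sketch that
# merges two individually created summaries into one serialized Summary proto.
# The tags and values are illustrative only.
def _example_merge_summary():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local imports
  from tensorflow.python.ops import array_ops
  s1 = scalar_summary("loss", constant_op.constant(0.5))
  s2 = histogram_summary("weights", array_ops.zeros([10]))
  return merge_summary([s1, s2])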

604 

605 

606@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.") 

607def merge_all_summaries(key=ops.GraphKeys.SUMMARIES): 

608 """Merges all summaries collected in the default graph. 

609 

610 This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which 

611 has 

612 identical behavior. 

613 

614 Args: 

615 key: `GraphKey` used to collect the summaries. Defaults to 

616 `GraphKeys.SUMMARIES`. 

617 

618 Returns: 

619 If no summaries were collected, returns None. Otherwise returns a scalar 

620 `Tensor` of type `string` containing the serialized `Summary` protocol 

621 buffer resulting from the merging. 

622 """ 

623 summary_ops = ops.get_collection(key) 

624 if not summary_ops: 

625 return None 

626 else: 

627 return merge_summary(summary_ops) 
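
# Added illustration (not part of the original module): a hedged TF1-style
# sketch that registers two scalar summaries and then merges everything in the
# SUMMARIES collection. Returns None if nothing was collected.
def _example_merge_all_summaries():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import
  scalar_summary("accuracy", constant_op.constant(0.9))
  scalar_summary("loss", constant_op.constant(0.1))
  return merge_all_summaries()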

628 

629 

630def get_summary_op(): 

631 """Returns a single Summary op that would run all summaries. 

632 

633 Either existing one from `SUMMARY_OP` collection or merges all existing 

634 summaries. 

635 

636 Returns: 

637 If no summaries were collected, returns None. Otherwise returns a scalar 

638 `Tensor` of type `string` containing the serialized `Summary` protocol 

639 buffer resulting from the merging. 

640 """ 

641 summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP) 

642 if summary_op is not None: 

643 if summary_op: 

644 summary_op = summary_op[0] 

645 else: 

646 summary_op = None 

647 if summary_op is None: 

648 summary_op = merge_all_summaries() 

649 if summary_op is not None: 

650 ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op) 

651 return summary_op 
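
# Added note (not part of the original module): get_summary_op caches the
# merged op in the SUMMARY_OP collection, so repeated calls return the same op
# instead of re-merging the SUMMARIES collection each time.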

652 

653 

654@deprecated( 

655 "2016-11-30", "Please switch to tf.summary.scalar. Note that " 

656 "tf.summary.scalar uses the node name instead of the tag. " 

657 "This means that TensorFlow will automatically de-duplicate summary " 

658 "names based on the scope they are created in. Also, passing a " 

659 "tensor or list of tags to a scalar summary op is no longer " 

660 "supported.") 

661def scalar_summary(tags, values, collections=None, name=None): 

662 # pylint: disable=line-too-long 

663 """Outputs a `Summary` protocol buffer with scalar values. 

664 

665 This op is deprecated. Please switch to tf.summary.scalar.

666 For an explanation of why this op was deprecated, and information on how to 

667 migrate, look 

668 ['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py) 

669 

670 The input `tags` and `values` must have the same shape. The generated 

671 summary has a summary value for each tag-value pair in `tags` and `values`. 

672 

673 Args: 

674 tags: A `string` `Tensor`. Tags for the summaries. 

675 values: A real numeric Tensor. Values for the summaries. 

676 collections: Optional list of graph collections keys. The new summary op is 

677 added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. 

678 name: A name for the operation (optional). 

679 

680 Returns: 

681 A scalar `Tensor` of type `string`. The serialized `Summary` protocol 

682 buffer. 

683 """ 

684 with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope: 

685 val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope) 

686 _Collect(val, collections, [ops.GraphKeys.SUMMARIES]) 

687 return val 
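
# Added illustration (not part of the original module): a hedged usage sketch
# for the deprecated scalar_summary op above. `tags` and `values` must share a
# shape, so a vector of tags pairs with a vector of values; the names and
# numbers are illustrative only.
def _example_scalar_summary():  # pragma: no cover
  from tensorflow.python.framework import constant_op  # local import
  tags = constant_op.constant(["loss", "accuracy"])
  values = constant_op.constant([0.25, 0.91])
  return scalar_summary(tags, values)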

688 

689 

690ops.NotDifferentiable("HistogramSummary") 

691ops.NotDifferentiable("ImageSummary") 

692ops.NotDifferentiable("AudioSummary") 

693ops.NotDifferentiable("AudioSummaryV2") 

694ops.NotDifferentiable("MergeSummary") 

695ops.NotDifferentiable("ScalarSummary") 

696ops.NotDifferentiable("TensorSummary") 

697ops.NotDifferentiable("TensorSummaryV2") 

698ops.NotDifferentiable("Timestamp")