Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/summary_ops_v2.py: 28%

502 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); 

4# you may not use this file except in compliance with the License. 

5# You may obtain a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, 

11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 

12# See the License for the specific language governing permissions and 

13# limitations under the License. 

14# ============================================================================== 

15 

16"""Operations to emit summaries.""" 

17 

18import abc 

19import collections 

20import functools 

21import os 

22import re 

23import threading 

24 

25from tensorflow.core.framework import graph_pb2 

26from tensorflow.core.framework import summary_pb2 

27from tensorflow.core.protobuf import config_pb2 

28from tensorflow.python.eager import context 

29from tensorflow.python.eager import profiler as _profiler 

30from tensorflow.python.framework import constant_op 

31from tensorflow.python.framework import dtypes 

32from tensorflow.python.framework import ops 

33from tensorflow.python.framework import smart_cond 

34from tensorflow.python.framework import tensor_util 

35from tensorflow.python.ops import array_ops 

36from tensorflow.python.ops import control_flow_ops 

37from tensorflow.python.ops import gen_resource_variable_ops 

38from tensorflow.python.ops import gen_summary_ops 

39from tensorflow.python.ops import math_ops 

40from tensorflow.python.ops import resource_variable_ops 

41from tensorflow.python.ops import summary_op_util 

42from tensorflow.python.platform import tf_logging as logging 

43from tensorflow.python.trackable import resource 

44from tensorflow.python.training import training_util 

45from tensorflow.python.util import deprecation 

46from tensorflow.python.util import tf_contextlib 

47from tensorflow.python.util.tf_export import tf_export 

48 

# Name for graph collection of summary writer init ops, which is only exposed
# as a legacy API for tf.contrib.summary in TF 1.x. Graph-mode writers append
# their init op to this collection; summary_writer_initializer_op() reads it.
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"

52 

53 

class _SummaryState(threading.local):
  """Per-thread holder for the default summary writer, step, and flags."""

  def __init__(self):
    super().__init__()
    # None means "no explicit recording condition set"; consumers substitute
    # a default (see _should_record_summaries_internal).
    self.is_recording = None
    # TODO(slebedev): why a separate flag for DS and is it on by default?
    self.is_recording_distribution_strategy = True
    # The default SummaryWriter for this thread, or None.
    self.writer = None
    # The default step value for this thread, or None.
    self.step = None


# Module-level singleton; thread-local, so each thread sees its own state.
_summary_state = _SummaryState()

66 

67 

class _SummaryContextManager:
  """Context manager to implement SummaryWriter.as_default()."""
  # Note: this is a class so that it's possible to implement `set_as_default()`
  # simply via `as_default().__enter__()`. We can't do that with @contextmanager
  # because the `finally` block will be executed when the generator is GCed.

  def __init__(self, writer, step=None):
    self._writer = writer
    self._step = step
    self._prev_writer = None
    self._prev_step = None

  def __enter__(self):
    # Save the previous defaults so they can be restored on exit.
    self._prev_writer = _summary_state.writer
    _summary_state.writer = self._writer
    if self._step is not None:
      self._prev_step = _summary_state.step
      _summary_state.step = self._step
    return self._writer

  def __exit__(self, *exc):
    # Flushes the summary writer in eager mode or in graph functions, but
    # not in legacy graph mode (you're on your own there).
    _summary_state.writer.flush()
    _summary_state.writer = self._prev_writer
    if self._step is not None:
      _summary_state.step = self._prev_step
    # Never suppress exceptions.
    return False

96 

97 

def _should_record_summaries_internal(default_state):
  """Returns boolean Tensor if summaries should/shouldn't be recorded.

  The decision is the logical AND of the following conditions: a default
  summary writer must be set; the user-controlled condition
  (`_summary_state.is_recording`, usually set via `record_if()`); and the
  condition controlled by DistributionStrategy
  (`_summary_state.is_recording_distribution_strategy`,
  tf.distribute.ReplicaContext).

  Args:
    default_state: can be True or False. The default summary behavior when
      summary writer is set and the user does not specify
      ctx.summary_recording and ctx.summary_recording_distribution_strategy
      is True.
  """
  if _summary_state.writer is None:
    return constant_op.constant(False)

  # Fast path: if the user condition is a statically-known False value,
  # skip building any further ops.
  if not callable(_summary_state.is_recording):
    static_cond = tensor_util.constant_value(_summary_state.is_recording)
    if static_cond is not None and not static_cond:
      return constant_op.constant(False)

  def _resolve(value):
    # Callables are evaluated on demand; plain values pass through.
    return value() if callable(value) else value

  cond_distributed = _resolve(_summary_state.is_recording_distribution_strategy)
  cond = _resolve(_summary_state.is_recording)
  if cond is None:
    cond = default_state
  return math_ops.logical_and(cond_distributed, cond)

127 

128 

@tf_export("summary.should_record_summaries", v1=[])
def should_record_summaries():
  """Returns boolean Tensor which is True if summaries will be recorded.

  If no default summary writer is currently registered, this always returns
  False. Otherwise, this reflects whether a recording condition has been set
  via `tf.summary.record_if()` (except that it may return False for some
  replicas when using `tf.distribute.Strategy`). If no recording condition
  is active, it defaults to True.
  """
  return _should_record_summaries_internal(default_state=True)

140 

141 

142# Legacy symbol used by tf.contrib.summary.should_record_summaries. 

def _legacy_contrib_should_record_summaries():
  """Returns boolean Tensor which is true if summaries should be recorded.

  Unlike the TF2 `should_record_summaries()`, the contrib default when no
  recording condition is set is False rather than True.
  """
  return _should_record_summaries_internal(default_state=False)

146 

147 

@tf_export("summary.record_if", v1=[])
@tf_contextlib.contextmanager
def record_if(condition):
  """Sets summary recording on or off per the provided boolean value.

  The provided value can be a python boolean, a scalar boolean Tensor, or
  a callable providing such a value; if a callable is passed it will be
  invoked on-demand to determine whether summary writing will occur. Note that
  when calling record_if() in an eager mode context, if you intend to provide a
  varying condition like `step % 100 == 0`, you must wrap this in a
  callable to avoid immediate eager evaluation of the condition. In particular,
  using a callable is the only way to have your condition evaluated as part of
  the traced body of an @tf.function that is invoked from within the
  `record_if()` context.

  Args:
    condition: can be True, False, a bool Tensor, or a callable providing such.

  Yields:
    Returns a context manager that sets this value on enter and restores the
    previous value on exit.
  """
  # Save-and-restore rather than set/unset, so that nested record_if() calls
  # compose correctly.
  old = _summary_state.is_recording
  try:
    _summary_state.is_recording = condition
    yield
  finally:
    _summary_state.is_recording = old

176 

177 

def has_default_writer():
  """Returns a boolean indicating whether a default summary writer exists.

  The default writer is thread-local state set via `SummaryWriter.as_default`
  or `set_as_default`.
  """
  return _summary_state.writer is not None

181 

182 

183# TODO(apassos) consider how to handle local step here. 

def record_summaries_every_n_global_steps(n, global_step=None):
  """Sets the should_record_summaries Tensor to true if global_step % n == 0."""
  if global_step is None:
    global_step = training_util.get_or_create_global_step()
  with ops.device("cpu:0"):
    on_nth_step = lambda: math_ops.equal(global_step % n, 0)
    # In eager mode keep the callable so the condition is re-evaluated each
    # time it is consulted; in graph mode build the condition tensor once.
    condition = on_nth_step if context.executing_eagerly() else on_nth_step()
  return record_if(condition)

193 

194 

def always_record_summaries():
  """Sets the should_record_summaries Tensor to always true.

  Returns a context manager (see `record_if`).
  """
  return record_if(True)

198 

199 

def never_record_summaries():
  """Sets the should_record_summaries Tensor to always false.

  Returns a context manager (see `record_if`).
  """
  return record_if(False)

203 

204 

@tf_export("summary.experimental.get_step", v1=[])
def get_step():
  """Returns the default summary step for the current thread.

  Returns:
    The step set by `tf.summary.experimental.set_step()` if one has been set,
    otherwise None.
  """
  # Thread-local: each thread has its own default step.
  return _summary_state.step

214 

215 

@tf_export("summary.experimental.set_step", v1=[])
def set_step(step):
  """Sets the default summary step for the current thread.

  For convenience, this function sets a default value for the `step` parameter
  used in summary-writing functions elsewhere in the API so that it need not
  be explicitly passed in every such invocation. The value can be a constant
  or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.

  Note: when using this with @tf.functions, the step value will be captured at
  the time the function is traced, so changes to the step outside the function
  will not be reflected inside the function unless using a `tf.Variable` step.

  Args:
    step: An `int64`-castable default step value, or None to unset.
  """
  _summary_state.step = step

233 

234 

@tf_export("summary.SummaryWriter", v1=[])
class SummaryWriter(metaclass=abc.ABCMeta):
  """Interface representing a stateful summary writer object."""

  def set_as_default(self, step=None):
    """Enables this summary writer for the current thread.

    For convenience, if `step` is not None, this function also sets a default
    value for the `step` parameter used in summary-writing functions elsewhere
    in the API so that it need not be explicitly passed in every such
    invocation. The value can be a constant or a variable.

    Note: when setting `step` in a @tf.function, the step value will be
    captured at the time the function is traced, so changes to the step outside
    the function will not be reflected inside the function unless using
    a `tf.Variable` step.

    Args:
      step: An `int64`-castable default step value, or `None`. When not `None`,
        the current step is modified to the given value. When `None`, the
        current step is not modified.
    """
    # Entering without a matching exit deliberately leaves this writer
    # installed as the thread's default.
    self.as_default(step).__enter__()

  def as_default(self, step=None):
    """Returns a context manager that enables summary writing.

    For convenience, if `step` is not None, this function also sets a default
    value for the `step` parameter used in summary-writing functions elsewhere
    in the API so that it need not be explicitly passed in every such
    invocation. The value can be a constant or a variable.

    Note: when setting `step` in a @tf.function, the step value will be
    captured at the time the function is traced, so changes to the step outside
    the function will not be reflected inside the function unless using
    a `tf.Variable` step.

    For example, `step` can be used as:

    ```python
    with writer_a.as_default(step=10):
      tf.summary.scalar(tag, value)  # Logged to writer_a with step 10
      with writer_b.as_default(step=20):
        tf.summary.scalar(tag, value)  # Logged to writer_b with step 20
      tf.summary.scalar(tag, value)  # Logged to writer_a with step 10
    ```

    Args:
      step: An `int64`-castable default step value, or `None`. When not `None`,
        the current step is captured, replaced by a given one, and the original
        one is restored when the context manager exits. When `None`, the current
        step is not modified (and not restored when the context manager exits).

    Returns:
      The context manager.
    """
    return _SummaryContextManager(self, step)

  def init(self):
    """Initializes the summary writer.

    Raises:
      NotImplementedError: must be overridden by concrete subclasses.
    """
    raise NotImplementedError()

  def flush(self):
    """Flushes any buffered data.

    Raises:
      NotImplementedError: must be overridden by concrete subclasses.
    """
    raise NotImplementedError()

  def close(self):
    """Flushes and closes the summary writer.

    Raises:
      NotImplementedError: must be overridden by concrete subclasses.
    """
    raise NotImplementedError()

304 

305 

class _ResourceSummaryWriter(SummaryWriter):
  """Implementation of SummaryWriter using a SummaryWriterInterface resource."""

  def __init__(self, create_fn, init_op_fn):
    self._resource = create_fn()
    self._init_op = init_op_fn(self._resource)
    self._closed = False
    if context.executing_eagerly():
      self._set_up_resource_deleter()
    else:
      # Graph mode: expose the init op via the legacy collection instead of
      # managing resource lifetime eagerly.
      ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op)

  # Extension point to be overridden by subclasses to customize deletion.
  def _set_up_resource_deleter(self):
    self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._resource, handle_device="cpu:0")

  def _ensure_open(self):
    # Raises if this writer was already closed (eager mode only; graph mode
    # has no closed state to check).
    if context.executing_eagerly() and self._closed:
      raise RuntimeError(f"SummaryWriter {self!r} is already closed")

  def set_as_default(self, step=None):
    """See `SummaryWriter.set_as_default`."""
    self._ensure_open()
    super().set_as_default(step)

  def as_default(self, step=None):
    """See `SummaryWriter.as_default`."""
    self._ensure_open()
    return super().as_default(step)

  def init(self):
    """See `SummaryWriter.init`."""
    self._ensure_open()
    return self._init_op

  def flush(self):
    """See `SummaryWriter.flush`."""
    if context.executing_eagerly() and self._closed:
      # Flushing a closed writer is a silent no-op, unlike using it.
      return
    with ops.device("cpu:0"):
      return gen_summary_ops.flush_summary_writer(self._resource)

  def close(self):
    """See `SummaryWriter.close`."""
    if context.executing_eagerly() and self._closed:
      return
    try:
      with ops.control_dependencies([self.flush()]):
        with ops.device("cpu:0"):
          return gen_summary_ops.close_summary_writer(self._resource)
    finally:
      # Mark closed even if the close op raised, to avoid repeated attempts.
      if context.executing_eagerly():
        self._closed = True

360 

361 

class _MultiMetaclass(
    type(_ResourceSummaryWriter), type(resource.TrackableResource)):
  # Combined metaclass that resolves the metaclass conflict between
  # `_ResourceSummaryWriter` (whose base `SummaryWriter` uses `abc.ABCMeta`)
  # and `resource.TrackableResource`, allowing both as bases of one class.
  pass

365 

366 

class _TrackableResourceSummaryWriter(
    _ResourceSummaryWriter,
    resource.TrackableResource,
    metaclass=_MultiMetaclass):
  """A `_ResourceSummaryWriter` subclass that implements `TrackableResource`.

  Being trackable makes the writer usable as a `tf.Module` property and
  compatible with SavedModel export (see `create_file_writer_v2`'s
  `experimental_trackable` option).
  """

  def __init__(self, create_fn, init_op_fn):
    # Resolve multiple inheritance via explicit calls to __init__() on parents.
    # TrackableResource must be initialized first so that `resource_handle`
    # (used by the lambda below) is available.
    resource.TrackableResource.__init__(self, device="/CPU:0")
    self._create_fn = create_fn
    self._init_op_fn = init_op_fn
    # Pass .resource_handle into _ResourceSummaryWriter parent class rather than
    # create_fn, to ensure it accesses the resource handle only through the
    # cached property so that everything is using a single resource handle.
    _ResourceSummaryWriter.__init__(
        self, create_fn=lambda: self.resource_handle, init_op_fn=init_op_fn)

  # Override for TrackableResource implementation.
  def _create_resource(self):
    return self._create_fn()

  # Override for TrackableResource implementation.
  def _initialize(self):
    return self._init_op_fn(self.resource_handle)

  # Override for TrackableResource implementation.
  def _destroy_resource(self):
    gen_resource_variable_ops.destroy_resource_op(
        self.resource_handle, ignore_lookup_error=True)

  def _set_up_resource_deleter(self):
    # Override to suppress ResourceSummaryWriter implementation; we don't need
    # the deleter since TrackableResource.__del__() handles it for us.
    pass

401 

402 

class _LegacyResourceSummaryWriter(SummaryWriter):
  """Legacy resource-backed SummaryWriter for tf.contrib.summary."""

  def __init__(self, resource, init_op_fn):
    self._resource = resource
    self._init_op_fn = init_op_fn
    init_op = self.init()
    if not context.executing_eagerly():
      # Graph mode: register the init op in the legacy collection.
      ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)
    else:
      # Eager mode: tie resource cleanup to this object's lifetime.
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="cpu:0")

  def init(self):
    """See `SummaryWriter.init`."""
    return self._init_op_fn(self._resource)

  def flush(self):
    """See `SummaryWriter.flush`."""
    with ops.device("cpu:0"):
      return gen_summary_ops.flush_summary_writer(self._resource)

  def close(self):
    """See `SummaryWriter.close`."""
    # Flush before closing so buffered events are not dropped.
    with ops.control_dependencies([self.flush()]):
      with ops.device("cpu:0"):
        return gen_summary_ops.close_summary_writer(self._resource)

430 

431 

class _NoopSummaryWriter(SummaryWriter):
  """A summary writer that does nothing, for create_noop_writer()."""

  def set_as_default(self, step=None):
    # Deliberately does not install itself as the default writer.
    pass

  @tf_contextlib.contextmanager
  def as_default(self, step=None):
    # Yields nothing and installs nothing; summaries written inside the
    # context go to whatever writer (if any) was already the default.
    yield

  def init(self):
    pass

  def flush(self):
    pass

  def close(self):
    pass

450 

451 

@tf_export(v1=["summary.initialize"])
def initialize(
    graph=None,  # pylint: disable=redefined-outer-name
    session=None):
  """Initializes summary writing for graph execution mode.

  This operation is a no-op when executing eagerly.

  This helper method provides a higher-level alternative to using
  `tf.contrib.summary.summary_writer_initializer_op` and
  `tf.contrib.summary.graph`.

  Most users will also want to call `tf.compat.v1.train.create_global_step`
  which can happen before or after this function is called.

  Args:
    graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
      This function will not write the default graph by default. When
      writing to an event log file, the associated step will be zero.
    session: So this method can call `tf.Session.run`. This defaults
      to `tf.compat.v1.get_default_session`.

  Raises:
    RuntimeError: If the current thread has no default
      `tf.contrib.summary.SummaryWriter`.
    ValueError: If session wasn't passed and no default session.
  """
  if context.executing_eagerly():
    return
  if _summary_state.writer is None:
    raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
  if session is None:
    session = ops.get_default_session()
    if session is None:
      # Fixed: the original message had an unbalanced backtick
      # ("Argument `session must be passed ...").
      raise ValueError("Argument `session` must be passed if no default "
                       "session exists")
  session.run(summary_writer_initializer_op())
  if graph is not None:
    data = _serialize_graph(graph)
    # Feed the serialized graph through a placeholder to avoid embedding a
    # potentially large constant in the graph itself.
    x = array_ops.placeholder(dtypes.string)
    session.run(graph_v1(x, 0), feed_dict={x: data})

493 

494 

@tf_export("summary.create_file_writer", v1=[])
def create_file_writer_v2(logdir,
                          max_queue=None,
                          flush_millis=None,
                          filename_suffix=None,
                          name=None,
                          experimental_trackable=False):
  """Creates a summary file writer for the given log directory.

  Args:
    logdir: a string specifying the directory in which to write an event file.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: a name for the op that creates the writer.
    experimental_trackable: a boolean that controls whether the returned writer
      will be a `TrackableResource`, which makes it compatible with SavedModel
      when used as a `tf.Module` property.

  Returns:
    A SummaryWriter object.

  Raises:
    ValueError: if `logdir` is None.
  """
  if logdir is None:
    raise ValueError("Argument `logdir` cannot be None")
  # Captured before init_scope() so it reflects the caller's context.
  inside_function = ops.inside_function()
  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
    # Run init inside an init_scope() to hoist it out of tf.functions.
    with ops.init_scope():
      if context.executing_eagerly():
        _check_create_file_writer_args(
            inside_function,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix)
      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
      if max_queue is None:
        max_queue = constant_op.constant(10)
      if flush_millis is None:
        # 2 minutes, expressed in milliseconds.
        flush_millis = constant_op.constant(2 * 60 * 1000)
      if filename_suffix is None:
        filename_suffix = constant_op.constant(".v2")

      def create_fn():
        # Use unique shared_name to prevent resource sharing in eager mode, but
        # otherwise use a fixed shared_name to allow SavedModel TF 1.x loading.
        if context.executing_eagerly():
          shared_name = context.anonymous_name()
        else:
          shared_name = ops.name_from_scope_name(scope)  # pylint: disable=protected-access
        return gen_summary_ops.summary_writer(
            shared_name=shared_name, name=name)

      # Deferred: invoked by the writer with the resource handle, so the
      # writer can (re-)initialize itself.
      init_op_fn = functools.partial(
          gen_summary_ops.create_summary_file_writer,
          logdir=logdir,
          max_queue=max_queue,
          flush_millis=flush_millis,
          filename_suffix=filename_suffix)
      if experimental_trackable:
        return _TrackableResourceSummaryWriter(
            create_fn=create_fn, init_op_fn=init_op_fn)
      else:
        return _ResourceSummaryWriter(
            create_fn=create_fn, init_op_fn=init_op_fn)

561 

562 

def create_file_writer(logdir,
                       max_queue=None,
                       flush_millis=None,
                       filename_suffix=None,
                       name=None):
  """Creates a summary file writer in the current context under the given name.

  Args:
    logdir: a string, or None. If a string, creates a summary file writer
     which writes to the directory named by the string. If None, returns
     a mock object which acts like a summary writer but does nothing,
     useful to use as a context manager.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: Shared name for this SummaryWriter resource stored to default
      Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
      summary writer resource with this shared name already exists, the returned
      SummaryWriter wraps that resource and the other arguments have no effect.

  Returns:
    Either a summary writer or an empty object which can be used as a
    summary writer.
  """
  if logdir is None:
    return _NoopSummaryWriter()
  logdir = str(logdir)
  with ops.device("cpu:0"):
    # Fill in defaults as constant tensors.
    max_queue = constant_op.constant(10) if max_queue is None else max_queue
    if flush_millis is None:
      flush_millis = constant_op.constant(2 * 60 * 1000)
    if filename_suffix is None:
      filename_suffix = constant_op.constant(".v2")
    if name is None:
      name = "logdir:" + logdir
    # Local name chosen to avoid shadowing the imported `resource` module.
    writer_resource = gen_summary_ops.summary_writer(shared_name=name)
    return _LegacyResourceSummaryWriter(
        resource=writer_resource,
        init_op_fn=functools.partial(
            gen_summary_ops.create_summary_file_writer,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix))

609 

610 

@tf_export("summary.create_noop_writer", v1=[])
def create_noop_writer():
  """Returns a summary writer that does nothing.

  This is useful as a placeholder in code that expects a context manager.

  Returns:
    A `_NoopSummaryWriter` whose methods are all no-ops.
  """
  return _NoopSummaryWriter()

618 

619 

def _cleanse_string(name, pattern, value):
  """Validates a string against `pattern` and converts it to a string tensor.

  Non-string values skip validation and are converted directly.
  """
  if isinstance(value, str):
    if pattern.search(value) is None:
      raise ValueError(f"{name} ({value}) must match {pattern.pattern}")
  return ops.convert_to_tensor(value, dtypes.string)

624 

625 

def _nothing():
  """Convenient else branch for when summaries do not record.

  Returns a False constant so the branch's return matches the boolean
  returned by the recording branch in `smart_cond` (see `write`).
  """
  return constant_op.constant(False)

629 

630 

@tf_export(v1=["summary.all_v2_summary_ops"])
def all_v2_summary_ops():
  """Returns all V2-style summary ops defined in the current default graph.

  This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
  for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
  does *not* include TF 1.x tf.summary ops.

  Returns:
    List of summary ops, or None if called under eager execution.
  """
  if context.executing_eagerly():
    return None
  # Summary-writing ops register themselves in this private graph collection
  # (see `write` / `write_raw_pb`).
  return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access

645 

646 

def summary_writer_initializer_op():
  """Graph-mode only. Returns the list of ops to create all summary writers.

  Returns:
    The initializer ops.

  Raises:
    RuntimeError: If in Eager mode.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "tf.contrib.summary.summary_writer_initializer_op is only "
        "supported in graph mode.")
  # Writers created in graph mode add their init op to this collection
  # (see `_ResourceSummaryWriter` / `_LegacyResourceSummaryWriter`).
  return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)

661 

662 

# Matches any character not legal in a name scope: anything other than
# letters, digits, '-', '_', '/', and '.'. Used by summary_scope() to strip
# illegal characters from user-supplied names.
_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")

664 

665 

@tf_export("summary.experimental.summary_scope", v1=[])
@tf_contextlib.contextmanager
def summary_scope(name, default_name="summary", values=None):
  """Experimental context manager for use when defining a custom summary op.

  This behaves similarly to `tf.name_scope`, except that it returns a generated
  summary tag in addition to the scope name. The tag is structurally similar to
  the scope name - derived from the user-provided name, prefixed with enclosing
  name scopes if any - but we relax the constraint that it be uniquified, as
  well as the character set limitation (so the user-provided name can contain
  characters not legal for scope names; in the scope name these are removed).

  This makes the summary tag more predictable and consistent for the user.

  For example, to define a new summary op called `my_op`:

  ```python
  def my_op(name, my_value, step):
    with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
      my_value = tf.convert_to_tensor(my_value)
      return tf.summary.write(tag, my_value, step=step)
  ```

  Args:
    name: string name for the summary.
    default_name: Optional; if provided, used as default name of the summary.
    values: Optional; passed as `values` parameter to name_scope.

  Yields:
    A tuple `(tag, scope)` as described above.
  """
  chosen_name = name or default_name
  enclosing = ops.get_name_scope()
  # The tag keeps the user's characters verbatim, prefixed by the scope path.
  tag = "/".join([enclosing, chosen_name]) if enclosing else chosen_name
  # Strip illegal characters from the scope name, and if that leaves nothing,
  # use None instead so we pick up the default name.
  scope_name = _INVALID_SCOPE_CHARACTERS.sub("", chosen_name) or None
  with ops.name_scope(
      scope_name, default_name, values, skip_on_eager=False) as scope:
    yield tag, scope

705 

706 

@tf_export("summary.write", v1=[])
def write(tag, tensor, step=None, metadata=None, name=None):
  """Writes a generic summary to the default SummaryWriter if one exists.

  This exists primarily to support the definition of type-specific summary ops
  like scalar() and image(), and is not intended for direct use unless defining
  a new type-specific summary op.

  Args:
    tag: string tag used to identify the summary (e.g. in TensorBoard), usually
      generated with `tf.summary.summary_scope`
    tensor: the Tensor holding the summary data to write or a callable that
      returns this Tensor. If a callable is passed, it will only be called when
      a default SummaryWriter exists and the recording condition specified by
      `record_if()` is met.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    metadata: Optional SummaryMetadata, as a proto or serialized bytes
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  with ops.name_scope(name, "write_summary") as scope:
    if _summary_state.writer is None:
      return constant_op.constant(False)
    if step is None:
      step = get_step()
    # Accept metadata as a proto (duck-typed on SerializeToString), serialized
    # bytes, or None (empty bytes).
    if metadata is None:
      serialized_metadata = b""
    elif hasattr(metadata, "SerializeToString"):
      serialized_metadata = metadata.SerializeToString()
    else:
      serialized_metadata = metadata

    def record():
      """Record the actual summary and return True."""
      # The missing-step error is raised here, inside the recording branch,
      # so it only fires when a summary would actually have been written.
      if step is None:
        raise ValueError("No step set. Please specify one either through the "
                         "`step` argument or through "
                         "tf.summary.experimental.set_step()")

      # Note the identity to move the tensor to the CPU.
      with ops.device("cpu:0"):
        # A callable `tensor` is only evaluated here, when recording happens.
        summary_tensor = tensor() if callable(tensor) else array_ops.identity(
            tensor)
        write_summary_op = gen_summary_ops.write_summary(
            _summary_state.writer._resource,  # pylint: disable=protected-access
            step,
            summary_tensor,
            tag,
            serialized_metadata,
            name=scope)
        with ops.control_dependencies([write_summary_op]):
          return constant_op.constant(True)

    # smart_cond evaluates `record` only when the recording condition holds
    # (statically short-circuiting when the condition is a known constant).
    op = smart_cond.smart_cond(
        should_record_summaries(), record, _nothing, name="summary_cond")
    if not context.executing_eagerly():
      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
    return op

774 

775 

@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
  """Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.

  Experimental: this exists to support the usage of V1-style manual summary
  writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
  with the V2 summary writing API.

  Args:
    tensor: the string Tensor holding one or more serialized `Summary` protobufs
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    name: Optional string name for this op.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  with ops.name_scope(name, "write_raw_pb") as scope:
    if _summary_state.writer is None:
      return constant_op.constant(False)
    if step is None:
      step = get_step()
      # Unlike `write()`, the missing-step error is raised up front here
      # rather than deferred into the recording branch.
      if step is None:
        raise ValueError("No step set. Please specify one either through the "
                         "`step` argument or through "
                         "tf.summary.experimental.set_step()")

    def record():
      """Record the actual summary and return True."""
      # Note the identity to move the tensor to the CPU.
      with ops.device("cpu:0"):
        raw_summary_op = gen_summary_ops.write_raw_proto_summary(
            _summary_state.writer._resource,  # pylint: disable=protected-access
            step,
            array_ops.identity(tensor),
            name=scope)
        with ops.control_dependencies([raw_summary_op]):
          return constant_op.constant(True)

    with ops.device("cpu:0"):
      op = smart_cond.smart_cond(
          should_record_summaries(), record, _nothing, name="summary_cond")
      if not context.executing_eagerly():
        ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
      return op

827 

828 

def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  name_scope = ops.get_name_scope()
  if name_scope:
    # Add a slash to allow reentering the name scope.
    name_scope += "/"
  def record():
    # Re-enter the caller's name scope so the summary tag reflects the
    # surrounding scope, then derive (tag, scope) via the legacy helper.
    with ops.name_scope(name_scope), summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      # Ensure the write op produced by `function` runs before True is
      # returned.
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  # No default writer: writing is a no-op.
  if _summary_state.writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    # Uses the legacy contrib recording condition (not the V2 one).
    op = smart_cond.smart_cond(
        _legacy_contrib_should_record_summaries(), record, _nothing, name="")
    if not context.executing_eagerly():
      # Register for collection-based graph-mode runners.
      ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
    return op

859 

860 

def generic(name, tensor, metadata=None, family=None, step=None):
  """Writes a tensor summary if possible."""

  def _write(tag, scope):
    # Serialize the metadata: protos are serialized eagerly, a pre-serialized
    # value is passed through, and absent metadata becomes an empty string.
    if metadata is None:
      serialized = constant_op.constant("")
    elif hasattr(metadata, "SerializeToString"):
      serialized = constant_op.constant(metadata.SerializeToString())
    else:
      serialized = metadata
    # Note the identity to move the tensor to the CPU.
    return gen_summary_ops.write_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        array_ops.identity(tensor),
        tag,
        serialized,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)

880 

881 

def scalar(name, tensor, family=None, step=None):
  """Writes a scalar summary if possible.

  Unlike `tf.contrib.summary.generic` this op may change the dtype
  depending on the writer, for both practical and efficiency concerns.

  Args:
    name: An arbitrary name for this summary.
    tensor: A `tf.Tensor` Must be one of the following types:
      `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
      `int8`, `uint16`, `half`, `uint32`, `uint64`.
    family: Optional, the summary's family.
    step: The `int64` monotonic step variable, which defaults
      to `tf.compat.v1.train.get_global_step`.

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.
  """

  def _write(tag, scope):
    # The identity moves the value onto the CPU before writing.
    return gen_summary_ops.write_scalar_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)

912 

913 

def histogram(name, tensor, family=None, step=None):
  """Writes a histogram summary if possible."""

  def _write(tag, scope):
    # The identity moves the values onto the CPU before writing.
    return gen_summary_ops.write_histogram_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)

927 

928 

def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
  """Writes an image summary if possible."""

  def _write(tag, scope):
    # Default "bad pixel" color is opaque red (RGBA).
    if bad_color is None:
      bad_pixel = constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
    else:
      bad_pixel = bad_color
    # The identity moves the image data onto the CPU before writing.
    return gen_summary_ops.write_image_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        bad_pixel,
        max_images,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)

946 

947 

def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
  """Writes an audio summary if possible."""

  def _write(tag, scope):
    # The identity moves the samples onto the CPU before writing.
    return gen_summary_ops.write_audio_summary(
        _summary_state.writer._resource,  # pylint: disable=protected-access
        _choose_step(step),
        tag,
        array_ops.identity(tensor),
        sample_rate=sample_rate,
        max_outputs=max_outputs,
        name=scope)

  return summary_writer_function(name, tensor, _write, family=family)

963 

964 

def graph_v1(param, step=None, name=None):
  """Writes a TensorFlow graph to the summary interface.

  The graph summary is, strictly speaking, not a summary. Conditions
  like `tf.summary.should_record_summaries` do not apply. Only
  a single graph can be associated with a particular run. If multiple
  graphs are written, then only the last one will be considered by
  TensorBoard.

  When not using eager execution mode, the user should consider passing
  the `graph` parameter to `tf.compat.v1.summary.initialize` instead of
  calling this function. Otherwise special care needs to be taken when
  using the graph to record the graph.

  Args:
    param: A `tf.Tensor` containing a serialized graph proto. When
      eager execution is enabled, this function will automatically
      coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types.
    step: The global step variable. This doesn't have useful semantics
      for graph summaries, but is used anyway, due to the structure of
      event log files. This defaults to the global step.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation` or a `tf.no_op` if summary writing has
    not been enabled for this context.

  Raises:
    TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
  """
  # Graph-mode callers must hand us a tensor; the Graph/GraphDef coercion
  # below only works eagerly.
  if not context.executing_eagerly() and not isinstance(param, ops.Tensor):
    raise TypeError("graph() needs a argument `param` to be tf.Tensor "
                    "(e.g. tf.placeholder) in graph mode, but received "
                    f"param={param} of type {type(param).__name__}.")
  writer = _summary_state.writer
  # No default writer: writing is a no-op.
  if writer is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    # Eager-only coercion: serialize a Graph/GraphDef to a string tensor;
    # anything else is assumed to already be a serialized-proto tensor.
    if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
      tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
    else:
      tensor = array_ops.identity(param)
    return gen_summary_ops.write_graph_summary(
        writer._resource, _choose_step(step), tensor, name=name)  # pylint: disable=protected-access

1009 

1010 

@tf_export("summary.graph", v1=[])
def graph(graph_data):
  """Writes a TensorFlow graph summary.

  Write an instance of `tf.Graph` or `tf.compat.v1.GraphDef` as summary only
  in an eager mode. Please prefer to use the trace APIs (`tf.summary.trace_on`,
  `tf.summary.trace_off`, and `tf.summary.trace_export`) when using
  `tf.function` which can automatically collect and record graphs from
  executions.

  Usage Example:
  ```py
  writer = tf.summary.create_file_writer("/tmp/mylogs")

  @tf.function
  def f():
    x = constant_op.constant(2)
    y = constant_op.constant(3)
    return x**y

  with writer.as_default():
    tf.summary.graph(f.get_concrete_function().graph)

  # Another example: in a very rare use case, when you are dealing with a TF v1
  # graph.
  graph = tf.Graph()
  with graph.as_default():
    c = tf.constant(30.0)
  with writer.as_default():
    tf.summary.graph(graph)
  ```

  Args:
    graph_data: The TensorFlow graph to write, as a `tf.Graph` or a
      `tf.compat.v1.GraphDef`.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: `graph` summary API is invoked in a graph mode.
  """
  # This API is eager-only; graph-mode users should use trace APIs instead.
  if not context.executing_eagerly():
    raise ValueError("graph() cannot be invoked inside a graph context.")
  writer = _summary_state.writer
  # No default writer: report failure (False) rather than raising.
  if writer is None:
    return constant_op.constant(False)
  with ops.device("cpu:0"):
    # Unlike graph_v1(), this V2 API does honor the recording condition.
    if not should_record_summaries():
      return constant_op.constant(False)

    if isinstance(graph_data, (ops.Graph, graph_pb2.GraphDef)):
      tensor = ops.convert_to_tensor(
          _serialize_graph(graph_data), dtypes.string)
    else:
      raise ValueError("Argument 'graph_data' is not tf.Graph or "
                       "tf.compat.v1.GraphDef. Received graph_data="
                       f"{graph_data} of type {type(graph_data).__name__}.")

    gen_summary_ops.write_graph_summary(
        writer._resource,  # pylint: disable=protected-access
        # Graph does not have step. Set to 0.
        0,
        tensor,
    )
    return constant_op.constant(True)

1078 

1079 

def import_event(tensor, name=None):
  """Writes a `tf.compat.v1.Event` binary proto.

  This can be used to import existing event logs into a new summary writer sink.
  Please note that this is lower level than the other summary functions and
  will ignore the `tf.summary.should_record_summaries` setting.

  Args:
    tensor: A `tf.Tensor` of type `string` containing a serialized
      `tf.compat.v1.Event` proto.
    name: A name for the operation (optional).

  Returns:
    The created `tf.Operation`.
  """
  writer_resource = _summary_state.writer._resource  # pylint: disable=protected-access
  return gen_summary_ops.import_event(writer_resource, tensor, name=name)

1097 

1098 

@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
  """Forces summary writer to send any buffered data to storage.

  This operation blocks until that finishes.

  Args:
    writer: The `tf.summary.SummaryWriter` to flush. If None, the current
      default writer will be used instead; if there is no current writer, this
      returns `tf.no_op`.
    name: Ignored legacy argument for a name for the operation.

  Returns:
    The created `tf.Operation`.
  """
  del name  # unused
  # Fall back to the default writer when none was passed explicitly.
  target = _summary_state.writer if writer is None else writer
  if target is None:
    return control_flow_ops.no_op()
  if not isinstance(target, SummaryWriter):
    raise ValueError("Invalid argument to flush(): %r" % (target,))
  return target.flush()

1122 

1123 

def legacy_raw_flush(writer=None, name=None):
  """Legacy version of flush() that accepts a raw resource tensor for `writer`.

  Do not use this function in any new code. Not supported and not part of the
  public TF APIs.

  Args:
    writer: The `tf.summary.SummaryWriter` to flush. If None, the current
      default writer will be used instead; if there is no current writer, this
      returns `tf.no_op`. For this legacy version only, also accepts a raw
      resource tensor pointing to the underlying C++ writer resource.
    name: Ignored legacy argument for a name for the operation.

  Returns:
    The created `tf.Operation`.
  """
  if writer is not None and not isinstance(writer, SummaryWriter):
    # Legacy fallback in case we were passed a raw resource tensor.
    with ops.device("cpu:0"):
      return gen_summary_ops.flush_summary_writer(writer, name=name)
  # Forward to the TF2 implementation of flush() when possible.
  return flush(writer, name)

1147 

1148 

def eval_dir(model_dir, name=None):
  """Construct a logdir for an eval summary writer."""
  # A falsy name (None or "") yields the plain "eval" subdirectory.
  suffix = "eval_" + name if name else "eval"
  return os.path.join(model_dir, suffix)

1152 

1153 

@deprecation.deprecated(date=None,
                        instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
  """Please use `tf.contrib.summary.create_file_writer`."""
  # Deprecated alias: warn, then forward all arguments unchanged.
  logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
                  "to create_file_writer")
  return create_file_writer(*args, **kwargs)

1161 

1162 

def _serialize_graph(arbitrary_graph):
  """Serializes a `tf.Graph` or a GraphDef-like proto to bytes."""
  # Anything that is not a tf.Graph is assumed to already expose
  # SerializeToString (e.g. a GraphDef proto).
  if not isinstance(arbitrary_graph, ops.Graph):
    return arbitrary_graph.SerializeToString()
  # Include shape annotations so TensorBoard can display tensor shapes.
  return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString()

1168 

1169 

def _choose_step(step):
  """Resolves `step` to a tensor, defaulting to the global step."""
  if step is None:
    return training_util.get_or_create_global_step()
  # Tensors pass through unchanged; plain values are coerced to int64.
  if isinstance(step, ops.Tensor):
    return step
  return ops.convert_to_tensor(step, dtypes.int64)

1176 

1177 

def _check_create_file_writer_args(inside_function, **kwargs):
  """Helper to check the validity of arguments to a create_file_writer() call.

  Args:
    inside_function: whether the create_file_writer() call is in a tf.function
    **kwargs: the arguments to check, as kwargs to give them names.

  Raises:
    ValueError: if the arguments are graph tensors.
  """
  for arg_name, arg in kwargs.items():
    # A symbolic (graph) tensor is a TF type that is not an eager tensor.
    is_graph_tensor = (not isinstance(arg, ops.EagerTensor)
                       and tensor_util.is_tf_type(arg))
    if not is_graph_tensor:
      continue
    if inside_function:
      raise ValueError(
          f"Invalid graph Tensor argument '{arg_name}={arg}' to "
          "create_file_writer() inside an @tf.function. The create call will "
          "be lifted into the outer eager execution context, so it cannot "
          "consume graph tensors defined inside the function body.")
    raise ValueError(
        f"Invalid graph Tensor argument '{arg_name}={arg}' to eagerly "
        "executed create_file_writer().")

1200 

1201 

def run_metadata(name, data, step=None):
  """Writes entire RunMetadata summary.

  A RunMetadata can contain DeviceStats, partition graphs, and function graphs.
  Please refer to the proto for definition of each field.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  summary_metadata = summary_pb2.SummaryMetadata()
  # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
  # the rationale.
  summary_metadata.plugin_data.plugin_name = "graph_run_metadata"
  # version number = 1
  summary_metadata.plugin_data.content = b"1"

  with summary_scope(name,
                     "graph_run_metadata_summary",
                     [data, step]) as (tag, _):
    # Serialize on the CPU; the resulting string tensor is what gets written.
    with ops.device("cpu:0"):
      tensor = constant_op.constant(data.SerializeToString(),
                                    dtype=dtypes.string)
    return write(
        tag=tag,
        tensor=tensor,
        step=step,
        metadata=summary_metadata)

1242 

1243 

def run_metadata_graphs(name, data, step=None):
  """Writes graphs from a RunMetadata summary.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A RunMetadata proto to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or false if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  summary_metadata = summary_pb2.SummaryMetadata()
  # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
  # the rationale.
  summary_metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
  # version number = 1
  summary_metadata.plugin_data.content = b"1"

  # Keep only the graph fields; drop device stats and other RunMetadata
  # content before writing.
  data = config_pb2.RunMetadata(
      function_graphs=data.function_graphs,
      partition_graphs=data.partition_graphs)

  with summary_scope(name,
                     "graph_run_metadata_graph_summary",
                     [data, step]) as (tag, _):
    # Serialize on the CPU; the resulting string tensor is what gets written.
    with ops.device("cpu:0"):
      tensor = constant_op.constant(data.SerializeToString(),
                                    dtype=dtypes.string)
    return write(
        tag=tag,
        tensor=tensor,
        step=step,
        metadata=summary_metadata)

1285 

1286 

# Pair of flags recorded for the active trace: whether graph collection and/or
# the profiler were enabled by trace_on().
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
# Guards reads/writes of `_current_trace_context` across threads.
_current_trace_context_lock = threading.Lock()
# Non-None iff a trace started by trace_on() is currently active.
_current_trace_context = None

1290 

1291 

@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False):  # pylint: disable=redefined-outer-name
  """Starts a trace to record computation graphs and profiling information.

  Must be invoked in eager mode.

  When enabled, TensorFlow runtime will collect information that can later be
  exported and consumed by TensorBoard. The trace is activated across the entire
  TensorFlow runtime and affects all threads of execution.

  To stop the trace and export the collected information, use
  `tf.summary.trace_export`. To stop the trace without exporting, use
  `tf.summary.trace_off`.

  Args:
    graph: If True, enables collection of executed graphs. It includes ones from
        tf.function invocation and ones from the legacy graph mode. The default
        is True.
    profiler: If True, enables the advanced profiler. Enabling profiler
        implicitly enables the graph collection. The profiler may incur a high
        memory overhead. The default is False.

  """
  if ops.inside_function():
    logging.warn("Cannot enable trace inside a tf.function.")
    return
  if not context.executing_eagerly():
    logging.warn("Must enable trace in eager mode.")
    return

  global _current_trace_context
  with _current_trace_context_lock:
    # Starting a second trace while one is active is a no-op (with a warning).
    if _current_trace_context:
      logging.warn("Trace already enabled")
      return

    # The profiler's run-metadata collection subsumes graph collection, so
    # graph-only collection is enabled only when the profiler is off.
    if graph and not profiler:
      context.context().enable_graph_collection()
    if profiler:
      context.context().enable_run_metadata()
      _profiler.start()

    _current_trace_context = _TraceContext(graph=graph, profiler=profiler)

1335 

1336 

@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
  """Stops and exports the active trace as a Summary and/or profile file.

  Stops the trace and exports all metadata collected during the trace to the
  default SummaryWriter, if one has been set.

  Args:
    name: A name for the summary to be written.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.
    profiler_outdir: Output directory for profiler. It is required when profiler
      is enabled when trace was started. Otherwise, it is ignored.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  # TODO(stephanlee): See if we can remove profiler_outdir and infer it from
  # the SummaryWriter's logdir.
  global _current_trace_context

  if ops.inside_function():
    logging.warn("Cannot export trace inside a tf.function.")
    return
  if not context.executing_eagerly():
    logging.warn("Can only export trace while executing eagerly.")
    return

  with _current_trace_context_lock:
    if _current_trace_context is None:
      raise ValueError("Must enable trace before export through "
                       "tf.summary.trace_on.")
    graph, profiler = _current_trace_context  # pylint: disable=redefined-outer-name
    if profiler and profiler_outdir is None:
      raise ValueError("Argument `profiler_outdir` is not specified.")

  run_meta = context.context().export_run_metadata()

  # Graph-only traces write just the graphs; with the profiler on, the full
  # RunMetadata (device stats etc.) is written instead.
  if graph and not profiler:
    run_metadata_graphs(name, run_meta, step)
  else:
    run_metadata(name, run_meta, step)

  if profiler:
    _profiler.save(profiler_outdir, _profiler.stop())

  # Always disable the trace once exported.
  trace_off()

1386 

1387 

@tf_export("summary.trace_off", v1=[])
def trace_off():
  """Stops the current trace and discards any collected information."""
  global _current_trace_context
  with _current_trace_context_lock:
    if _current_trace_context is None:
      return  # tracing already off
    graph, profiler = _current_trace_context  # pylint: disable=redefined-outer-name, unpacking-non-sequence
    _current_trace_context = None

  if graph:
    # Disabling run_metadata disables graph collection as well.
    context.context().disable_run_metadata()

  if profiler:
    try:
      _profiler.stop()
    except _profiler.ProfilerNotRunningError:
      # trace_export() may already have stopped the profiler; ignore.
      pass