Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/profiler/profiler_v2.py: 44%
55 statements
coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
15"""TensorFlow 2.x Profiler.
17The profiler has two modes:
18- Programmatic Mode: start(logdir), stop(), and Profiler class. Profiling starts
19 when calling start(logdir) or create a Profiler class.
20 Profiling stops when calling stop() to save to
21 TensorBoard logdir or destroying the Profiler class.
22- Sampling Mode: start_server(). It will perform profiling after receiving a
23 profiling request.
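
For example, a minimal sketch of both modes ('logdir_path' and port 6009 are
placeholder values):

```python
# Programmatic Mode: trace an explicit block of code.
tf.profiler.experimental.start('logdir_path')
# ... run the code to profile ...
tf.profiler.experimental.stop()

# Sampling Mode: start a server; profiling happens when a trace is requested,
# e.g. from TensorBoard's Profile tab.
tf.profiler.experimental.server.start(6009)
```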

NOTE: Only one active profiler session is allowed. Use of simultaneous
Programmatic Mode and Sampling Mode is undefined and will likely fail.

NOTE: The Keras TensorBoard callback will automatically perform sampled
profiling. Before enabling customized profiling, set the callback flag
"profile_batch=0" to disable automatic sampled profiling.
"""

import collections
import threading

from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler.internal import _pywrap_profiler
from tensorflow.python.util.tf_export import tf_export


_profiler = None
_profiler_lock = threading.Lock()


@tf_export('profiler.experimental.ProfilerOptions', v1=[])
class ProfilerOptions(
    collections.namedtuple('ProfilerOptions', [
        'host_tracer_level', 'python_tracer_level', 'device_tracer_level',
        'delay_ms'
    ])):
  """Options for finer control over the profiler.

  Use `tf.profiler.experimental.ProfilerOptions` to control `tf.profiler`
  behavior.

  Fields:
    host_tracer_level: Adjust CPU tracing level. Values are: `1` - critical info
      only, `2` - info, `3` - verbose. [default value is `2`]
    python_tracer_level: Toggle tracing of Python function calls. Values are:
      `1` - enabled, `0` - disabled [default value is `0`]
    device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are:
      `1` - enabled, `0` - disabled [default value is `1`]
    delay_ms: Requests for all hosts to start profiling at a timestamp that is
      `delay_ms` away from the current time. `delay_ms` is in milliseconds. If
      zero, each host will start profiling immediately upon receiving the
      request. Default value is `None`, allowing the profiler to guess the best
      value.
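
  Example usage (an illustrative sketch; the chosen tracing levels are
  arbitrary):

  ```python
  options = tf.profiler.experimental.ProfilerOptions(
      host_tracer_level=3, python_tracer_level=1, device_tracer_level=1)
  tf.profiler.experimental.start('logdir_path', options=options)
  ```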
68 """

  def __new__(cls,
              host_tracer_level=2,
              python_tracer_level=0,
              device_tracer_level=1,
              delay_ms=None):
    return super(ProfilerOptions,
                 cls).__new__(cls, host_tracer_level, python_tracer_level,
                              device_tracer_level, delay_ms)


@tf_export('profiler.experimental.start', v1=[])
def start(logdir, options=None):
  """Start profiling TensorFlow performance.

  Args:
    logdir: Profiling results log directory.
    options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
      options. See example usage below.

  Raises:
    AlreadyExistsError: If a profiling session is already running.

  Example usage:
  ```python
  options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,
                                                     python_tracer_level = 1,
                                                     device_tracer_level = 1)
  tf.profiler.experimental.start('logdir_path', options = options)
  # Training code here
  tf.profiler.experimental.stop()
  ```

  To view the results, launch TensorBoard, point it to `logdir`, and open
  `localhost:6006/#profile` in your browser.
  """
  global _profiler
  with _profiler_lock:
    if _profiler is not None:
      raise errors.AlreadyExistsError(None, None,
                                      'Another profiler is running.')
    _profiler = _pywrap_profiler.ProfilerSession()
    try:
      # Support for namedtuple in pybind11 is missing; convert the options to
      # a dict first.
      opts = dict(options._asdict()) if options is not None else {}
      _profiler.start(logdir, opts)
    except errors.AlreadyExistsError:
      logging.warning('Another profiler session is running which is probably '
                      'created by profiler server. Please avoid using profiler '
                      'server and profiler APIs at the same time.')
      raise errors.AlreadyExistsError(None, None,
                                      'Another profiler is running.')
    except Exception:
      _profiler = None
      raise


@tf_export('profiler.experimental.stop', v1=[])
def stop(save=True):
  """Stops the current profiling session.

  The profiler session will be stopped and the profile results can be saved.

  Args:
    save: Whether to save the profiling results to the TensorBoard logdir.
      Defaults to True.

  Raises:
    UnavailableError: If there is no active profiling session.
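
  Example usage (a minimal sketch; 'logdir_path' is a placeholder directory):

  ```python
  tf.profiler.experimental.start('logdir_path')
  # ... code to profile ...
  tf.profiler.experimental.stop()                   # save to 'logdir_path'
  # or: tf.profiler.experimental.stop(save=False)   # discard the results
  ```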
140 """
  global _profiler
  with _profiler_lock:
    if _profiler is None:
      raise errors.UnavailableError(
          None, None,
          'Cannot export profiling results. No profiler is running.')
    if save:
      try:
        _profiler.export_to_tb()
      except Exception:
        _profiler = None
        raise
    _profiler = None


def warmup():
  """Warm-up the profiler session.

  The profiler session will set up the profiling context, including loading the
  CUPTI library for GPU profiling. This is used to improve the accuracy of
  the profiling results.
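
  Example usage (a minimal sketch; calling warmup() before the measured run is
  optional):

  ```python
  warmup()
  tf.profiler.experimental.start('logdir_path')
  # ... code to profile ...
  tf.profiler.experimental.stop()
  ```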
163 """
164 start('')
165 stop(save=False)


@tf_export('profiler.experimental.server.start', v1=[])
def start_server(port):
  """Start a profiler gRPC server that listens on the given port.

  The profiler server will exit when the process finishes. The service is
  defined in tensorflow/core/profiler/profiler_service.proto.

  Args:
    port: Port the profiler server listens on.

  Example usage:

  ```python
  tf.profiler.experimental.server.start(6009)
  # ... do your training here.
  ```
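
  Once the server is running, a trace can be requested on demand, e.g. from
  TensorBoard's Profile tab or (assuming the client API is available in your
  TensorFlow build) programmatically:

  ```python
  tf.profiler.experimental.client.trace(
      'grpc://localhost:6009', 'logdir_path', duration_ms=2000)
  ```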
179 """
180 _pywrap_profiler.start_server(port)


@tf_export('profiler.experimental.Profile', v1=[])
class Profile(object):
  """Context-manager profile API.

  Profiling will start when entering the scope, and stop and save the results
  to the logdir when exiting the scope. Open the TensorBoard profile tab to
  view the results.

  Example usage:
  ```python
  with tf.profiler.experimental.Profile("/path/to/logdir"):
    ...  # do some work
  ```
  """

  def __init__(self, logdir, options=None):
    """Creates a context manager object for the profiler API.

    Args:
      logdir: Directory to which the profile data will be saved.
      options: An optional `tf.profiler.experimental.ProfilerOptions` can be
        provided to fine-tune the profiler's behavior.
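
    Example usage (an illustrative sketch; 'logdir_path' and the chosen option
    value are placeholders):

    ```python
    options = tf.profiler.experimental.ProfilerOptions(host_tracer_level=3)
    with tf.profiler.experimental.Profile('logdir_path', options=options):
      ...  # code to profile
    ```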
204 """
205 self._logdir = logdir
206 self._options = options

  def __enter__(self):
    start(self._logdir, self._options)

  def __exit__(self, typ, value, tb):
    stop()