Coverage Report

Created: 2025-08-03 10:06

/src/node/src/tracing/node_trace_writer.cc
Coverage: every executable line below has an execution count of 0; the file is entirely uncovered.

#include "tracing/node_trace_writer.h"

#include "util-inl.h"

#include <fcntl.h>
#include <cstring>

namespace node {
namespace tracing {

NodeTraceWriter::NodeTraceWriter(const std::string& log_file_pattern)
    : log_file_pattern_(log_file_pattern) {}

void NodeTraceWriter::InitializeOnThread(uv_loop_t* loop) {
  CHECK_NULL(tracing_loop_);
  tracing_loop_ = loop;

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          [](uv_async_t* signal) {
    NodeTraceWriter* trace_writer =
        ContainerOf(&NodeTraceWriter::flush_signal_, signal);
    trace_writer->FlushPrivate();
  });
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}
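
Note: both callbacks above recover their owning NodeTraceWriter via ContainerOf, which maps the address of a member field (e.g. flush_signal_) back to the enclosing object. The sketch below is a minimal illustration of that idea using the classic offset trick; Node's actual helper lives in util-inl.h, and the names here (container_of, Watcher) are invented for the example.

    #include <cstddef>
    #include <cstdio>

    // Illustrative only: compute the member's offset within Outer, then
    // step back from the member's address to the start of the enclosing
    // object. (This null-object trick is formally implementation-defined;
    // it is shown purely to explain the mechanism.)
    template <typename Outer, typename Inner>
    Outer* container_of(Inner Outer::*field, Inner* pointer) {
      const size_t offset =
          reinterpret_cast<size_t>(&(static_cast<Outer*>(nullptr)->*field));
      return reinterpret_cast<Outer*>(
          reinterpret_cast<char*>(pointer) - offset);
    }

    struct Watcher {
      int id;
      int signal;  // stand-in for the uv_async_t member
    };

    int main() {
      Watcher w{42, 0};
      // Given only &w.signal, recover &w - the same move the lambda above
      // makes with &NodeTraceWriter::flush_signal_.
      Watcher* recovered = container_of(&Watcher::signal, &w.signal);
      std::printf("id = %d\n", recovered->id);  // prints 42
    }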

void NodeTraceWriter::WriteSuffix() {
  // If our final log file has traces, then end the file appropriately.
  // This means that if no trace events are recorded, then no trace file is
  // produced.
  bool should_flush = false;
  {
    Mutex::ScopedLock scoped_lock(stream_mutex_);
    if (total_traces_ > 0) {
      total_traces_ = kTracesPerFile;  // Act as if we reached the file limit.
      should_flush = true;
    }
  }
  if (should_flush) {
    Flush(true);
  }
}

NodeTraceWriter::~NodeTraceWriter() {
  WriteSuffix();
  uv_fs_t req;
  if (fd_ != -1) {
    CHECK_EQ(0, uv_fs_close(nullptr, &req, fd_, nullptr));
    uv_fs_req_cleanup(&req);
  }
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(request_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}
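
Note: the destructor is a cross-thread shutdown handshake: it wakes the tracing thread with uv_async_send(&exit_signal_) and then sleeps on exit_cond_ until ExitSignalCb (at the bottom of this file) sets exited_. A minimal sketch of the same handshake using only standard-library primitives, with invented names:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Shutdown {
      std::mutex mutex;
      std::condition_variable cond;
      bool exited = false;
    };

    int main() {
      Shutdown s;
      std::thread worker([&s] {
        // ... tear down resources on the worker, then acknowledge:
        std::lock_guard<std::mutex> lock(s.mutex);
        s.exited = true;
        s.cond.notify_one();
      });
      {
        std::unique_lock<std::mutex> lock(s.mutex);
        // The loop guards against spurious wakeups, just like the
        // `while (!exited_)` loop in the destructor.
        while (!s.exited) s.cond.wait(lock);
      }
      worker.join();
    }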

void replace_substring(std::string* target,
                       const std::string& search,
                       const std::string& insert) {
  size_t pos = target->find(search);
  for (; pos != std::string::npos; pos = target->find(search, pos)) {
    target->replace(pos, search.size(), insert);
    pos += insert.size();
  }
}

void NodeTraceWriter::OpenNewFileForStreaming() {
  ++file_num_;
  uv_fs_t req;

  // Evaluate a JS-style template string; it accepts the placeholders
  // ${pid} and ${rotation}.
  std::string filepath(log_file_pattern_);
  replace_substring(&filepath, "${pid}", std::to_string(uv_os_getpid()));
  replace_substring(&filepath, "${rotation}", std::to_string(file_num_));

  if (fd_ != -1) {
    CHECK_EQ(uv_fs_close(nullptr, &req, fd_, nullptr), 0);
    uv_fs_req_cleanup(&req);
  }

  fd_ = uv_fs_open(nullptr, &req, filepath.c_str(),
      O_CREAT | O_WRONLY | O_TRUNC, 0644, nullptr);
  uv_fs_req_cleanup(&req);
  if (fd_ < 0) {
    fprintf(stderr, "Could not open trace file %s: %s\n",
                    filepath.c_str(),
                    uv_strerror(fd_));
    fd_ = -1;
  }
}
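
Note: the pattern is re-expanded on every rotation, so ${rotation} yields a new file name each time the per-file trace limit is reached. The standalone demo below inlines replace_substring from this file so it compiles on its own; the pid value is made up. Advancing pos past the inserted text in the loop also keeps the expansion from re-matching inside its own replacement if insert were to contain search.

    #include <cstdio>
    #include <string>

    // Copied from above so the demo is self-contained.
    void replace_substring(std::string* target,
                           const std::string& search,
                           const std::string& insert) {
      size_t pos = target->find(search);
      for (; pos != std::string::npos; pos = target->find(search, pos)) {
        target->replace(pos, search.size(), insert);
        pos += insert.size();
      }
    }

    int main() {
      std::string path = "node_trace.${pid}.${rotation}.log";
      replace_substring(&path, "${pid}", "12345");   // hypothetical pid
      replace_substring(&path, "${rotation}", "1");  // first rotation
      std::printf("%s\n", path.c_str());  // prints node_trace.12345.1.log
    }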

void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
  Mutex::ScopedLock scoped_lock(stream_mutex_);
  // If this is the first trace event, open a new file for streaming.
  if (total_traces_ == 0) {
    OpenNewFileForStreaming();
    // Constructing a new JSONTraceWriter object appends "{\"traceEvents\":["
    // to stream_.
    // In other words, the constructor initializes the serialization stream
    // to a state where we can start writing trace events to it.
    // Repeatedly constructing and destroying json_trace_writer_ allows
    // us to use V8's JSON writer instead of implementing our own.
    json_trace_writer_.reset(TraceWriter::CreateJSONTraceWriter(stream_));
  }
  ++total_traces_;
  json_trace_writer_->AppendTraceEvent(trace_event);
}
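
Note: the comments above describe an RAII pattern: constructing the JSON writer emits the opening of the document and destroying it emits the close, so resetting the unique_ptr means "finish the file". The toy stand-in below mimics only that open/close behavior; CreateJSONTraceWriter itself is V8's factory and is not reproduced here.

    #include <iostream>
    #include <memory>
    #include <sstream>

    class ToyJsonWriter {
     public:
      explicit ToyJsonWriter(std::ostream& out) : out_(out) {
        out_ << "{\"traceEvents\":[";  // what construction appends
      }
      ~ToyJsonWriter() { out_ << "]}"; }  // what destruction appends

     private:
      std::ostream& out_;
    };

    int main() {
      std::ostringstream stream;
      std::unique_ptr<ToyJsonWriter> writer(new ToyJsonWriter(stream));
      writer.reset();  // ends the document, as in FlushPrivate below
      std::cout << stream.str() << "\n";  // prints {"traceEvents":[]}
    }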

void NodeTraceWriter::FlushPrivate() {
  std::string str;
  int highest_request_id;
  {
    Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
    if (total_traces_ >= kTracesPerFile) {
      total_traces_ = 0;
      // Destroying the member JSONTraceWriter object appends "]}" to
      // stream_ - in other words, ending a JSON file.
      json_trace_writer_.reset();
    }
    // str() makes a copy of the contents of the stream.
    str = stream_.str();
    stream_.str("");
    stream_.clear();
  }
  {
    Mutex::ScopedLock request_scoped_lock(request_mutex_);
    highest_request_id = num_write_requests_;
  }
  WriteToFile(std::move(str), highest_request_id);
}

void NodeTraceWriter::Flush(bool blocking) {
  Mutex::ScopedLock scoped_lock(request_mutex_);
  {
    // We need to lock the mutexes here in a nested fashion: stream_mutex_
    // protects json_trace_writer_, and without also holding request_mutex_
    // there would be a window in which the stream state could change
    // between this check and the request bookkeeping below.
    Mutex::ScopedLock stream_mutex_lock(stream_mutex_);
    if (!json_trace_writer_)
      return;
  }
  int request_id = ++num_write_requests_;
  int err = uv_async_send(&flush_signal_);
  CHECK_EQ(err, 0);
  if (blocking) {
    // Wait until data associated with this request id has been written to
    // disk. This guarantees that data from all earlier requests has also
    // been written.
    while (request_id > highest_request_id_completed_) {
      request_cond_.Wait(scoped_lock);
    }
  }
}
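
Note: the blocking path is a ticket protocol: each flush takes a monotonically increasing request id, the writer thread publishes the highest id it has finished, and a blocking caller waits until its own ticket is covered. Because ids only grow, waking up also proves every earlier request is on disk. A minimal sketch with standard-library primitives; the class and method names are invented:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class TicketGate {
     public:
      // Requesting thread: take the next ticket.
      int Request() {
        std::lock_guard<std::mutex> lock(mutex_);
        return ++num_requests_;
      }
      // Writer thread: everything up to `id` is now on disk.
      void Complete(int id) {
        std::lock_guard<std::mutex> lock(mutex_);
        highest_completed_ = id;
        cond_.notify_all();  // like request_cond_.Broadcast()
      }
      // Blocking caller: wait until our ticket has been covered.
      void WaitFor(int id) {
        std::unique_lock<std::mutex> lock(mutex_);
        while (id > highest_completed_) cond_.wait(lock);
      }

     private:
      std::mutex mutex_;
      std::condition_variable cond_;
      int num_requests_ = 0;
      int highest_completed_ = 0;
    };

    int main() {
      TicketGate gate;
      int ticket = gate.Request();
      std::thread writer([&] { gate.Complete(ticket); });
      gate.WaitFor(ticket);  // returns once the writer catches up
      writer.join();
    }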

void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
  if (fd_ == -1) return;

  uv_buf_t buf = uv_buf_init(nullptr, 0);
  {
    Mutex::ScopedLock lock(request_mutex_);
    write_req_queue_.emplace(WriteRequest {
      std::move(str), highest_request_id
    });
    if (write_req_queue_.size() == 1) {
      buf = uv_buf_init(
          const_cast<char*>(write_req_queue_.front().str.c_str()),
          write_req_queue_.front().str.length());
    }
  }
  // Only one write request for the same file descriptor should be active at
  // a time.
  if (buf.base != nullptr && fd_ != -1) {
    StartWrite(buf);
  }
}

void NodeTraceWriter::StartWrite(uv_buf_t buf) {
  int err = uv_fs_write(
      tracing_loop_, &write_req_, fd_, &buf, 1, -1,
      [](uv_fs_t* req) {
        NodeTraceWriter* writer =
            ContainerOf(&NodeTraceWriter::write_req_, req);
        writer->AfterWrite();
      });
  CHECK_EQ(err, 0);
}

void NodeTraceWriter::AfterWrite() {
  CHECK_GE(write_req_.result, 0);
  uv_fs_req_cleanup(&write_req_);

  uv_buf_t buf = uv_buf_init(nullptr, 0);
  {
    Mutex::ScopedLock scoped_lock(request_mutex_);
    int highest_request_id = write_req_queue_.front().highest_request_id;
    write_req_queue_.pop();
    highest_request_id_completed_ = highest_request_id;
    request_cond_.Broadcast(scoped_lock);
    if (!write_req_queue_.empty()) {
      buf = uv_buf_init(
          const_cast<char*>(write_req_queue_.front().str.c_str()),
          write_req_queue_.front().str.length());
    }
  }
  if (buf.base != nullptr && fd_ != -1) {
    StartWrite(buf);
  }
}
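
Note: WriteToFile and AfterWrite jointly maintain the invariant stated above: at most one uv_fs_write per file descriptor is in flight. Whichever thread makes the queue non-empty starts the only write; the completion callback then chains the next buffer until the queue drains. A compact sketch of that hand-off with invented names (the real code also avoids a copy by pointing the uv_buf_t directly at the queued string):

    #include <mutex>
    #include <queue>
    #include <string>

    class SingleFlightQueue {
     public:
      // Returns true when the caller made the queue non-empty and must
      // therefore start the (only) in-flight write.
      bool Enqueue(std::string data) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.push(std::move(data));
        return queue_.size() == 1;
      }
      // Completion path: drop the finished front entry; if more work is
      // queued, hand back the next buffer to write.
      bool PopAndPeekNext(std::string* next) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.pop();
        if (queue_.empty()) return false;
        *next = queue_.front();
        return true;
      }

     private:
      std::mutex mutex_;
      std::queue<std::string> queue_;
    };

    int main() {
      SingleFlightQueue q;
      if (q.Enqueue("first")) { /* start the write, as StartWrite does */ }
      q.Enqueue("second");  // false: a write is already in flight
      std::string next;
      // Each completion pops the finished entry and chains the next one;
      // this loop stands in for repeated completion callbacks.
      while (q.PopAndPeekNext(&next)) { /* write `next` */ }
    }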

// static
void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
  NodeTraceWriter* trace_writer =
      ContainerOf(&NodeTraceWriter::exit_signal_, signal);
  // Close both flush_signal_ and exit_signal_.
  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_),
           [](uv_handle_t* signal) {
             NodeTraceWriter* trace_writer =
                 ContainerOf(&NodeTraceWriter::flush_signal_,
                             reinterpret_cast<uv_async_t*>(signal));
             uv_close(
                 reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_),
                 [](uv_handle_t* signal) {
                   NodeTraceWriter* trace_writer =
                       ContainerOf(&NodeTraceWriter::exit_signal_,
                                   reinterpret_cast<uv_async_t*>(signal));
                   Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_);
                   trace_writer->exited_ = true;
                   trace_writer->exit_cond_.Signal(scoped_lock);
                 });
           });
}

}  // namespace tracing
}  // namespace node
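
Note: ExitSignalCb chains its uv_close calls because a libuv handle must be closed from its own loop's thread and its memory must remain valid until the close callback fires; only after both handles are fully closed does the innermost callback set exited_, letting the destructor's wait loop return. A minimal standalone libuv program showing the close-callback rule (plain libuv, no Node internals):

    #include <uv.h>
    #include <cstdio>

    static void on_closed(uv_handle_t* handle) {
      // Only now may the memory backing `handle` be reclaimed.
      std::printf("handle fully closed\n");
    }

    static void on_async(uv_async_t* async) {
      // Close the handle from its own loop's thread, as ExitSignalCb does.
      uv_close(reinterpret_cast<uv_handle_t*>(async), on_closed);
    }

    int main() {
      uv_loop_t* loop = uv_default_loop();
      uv_async_t async;
      uv_async_init(loop, &async, on_async);
      uv_async_send(&async);         // wake the loop, like exit_signal_
      uv_run(loop, UV_RUN_DEFAULT);  // returns once all handles are closed
      return 0;
    }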