// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/microtask-queue.h"

#include <stddef.h>
#include <algorithm>

#include "src/api.h"
#include "src/base/logging.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects/microtask-inl.h"
#include "src/roots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/visitors.h"

namespace v8 {
namespace internal {

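// These constants expose the queue fields as raw byte offsets so that code
// outside this class (such as the generated microtask builtins mentioned
// below) can read and write the ring buffer without going through C++.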
const size_t MicrotaskQueue::kRingBufferOffset =
    OFFSET_OF(MicrotaskQueue, ring_buffer_);
const size_t MicrotaskQueue::kCapacityOffset =
    OFFSET_OF(MicrotaskQueue, capacity_);
const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);

const intptr_t MicrotaskQueue::kMinimumCapacity = 8;

// static
void MicrotaskQueue::SetUpDefaultMicrotaskQueue(Isolate* isolate) {
  DCHECK_NULL(isolate->default_microtask_queue());

  MicrotaskQueue* microtask_queue = new MicrotaskQueue;
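  // The default queue starts out as the sole element of a circular
  // doubly-linked list, so it links to itself in both directions.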
  microtask_queue->next_ = microtask_queue;
  microtask_queue->prev_ = microtask_queue;
  isolate->set_default_microtask_queue(microtask_queue);
}

// static
std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate) {
  DCHECK_NOT_NULL(isolate->default_microtask_queue());

  std::unique_ptr<MicrotaskQueue> microtask_queue(new MicrotaskQueue);

  // Splice the new instance into the circular list, right after the last
  // MicrotaskQueue instance.
  MicrotaskQueue* last = isolate->default_microtask_queue()->prev_;
  microtask_queue->next_ = last->next_;
  microtask_queue->prev_ = last;
  last->next_->prev_ = microtask_queue.get();
  last->next_ = microtask_queue.get();

  return microtask_queue;
}

MicrotaskQueue::MicrotaskQueue() = default;

MicrotaskQueue::~MicrotaskQueue() {
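  // Unlink this queue from the circular list, unless it is the only element.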
  if (next_ != this) {
    DCHECK_NE(prev_, this);
    next_->prev_ = prev_;
    prev_->next_ = next_;
  }
  delete[] ring_buffer_;
}

// static
Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
                                             intptr_t microtask_queue_pointer,
                                             Address raw_microtask) {
  Microtask microtask = Microtask::cast(Object(raw_microtask));
  reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer)
      ->EnqueueMicrotask(microtask);
  return ReadOnlyRoots(isolate).undefined_value().ptr();
}

void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
  if (size_ == capacity_) {
    // Keep the capacity of |ring_buffer_| a power of 2, so that the JIT
    // implementation can calculate the modulo cheaply.
    intptr_t new_capacity = std::max(kMinimumCapacity, capacity_ << 1);
    ResizeBuffer(new_capacity);
  }

  DCHECK_LT(size_, capacity_);
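  // Write at the logical end of the queue, wrapping around if the pending
  // microtasks currently span the end of the underlying array.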
  ring_buffer_[(start_ + size_) % capacity_] = microtask.ptr();
  ++size_;
}

namespace {

class SetIsRunningMicrotasks {
 public:
  explicit SetIsRunningMicrotasks(bool* flag) : flag_(flag) {
    DCHECK(!*flag_);
    *flag_ = true;
  }

  ~SetIsRunningMicrotasks() {
    DCHECK(*flag_);
    *flag_ = false;
  }

 private:
  bool* flag_;
};

}  // namespace

int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
  if (!size()) {
    OnCompleted(isolate);
    return 0;
  }

  HandleScope handle_scope(isolate);
  MaybeHandle<Object> maybe_exception;

  MaybeHandle<Object> maybe_result;

  {
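    // Mark the queue as running and suppress reentrant microtask execution
    // on the isolate for the duration of this scope.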
    SetIsRunningMicrotasks scope(&is_running_microtasks_);
    v8::Isolate::SuppressMicrotaskExecutionScope suppress(
        reinterpret_cast<v8::Isolate*>(isolate));
    HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
        isolate->handle_scope_implementer());
    TRACE_EVENT0("v8.execute", "RunMicrotasks");
    TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
    maybe_result = Execution::TryRunMicrotasks(isolate, this, &maybe_exception);
  }

  // If execution is terminating, clean up and propagate that to the TryCatch
  // scope.
  if (maybe_result.is_null() && maybe_exception.is_null()) {
    delete[] ring_buffer_;
    ring_buffer_ = nullptr;
    capacity_ = 0;
    size_ = 0;
    start_ = 0;
    isolate->SetTerminationOnExternalTryCatch();
    OnCompleted(isolate);
    return -1;
  }
  DCHECK_EQ(0, size());
  OnCompleted(isolate);

  // TODO(tzik): Return the number of microtasks run in this round.
  return 0;
}

void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
  if (size_) {
    // Iterate pending Microtasks as root objects to avoid the write barrier
    // for every single Microtask. If this hurts GC performance, use a
    // FixedArray instead.
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_ + start_),
        FullObjectSlot(ring_buffer_ + std::min(start_ + size_, capacity_)));
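    // The second call covers the portion of the queue that wrapped around to
    // the beginning of the ring buffer; it visits an empty range when there
    // is no wrap-around.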
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_),
        FullObjectSlot(ring_buffer_ + std::max(start_ + size_ - capacity_,
                                               static_cast<intptr_t>(0))));
  }

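  // Shrink the ring buffer while it is less than half full, but never below
  // kMinimumCapacity.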
  if (capacity_ <= kMinimumCapacity) {
    return;
  }

  intptr_t new_capacity = capacity_;
  while (new_capacity > 2 * size_) {
    new_capacity >>= 1;
  }
  new_capacity = std::max(new_capacity, kMinimumCapacity);
  if (new_capacity < capacity_) {
    ResizeBuffer(new_capacity);
  }
}

void MicrotaskQueue::AddMicrotasksCompletedCallback(
    MicrotasksCompletedCallback callback) {
  auto pos = std::find(microtasks_completed_callbacks_.begin(),
                       microtasks_completed_callbacks_.end(), callback);
  if (pos != microtasks_completed_callbacks_.end()) return;
  microtasks_completed_callbacks_.push_back(callback);
}

void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
    MicrotasksCompletedCallback callback) {
  auto pos = std::find(microtasks_completed_callbacks_.begin(),
                       microtasks_completed_callbacks_.end(), callback);
  if (pos == microtasks_completed_callbacks_.end()) return;
  microtasks_completed_callbacks_.erase(pos);
}

void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const {
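  // Iterate over a copy so that a callback may add or remove callbacks
  // without invalidating the iteration.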
  std::vector<MicrotasksCompletedCallback> callbacks(
      microtasks_completed_callbacks_);
  for (auto& callback : callbacks) {
    callback(reinterpret_cast<v8::Isolate*>(isolate));
  }
}

void MicrotaskQueue::OnCompleted(Isolate* isolate) {
  // TODO(marja): (spec) The discussion about when to clear the KeepDuringJob
  // set is still open (whether to clear it after every microtask or once
  // during a microtask checkpoint). See also
  // https://github.com/tc39/proposal-weakrefs/issues/39 .
  isolate->heap()->ClearKeepDuringJobSet();

  FireMicrotasksCompletedCallback(isolate);
}

void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) {
  DCHECK_LE(size_, new_capacity);
  Address* new_ring_buffer = new Address[new_capacity];
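  // Copy the pending microtasks to the front of the new buffer, unwrapping
  // the ring so that the queue starts at index 0 again.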
  for (intptr_t i = 0; i < size_; ++i) {
    new_ring_buffer[i] = ring_buffer_[(start_ + i) % capacity_];
  }

  delete[] ring_buffer_;
  ring_buffer_ = new_ring_buffer;
  capacity_ = new_capacity;
  start_ = 0;
}

}  // namespace internal
}  // namespace v8