/src/node/src/node_locks.cc
Line | Count | Source |
1 | | #include "node_locks.h" |
2 | | |
3 | | #include "base_object-inl.h" |
4 | | #include "env-inl.h" |
5 | | #include "node_errors.h" |
6 | | #include "node_external_reference.h" |
7 | | #include "node_internals.h" |
8 | | #include "util-inl.h" |
9 | | #include "v8.h" |
10 | | |
11 | | namespace node::worker::locks { |
12 | | |
13 | | using node::errors::TryCatchScope; |
14 | | using v8::Array; |
15 | | using v8::Context; |
16 | | using v8::DictionaryTemplate; |
17 | | using v8::Exception; |
18 | | using v8::Function; |
19 | | using v8::FunctionCallbackInfo; |
20 | | using v8::FunctionTemplate; |
21 | | using v8::HandleScope; |
22 | | using v8::Isolate; |
23 | | using v8::Local; |
24 | | using v8::LocalVector; |
25 | | using v8::MaybeLocal; |
26 | | using v8::Object; |
27 | | using v8::ObjectTemplate; |
28 | | using v8::Promise; |
29 | | using v8::PropertyAttribute; |
30 | | using v8::Value; |
31 | | |
32 | | // Reject both promises; returns false if either rejection fails. |
33 | | static bool RejectBoth(Local<Context> ctx, |
34 | | Local<Promise::Resolver> first, |
35 | | Local<Promise::Resolver> second, |
36 | 0 | Local<Value> reason) { |
37 | 0 | return first->Reject(ctx, reason).IsJust() && |
38 | 0 | second->Reject(ctx, reason).IsJust(); |
39 | 0 | } |
40 | | |
41 | | Lock::Lock(Environment* env, |
42 | | const std::u16string& name, |
43 | | Mode mode, |
44 | | const std::string& client_id, |
45 | | Local<Promise::Resolver> waiting, |
46 | | Local<Promise::Resolver> released) |
47 | 0 | : env_(env), name_(name), mode_(mode), client_id_(client_id) { |
48 | 0 | waiting_promise_.Reset(env_->isolate(), waiting); |
49 | 0 | released_promise_.Reset(env_->isolate(), released); |
50 | 0 | } |
51 | | |
52 | 0 | void Lock::MemoryInfo(node::MemoryTracker* tracker) const { |
53 | 0 | tracker->TrackFieldWithSize("name", name_.size()); |
54 | 0 | tracker->TrackField("client_id", client_id_); |
55 | 0 | tracker->TrackField("waiting_promise", waiting_promise_); |
56 | 0 | tracker->TrackField("released_promise", released_promise_); |
57 | 0 | } |
58 | | |
59 | | LockRequest::LockRequest(Environment* env, |
60 | | Local<Promise::Resolver> waiting, |
61 | | Local<Promise::Resolver> released, |
62 | | Local<Function> callback, |
63 | | const std::u16string& name, |
64 | | Lock::Mode mode, |
65 | | std::string client_id, |
66 | | bool steal, |
67 | | bool if_available) |
68 | 0 | : env_(env), |
69 | 0 | name_(name), |
70 | 0 | mode_(mode), |
71 | 0 | client_id_(std::move(client_id)), |
72 | 0 | steal_(steal), |
73 | 0 | if_available_(if_available) { |
74 | 0 | waiting_promise_.Reset(env_->isolate(), waiting); |
75 | 0 | released_promise_.Reset(env_->isolate(), released); |
76 | 0 | callback_.Reset(env_->isolate(), callback); |
77 | 0 | } |
78 | | |
79 | 0 | Local<DictionaryTemplate> GetLockInfoTemplate(Environment* env) { |
80 | 0 | auto tmpl = env->lock_info_template(); |
81 | 0 | if (tmpl.IsEmpty()) { |
82 | 0 | static constexpr std::string_view names[] = { |
83 | 0 | "name", |
84 | 0 | "mode", |
85 | 0 | "clientId", |
86 | 0 | }; |
87 | 0 | tmpl = DictionaryTemplate::New(env->isolate(), names); |
88 | 0 | env->set_lock_info_template(tmpl); |
89 | 0 | } |
90 | 0 | return tmpl; |
91 | 0 | } |
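
The template is built once per Environment and cached on it; every later call reuses the cached handle. The same lazy-init shape in miniature, with a hypothetical cache struct standing in for the Environment slot and a string standing in for the V8 handle:

    #include <optional>
    #include <string>

    struct TemplateCache {
      std::optional<std::string> lock_info_template;  // stand-in for the handle
    };

    const std::string& GetLockInfoTemplateSketch(TemplateCache& cache) {
      if (!cache.lock_info_template) {
        cache.lock_info_template = "name,mode,clientId";  // built exactly once
      }
      return *cache.lock_info_template;
    }
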
92 | | |
93 | | // The request here can be either a Lock or a LockRequest. |
94 | | static MaybeLocal<Object> CreateLockInfoObject(Environment* env, |
95 | 0 | const auto& request) { |
96 | 0 | auto tmpl = GetLockInfoTemplate(env); |
97 | 0 | MaybeLocal<Value> values[] = { |
98 | 0 | ToV8Value(env->context(), request.name()), |
99 | 0 | request.mode() == Lock::Mode::Exclusive ? env->exclusive_string() |
100 | 0 | : env->shared_string(), |
101 | 0 | ToV8Value(env->context(), request.client_id()), |
102 | 0 | }; |
103 | |
104 | 0 | return NewDictionaryInstance(env->context(), tmpl, values); |
105 | 0 | } |
| Unexecuted instantiation: node_locks.cc:v8::MaybeLocal<v8::Object> node::worker::locks::CreateLockInfoObject<node::worker::locks::LockRequest>(node::Environment*, node::worker::locks::LockRequest const&)
| Unexecuted instantiation: node_locks.cc:v8::MaybeLocal<v8::Object> node::worker::locks::CreateLockInfoObject<node::worker::locks::Lock>(node::Environment*, node::worker::locks::Lock const&)
106 | | |
107 | 0 | bool LockManager::IsGrantable(const LockRequest* request) const { |
108 | | // Steal requests bypass all normal granting rules |
109 | 0 | if (request->steal()) return true; |
110 | | |
111 | 0 | auto held_locks_iter = held_locks_.find(request->name()); |
112 | | // No existing locks for this resource name |
113 | 0 | if (held_locks_iter == held_locks_.end()) return true; |
114 | | |
115 | | // Exclusive requests cannot coexist with any existing locks |
116 | 0 | if (request->mode() == Lock::Mode::Exclusive) return false; |
117 | | |
118 | | // For shared requests, check if any existing lock is exclusive |
119 | 0 | for (const auto& existing_lock : held_locks_iter->second) { |
120 | 0 | if (existing_lock->mode() == Lock::Mode::Exclusive) return false; |
121 | 0 | } |
122 | | // All existing locks are shared, so this shared request can be granted |
123 | 0 | return true; |
124 | 0 | } |
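
The checks above are the Web Locks compatibility matrix: steal bypasses everything, an exclusive request conflicts with any holder, and a shared request conflicts only with an exclusive holder. A minimal standalone sketch of the same decision, using simplified stand-in types (Mode, HeldMap) rather than the classes in this file:

    #include <map>
    #include <string>
    #include <vector>

    enum class Mode { Shared, Exclusive };
    // Resource name -> modes of the locks currently held on it.
    using HeldMap = std::map<std::u16string, std::vector<Mode>>;

    bool IsGrantableSketch(const HeldMap& held,
                           const std::u16string& name,
                           Mode mode,
                           bool steal) {
      if (steal) return true;                     // steal bypasses all rules
      auto it = held.find(name);
      if (it == held.end()) return true;          // nothing held under this name
      if (mode == Mode::Exclusive) return false;  // exclusive conflicts with all
      for (Mode held_mode : it->second) {         // shared conflicts only with
        if (held_mode == Mode::Exclusive) return false;  // an exclusive holder
      }
      return true;
    }
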
125 | | |
126 | | // Called when the user callback promise fulfills |
127 | 0 | static void OnLockCallbackFulfilled(const FunctionCallbackInfo<Value>& info) { |
128 | 0 | HandleScope handle_scope(info.GetIsolate()); |
129 | 0 | Environment* env = Environment::GetCurrent(info); |
130 | |
131 | 0 | BaseObjectPtr<LockHolder> lock_holder{ |
132 | 0 | BaseObject::FromJSObject<LockHolder>(info.Data())}; |
133 | 0 | std::shared_ptr<Lock> lock = lock_holder->lock(); |
134 | | |
135 | | // Release the lock and continue processing the queue. |
136 | 0 | LockManager::GetCurrent()->ReleaseLockAndProcessQueue( |
137 | 0 | env, lock, info[0], false); |
138 | 0 | } |
139 | | |
140 | | // Called when the user callback promise rejects |
141 | 0 | static void OnLockCallbackRejected(const FunctionCallbackInfo<Value>& info) { |
142 | 0 | HandleScope handle_scope(info.GetIsolate()); |
143 | 0 | Environment* env = Environment::GetCurrent(info); |
144 | |
145 | 0 | BaseObjectPtr<LockHolder> lock_holder{ |
146 | 0 | BaseObject::FromJSObject<LockHolder>(info.Data())}; |
147 | 0 | std::shared_ptr<Lock> lock = lock_holder->lock(); |
148 | |
149 | 0 | LockManager::GetCurrent()->ReleaseLockAndProcessQueue( |
150 | 0 | env, lock, info[0], true); |
151 | 0 | } |
152 | | |
153 | | // Called when the promise returned from the user's callback resolves |
154 | 0 | static void OnIfAvailableFulfill(const FunctionCallbackInfo<Value>& info) { |
155 | 0 | HandleScope handle_scope(info.GetIsolate()); |
156 | 0 | USE(info.Data().As<Promise::Resolver>()->Resolve( |
157 | 0 | info.GetIsolate()->GetCurrentContext(), info[0])); |
158 | 0 | } |
159 | | |
160 | | // Called when the promise returned from the user's callback rejects |
161 | 0 | static void OnIfAvailableReject(const FunctionCallbackInfo<Value>& info) { |
162 | 0 | USE(info.Data().As<Promise::Resolver>()->Reject( |
163 | 0 | info.GetIsolate()->GetCurrentContext(), info[0])); |
164 | 0 | } |
165 | | |
166 | 0 | void LockManager::CleanupStolenLocks(Environment* env) { |
167 | 0 | std::vector<std::u16string> resources_to_clean; |
168 | | |
169 | | // Iterate held locks and remove entries that were stolen from other envs. |
170 | 0 | { |
171 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
172 | |
173 | 0 | for (auto resource_iter = held_locks_.begin(); |
174 | 0 | resource_iter != held_locks_.end(); |
175 | 0 | ++resource_iter) { |
176 | 0 | auto& resource_locks = resource_iter->second; |
177 | 0 | bool has_stolen_from_other_env = false; |
178 | | |
179 | | // Check if this resource has stolen locks from other environments |
180 | 0 | for (const auto& lock_ptr : resource_locks) { |
181 | 0 | if (lock_ptr->is_stolen() && lock_ptr->env() != env) { |
182 | 0 | has_stolen_from_other_env = true; |
183 | 0 | break; |
184 | 0 | } |
185 | 0 | } |
186 | |
187 | 0 | if (has_stolen_from_other_env) { |
188 | 0 | resources_to_clean.push_back(resource_iter->first); |
189 | 0 | } |
190 | 0 | } |
191 | 0 | } |
192 | | |
193 | | // Clean up resources |
194 | 0 | for (const auto& resource_name : resources_to_clean) { |
195 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
196 | |
197 | 0 | auto resource_iter = held_locks_.find(resource_name); |
198 | 0 | if (resource_iter != held_locks_.end()) { |
199 | 0 | auto& resource_locks = resource_iter->second; |
200 | | |
201 | | // Remove stolen locks from other environments |
202 | 0 | for (auto lock_iter = resource_locks.begin(); |
203 | 0 | lock_iter != resource_locks.end();) { |
204 | 0 | if ((*lock_iter)->is_stolen() && (*lock_iter)->env() != env) { |
205 | 0 | lock_iter = resource_locks.erase(lock_iter); |
206 | 0 | } else { |
207 | 0 | ++lock_iter; |
208 | 0 | } |
209 | 0 | } |
210 | |
211 | 0 | if (resource_locks.empty()) { |
212 | 0 | held_locks_.erase(resource_iter); |
213 | 0 | } |
214 | 0 | } |
215 | 0 | } |
216 | 0 | } |
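
The collect-then-erase split above keeps each critical section short: pass one only records resource names, and pass two re-takes the mutex per resource and re-checks with find() before mutating, since the table may have changed in between. The idiom in isolation, with plain stand-in types rather than the real lock table:

    #include <map>
    #include <string>
    #include <vector>

    void TwoPhaseErase(std::map<std::string, int>& table,
                       bool (*should_drop)(int)) {
      // Phase 1: snapshot the keys that need work.
      std::vector<std::string> doomed;
      for (const auto& [key, value] : table) {
        if (should_drop(value)) doomed.push_back(key);
      }
      // Phase 2: re-look-up each key, since it may already be gone.
      for (const auto& key : doomed) {
        auto it = table.find(key);
        if (it != table.end()) table.erase(it);
      }
    }
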
217 | | |
218 | | /** |
219 | | * Web Locks algorithm implementation |
220 | | * https://w3c.github.io/web-locks/#algorithms |
221 | | */ |
222 | 0 | void LockManager::ProcessQueue(Environment* env) { |
223 | 0 | Isolate* isolate = env->isolate(); |
224 | 0 | HandleScope handle_scope(isolate); |
225 | 0 | Local<Context> context = env->context(); |
226 | | |
227 | | // Clean up stolen lock entries before scanning the queue |
228 | 0 | CleanupStolenLocks(env); |
229 | |
230 | 0 | while (true) { |
231 | 0 | std::unique_ptr<LockRequest> grantable_request; |
232 | 0 | std::unique_ptr<LockRequest> if_available_request; |
233 | 0 | std::unordered_set<Environment*> other_envs_to_wake; |
234 | | |
235 | | /** |
236 | | * First pass over pending_queue_ |
237 | | * 1- Build first_seen_for_resource: the oldest pending request |
238 | | * for every resource name we encounter |
239 | | * 2- Decide what to do with each entry: |
240 | | * – If it belongs to another Environment, remember that env so we |
241 | | * can wake it later |
242 | | * – For our Environment, pick one of: |
243 | | * * grantable_request – can be granted now |
244 | | * * if_available_request – user asked for ifAvailable and the |
245 | | * resource is currently busy |
246 | | * * otherwise we skip and keep scanning |
247 | | */ |
248 | |
249 | 0 | { |
250 | 0 | std::unordered_map<std::u16string, LockRequest*> first_seen_for_resource; |
251 | |
252 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
253 | 0 | for (auto queue_iter = pending_queue_.begin(); |
254 | 0 | queue_iter != pending_queue_.end(); |
255 | 0 | ++queue_iter) { |
256 | 0 | LockRequest* request = queue_iter->get(); |
257 | | |
258 | | // Collect unique environments to wake up later |
259 | 0 | if (request->env() != env) { |
260 | 0 | other_envs_to_wake.insert(request->env()); |
261 | 0 | } |
262 | | |
263 | | // During a single pass, the first time we see a resource name is the |
264 | | // earliest pending request |
265 | 0 | auto& first_for_resource = first_seen_for_resource[request->name()]; |
266 | 0 | if (first_for_resource == nullptr) { |
267 | 0 | first_for_resource = request; // Mark as first seen for this resource |
268 | 0 | } |
269 | |
|
270 | 0 | bool has_earlier_request_for_same_resource = |
271 | 0 | (first_for_resource != request); |
272 | |
273 | 0 | bool should_wait_for_earlier_requests = false; |
274 | |
275 | 0 | if (has_earlier_request_for_same_resource) { |
276 | | // Check if this request is compatible with the earliest pending |
277 | | // request, first_for_resource |
278 | 0 | if (request->mode() == Lock::Mode::Exclusive || |
279 | 0 | first_for_resource->mode() == Lock::Mode::Exclusive) { |
280 | | // Exclusive locks are incompatible with everything |
281 | 0 | should_wait_for_earlier_requests = true; |
282 | 0 | } |
283 | | // If both are shared, they're compatible and can proceed |
284 | 0 | } |
285 | | |
286 | | // Only process requests from the current environment |
287 | 0 | if (request->env() != env) { |
288 | 0 | continue; |
289 | 0 | } |
290 | | |
291 | 0 | if (should_wait_for_earlier_requests || !IsGrantable(request)) { |
292 | 0 | if (request->if_available()) { |
293 | | // ifAvailable request when resource not available: grant with null |
294 | 0 | if_available_request = std::move(*queue_iter); |
295 | 0 | pending_queue_.erase(queue_iter); |
296 | 0 | break; |
297 | 0 | } |
298 | 0 | continue; |
299 | 0 | } |
300 | | |
301 | | // Found a request that can be granted normally |
302 | 0 | grantable_request = std::move(*queue_iter); |
303 | 0 | pending_queue_.erase(queue_iter); |
304 | 0 | break; |
305 | 0 | } |
306 | 0 | } |
307 | | |
308 | | // Wake each environment only once |
309 | 0 | for (Environment* target_env : other_envs_to_wake) { |
310 | 0 | WakeEnvironment(target_env); |
311 | 0 | } |
312 | | |
313 | | /** |
314 | | * 1- We call the user callback immediately with `null` to signal |
315 | | * that the lock was not granted - check wrapCallback in locks.js |
316 | | * 2- Depending on what the callback returns we settle the two |
317 | | * internal promises |
318 | | * 3- No lock is added to held_locks_ in this path, so nothing to |
319 | | * remove later |
320 | | */ |
321 | 0 | if (if_available_request) { |
322 | 0 | Local<Value> null_arg = Null(isolate); |
323 | 0 | Local<Value> callback_result; |
324 | 0 | { |
325 | 0 | TryCatchScope try_catch_scope(env); |
326 | 0 | if (!if_available_request->callback() |
327 | 0 | ->Call(context, Undefined(isolate), 1, &null_arg) |
328 | 0 | .ToLocal(&callback_result)) { |
329 | | // We don't really need to check the return value here since |
330 | | // we're returning early in either case. |
331 | 0 | USE(RejectBoth(context, |
332 | 0 | if_available_request->waiting_promise(), |
333 | 0 | if_available_request->released_promise(), |
334 | 0 | try_catch_scope.Exception())); |
335 | 0 | return; |
336 | 0 | } |
337 | 0 | } |
338 | 0 | if (callback_result->IsPromise()) { |
339 | 0 | Local<Promise> p = callback_result.As<Promise>(); |
340 | |
341 | 0 | Local<Function> on_fulfilled; |
342 | 0 | Local<Function> on_rejected; |
343 | 0 | CHECK(Function::New(context, |
344 | 0 | OnIfAvailableFulfill, |
345 | 0 | if_available_request->released_promise()) |
346 | 0 | .ToLocal(&on_fulfilled)); |
347 | 0 | CHECK(Function::New(context, |
348 | 0 | OnIfAvailableReject, |
349 | 0 | if_available_request->released_promise()) |
350 | 0 | .ToLocal(&on_rejected)); |
351 | | |
352 | 0 | { |
353 | 0 | TryCatchScope try_catch_scope(env); |
354 | 0 | if (p->Then(context, on_fulfilled, on_rejected).IsEmpty()) { |
355 | 0 | if (!try_catch_scope.CanContinue()) return; |
356 | | |
357 | 0 | Local<Value> err_val; |
358 | 0 | if (try_catch_scope.HasCaught() && |
359 | 0 | !try_catch_scope.Exception().IsEmpty()) { |
360 | 0 | err_val = try_catch_scope.Exception(); |
361 | 0 | } else { |
362 | 0 | err_val = Exception::Error(FIXED_ONE_BYTE_STRING( |
363 | 0 | isolate, "Failed to attach promise handlers")); |
364 | 0 | } |
365 | |
366 | 0 | USE(RejectBoth(context, |
367 | 0 | if_available_request->waiting_promise(), |
368 | 0 | if_available_request->released_promise(), |
369 | 0 | err_val)); |
370 | 0 | return; |
371 | 0 | } |
372 | 0 | } |
373 | | |
374 | | // After handlers are attached, resolve waiting_promise with the |
375 | | // promise. |
376 | 0 | USE(if_available_request->waiting_promise() |
377 | 0 | ->Resolve(context, p) |
378 | 0 | .IsNothing()); |
379 | 0 | return; |
380 | 0 | } |
381 | | |
382 | | // Non-promise callback result: settle both promises right away. |
383 | 0 | if (if_available_request->waiting_promise() |
384 | 0 | ->Resolve(context, callback_result) |
385 | 0 | .IsNothing()) { |
386 | 0 | return; |
387 | 0 | } |
388 | 0 | USE(if_available_request->released_promise() |
389 | 0 | ->Resolve(context, callback_result) |
390 | 0 | .IsNothing()); |
391 | 0 | return; |
392 | 0 | } |
393 | | |
394 | 0 | if (!grantable_request) return; |
395 | | |
396 | | /** |
397 | | * 1- We grant the lock immediately even if other envs hold it |
398 | | * 2- All existing locks with the same name are marked stolen, their |
399 | | * released_promise is rejected, and their owners are woken so they |
400 | | * can observe the rejection |
401 | | * 3- We remove stolen locks that belong to this env right away; other |
402 | | * envs will clean up in their next queue pass |
403 | | */ |
404 | 0 | if (grantable_request->steal()) { |
405 | 0 | std::unordered_set<Environment*> envs_to_notify; |
406 | |
407 | 0 | { |
408 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
409 | 0 | auto held_locks_iter = held_locks_.find(grantable_request->name()); |
410 | 0 | if (held_locks_iter != held_locks_.end()) { |
411 | | // Mark existing locks as stolen and collect environments to notify |
412 | 0 | for (auto& existing_lock : held_locks_iter->second) { |
413 | 0 | existing_lock->mark_stolen(); |
414 | 0 | envs_to_notify.insert(existing_lock->env()); |
415 | |
416 | 0 | Local<Value> error = |
417 | 0 | Exception::Error(FIXED_ONE_BYTE_STRING(isolate, "LOCK_STOLEN")); |
418 | |
419 | 0 | if (existing_lock->released_promise() |
420 | 0 | ->Reject(context, error) |
421 | 0 | .IsNothing()) |
422 | 0 | return; |
423 | 0 | } |
424 | | |
425 | | // Remove stolen locks from current environment immediately |
426 | 0 | for (auto lock_iter = held_locks_iter->second.begin(); |
427 | 0 | lock_iter != held_locks_iter->second.end();) { |
428 | 0 | if ((*lock_iter)->env() == env) { |
429 | 0 | lock_iter = held_locks_iter->second.erase(lock_iter); |
430 | 0 | } else { |
431 | 0 | ++lock_iter; |
432 | 0 | } |
433 | 0 | } |
434 | |
435 | 0 | if (held_locks_iter->second.empty()) { |
436 | 0 | held_locks_.erase(held_locks_iter); |
437 | 0 | } |
438 | 0 | } |
439 | 0 | } |
440 | | |
441 | | // Wake other environments |
442 | 0 | for (Environment* target_env : envs_to_notify) { |
443 | 0 | if (target_env != env) { |
444 | 0 | WakeEnvironment(target_env); |
445 | 0 | } |
446 | 0 | } |
447 | 0 | } |
448 | | |
449 | | // Create and store the new granted lock |
450 | 0 | auto granted_lock = |
451 | 0 | std::make_shared<Lock>(env, |
452 | 0 | grantable_request->name(), |
453 | 0 | grantable_request->mode(), |
454 | 0 | grantable_request->client_id(), |
455 | 0 | grantable_request->waiting_promise(), |
456 | 0 | grantable_request->released_promise()); |
457 | 0 | { |
458 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
459 | 0 | held_locks_[grantable_request->name()].push_back(granted_lock); |
460 | 0 | } |
461 | | |
462 | | // Build the lock info object that is passed to the user callback |
463 | 0 | Local<Object> lock_info; |
464 | 0 | if (!CreateLockInfoObject(env, *grantable_request).ToLocal(&lock_info)) { |
465 | 0 | return; |
466 | 0 | } |
467 | | |
468 | | // Call user callback |
469 | 0 | Local<Value> callback_arg = lock_info; |
470 | 0 | Local<Value> callback_result; |
471 | 0 | { |
472 | 0 | TryCatchScope try_catch_scope(env); |
473 | 0 | if (!grantable_request->callback() |
474 | 0 | ->Call(context, Undefined(isolate), 1, &callback_arg) |
475 | 0 | .ToLocal(&callback_result)) { |
476 | | // We don't really need to check the return value here since |
477 | | // we're returning early in either case. |
478 | 0 | USE(RejectBoth(context, |
479 | 0 | grantable_request->waiting_promise(), |
480 | 0 | grantable_request->released_promise(), |
481 | 0 | try_catch_scope.Exception())); |
482 | 0 | return; |
483 | 0 | } |
484 | 0 | } |
485 | | |
486 | | // Create LockHolder BaseObjects to safely manage the lock's lifetime |
487 | | // until the user's callback promise settles. |
488 | 0 | auto lock_resolve_holder = LockHolder::Create(env, granted_lock); |
489 | 0 | auto lock_reject_holder = LockHolder::Create(env, granted_lock); |
490 | 0 | Local<Function> on_fulfilled_callback; |
491 | 0 | Local<Function> on_rejected_callback; |
492 | | |
493 | | // Create fulfilled callback first |
494 | 0 | if (!Function::New( |
495 | 0 | context, OnLockCallbackFulfilled, lock_resolve_holder->object()) |
496 | 0 | .ToLocal(&on_fulfilled_callback)) { |
497 | 0 | return; |
498 | 0 | } |
499 | | |
500 | | // Create rejected callback second |
501 | 0 | if (!Function::New( |
502 | 0 | context, OnLockCallbackRejected, lock_reject_holder->object()) |
503 | 0 | .ToLocal(&on_rejected_callback)) { |
504 | 0 | return; |
505 | 0 | } |
506 | | |
507 | | // Handle promise chain |
508 | 0 | if (callback_result->IsPromise()) { |
509 | 0 | Local<Promise> promise = callback_result.As<Promise>(); |
510 | 0 | { |
511 | 0 | TryCatchScope try_catch_scope(env); |
512 | 0 | if (promise->Then(context, on_fulfilled_callback, on_rejected_callback) |
513 | 0 | .IsEmpty()) { |
514 | 0 | if (!try_catch_scope.CanContinue()) return; |
515 | | |
516 | 0 | Local<Value> err_val; |
517 | 0 | if (try_catch_scope.HasCaught() && |
518 | 0 | !try_catch_scope.Exception().IsEmpty()) { |
519 | 0 | err_val = try_catch_scope.Exception(); |
520 | 0 | } else { |
521 | 0 | err_val = Exception::Error(FIXED_ONE_BYTE_STRING( |
522 | 0 | isolate, "Failed to attach promise handlers")); |
523 | 0 | } |
524 | |
525 | 0 | USE(RejectBoth(context, |
526 | 0 | grantable_request->waiting_promise(), |
527 | 0 | grantable_request->released_promise(), |
528 | 0 | err_val)); |
529 | 0 | return; |
530 | 0 | } |
531 | 0 | } |
532 | | |
533 | | // Lock granted: waiting_promise resolves now with the promise returned |
534 | | // by the callback; on_fulfilled/on_rejected will release the lock when |
535 | | // that promise settles. |
536 | 0 | if (grantable_request->waiting_promise() |
537 | 0 | ->Resolve(context, callback_result) |
538 | 0 | .IsNothing()) { |
539 | 0 | return; |
540 | 0 | } |
541 | 0 | } else { |
542 | 0 | if (grantable_request->waiting_promise() |
543 | 0 | ->Resolve(context, callback_result) |
544 | 0 | .IsNothing()) { |
545 | 0 | return; |
546 | 0 | } |
547 | 0 | Local<Value> promise_args[] = {callback_result}; |
548 | 0 | if (on_fulfilled_callback |
549 | 0 | ->Call(context, Undefined(isolate), 1, promise_args) |
550 | 0 | .IsEmpty()) { |
551 | | // Callback threw an error; handle it like a rejected promise. |
552 | | // The error is already propagated through the TryCatch in the |
553 | | // callback. |
554 | 0 | return; |
555 | 0 | } |
556 | 0 | } |
557 | 0 | } |
558 | 0 | } |
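
The fairness rule in the first pass reduces to one comparison: a request is only considered when no earlier pending request for the same resource conflicts with it, and two requests conflict unless both are shared. A reduced sketch of that scan over a plain vector, with a hypothetical Req type and ignoring environments, steal, ifAvailable, and currently held locks:

    #include <optional>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Req {
      std::u16string name;
      bool exclusive;
    };

    // Index of the first request that may proceed, honoring FIFO order
    // per resource name.
    std::optional<size_t> FirstGrantable(const std::vector<Req>& queue) {
      std::unordered_map<std::u16string, const Req*> first_seen;
      for (size_t i = 0; i < queue.size(); i++) {
        const Req& req = queue[i];
        auto [it, inserted] = first_seen.try_emplace(req.name, &req);
        if (!inserted && (req.exclusive || it->second->exclusive)) {
          continue;  // must wait behind the earlier conflicting request
        }
        return i;
      }
      return std::nullopt;
    }
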
559 | | |
560 | | /** |
561 | | * name : string – resource identifier |
562 | | * clientId : string – client identifier |
563 | | * mode : string – lock mode |
564 | | * steal : boolean – whether to steal existing locks |
565 | | * ifAvailable : boolean – only grant if immediately available |
566 | | * callback : Function – JS callback |
567 | | */ |
568 | 0 | void LockManager::Request(const FunctionCallbackInfo<Value>& args) { |
569 | 0 | Environment* env = Environment::GetCurrent(args); |
570 | 0 | Isolate* isolate = env->isolate(); |
571 | 0 | HandleScope scope(isolate); |
572 | 0 | Local<Context> context = env->context(); |
573 | |
574 | 0 | CHECK_EQ(args.Length(), 6); |
575 | 0 | CHECK(args[0]->IsString()); // name |
576 | 0 | CHECK(args[1]->IsString()); // clientId |
577 | 0 | CHECK(args[2]->IsString()); // mode |
578 | 0 | CHECK(args[3]->IsBoolean()); // steal |
579 | 0 | CHECK(args[4]->IsBoolean()); // ifAvailable |
580 | 0 | CHECK(args[5]->IsFunction()); // callback |
581 | | |
582 | 0 | TwoByteValue resource_name(isolate, args[0]); |
583 | 0 | Utf8Value client_id(isolate, args[1]); |
584 | 0 | Utf8Value mode(isolate, args[2]); |
585 | 0 | bool steal = args[3]->BooleanValue(isolate); |
586 | 0 | bool if_available = args[4]->BooleanValue(isolate); |
587 | 0 | Local<Function> callback = args[5].As<Function>(); |
588 | |
589 | 0 | Local<Promise::Resolver> waiting_promise; |
590 | 0 | Local<Promise::Resolver> released_promise; |
591 | |
592 | 0 | if (!Promise::Resolver::New(context).ToLocal(&waiting_promise) || |
593 | 0 | !Promise::Resolver::New(context).ToLocal(&released_promise)) { |
594 | 0 | return; |
595 | 0 | } |
596 | | |
597 | | // Mark both internal promises as handled to prevent unhandled rejection |
598 | | // warnings |
599 | 0 | waiting_promise->GetPromise()->MarkAsHandled(); |
600 | 0 | released_promise->GetPromise()->MarkAsHandled(); |
601 | |
602 | 0 | LockManager* manager = GetCurrent(); |
603 | 0 | { |
604 | 0 | Mutex::ScopedLock scoped_lock(manager->mutex_); |
605 | | |
606 | | // Register cleanup hook for the environment only once |
607 | 0 | if (manager->registered_envs_.insert(env).second) { |
608 | 0 | env->AddCleanupHook(LockManager::OnEnvironmentCleanup, env); |
609 | 0 | } |
610 | |
611 | 0 | auto lock_request = std::make_unique<LockRequest>( |
612 | 0 | env, |
613 | 0 | waiting_promise, |
614 | 0 | released_promise, |
615 | 0 | callback, |
616 | 0 | resource_name.ToU16String(), |
617 | 0 | mode.ToStringView() == "shared" ? Lock::Mode::Shared |
618 | 0 | : Lock::Mode::Exclusive, |
619 | 0 | client_id.ToString(), |
620 | 0 | steal, |
621 | 0 | if_available); |
622 | | // Steal requests get priority by going to the front of the queue |
623 | 0 | if (steal) { |
624 | 0 | manager->pending_queue_.emplace_front(std::move(lock_request)); |
625 | 0 | } else { |
626 | 0 | manager->pending_queue_.push_back(std::move(lock_request)); |
627 | 0 | } |
628 | 0 | } |
629 | |
630 | 0 | manager->ProcessQueue(env); |
631 | |
632 | 0 | args.GetReturnValue().Set(released_promise->GetPromise()); |
633 | 0 | } |
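
Queue placement is the only special-casing Request does for steal: a stealing request is examined before everything already waiting, while everyone else keeps FIFO order. That policy in isolation, with a hypothetical PendingReq type:

    #include <deque>
    #include <string>

    struct PendingReq {
      std::u16string name;
      bool steal;
    };

    void Enqueue(std::deque<PendingReq>& pending, PendingReq req) {
      if (req.steal) {
        pending.push_front(std::move(req));  // stealers jump the line
      } else {
        pending.push_back(std::move(req));   // normal requests wait in order
      }
    }
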
634 | | |
635 | 0 | void LockManager::Query(const FunctionCallbackInfo<Value>& args) { |
636 | 0 | Environment* env = Environment::GetCurrent(args); |
637 | 0 | Isolate* isolate = env->isolate(); |
638 | 0 | HandleScope scope(isolate); |
639 | 0 | Local<Context> context = env->context(); |
640 | |
641 | 0 | Local<Promise::Resolver> resolver; |
642 | 0 | if (!Promise::Resolver::New(context).ToLocal(&resolver)) { |
643 | 0 | return; |
644 | 0 | } |
645 | | |
646 | | // Always set the return value first so JavaScript gets a promise |
647 | 0 | args.GetReturnValue().Set(resolver->GetPromise()); |
648 | |
649 | 0 | LocalVector<Value> held_list(isolate); |
650 | 0 | LocalVector<Value> pending_list(isolate); |
651 | 0 | LockManager* manager = GetCurrent(); |
652 | |
653 | 0 | { |
654 | 0 | Mutex::ScopedLock scoped_lock(manager->mutex_); |
655 | |
656 | 0 | Local<Object> lock_info; |
657 | 0 | for (const auto& resource_entry : manager->held_locks_) { |
658 | 0 | for (const auto& held_lock : resource_entry.second) { |
659 | 0 | if (held_lock->env() == env) { |
660 | 0 | if (!CreateLockInfoObject(env, *held_lock).ToLocal(&lock_info)) { |
661 | | // There should already be a pending exception scheduled. |
662 | 0 | return; |
663 | 0 | } |
664 | 0 | held_list.push_back(lock_info); |
665 | 0 | } |
666 | 0 | } |
667 | 0 | } |
668 | | |
669 | 0 | for (const auto& pending_request : manager->pending_queue_) { |
670 | 0 | if (pending_request->env() == env) { |
671 | 0 | if (!CreateLockInfoObject(env, *pending_request).ToLocal(&lock_info)) { |
672 | | // There should already be a pending exception scheduled. |
673 | 0 | return; |
674 | 0 | } |
675 | 0 | pending_list.push_back(lock_info); |
676 | 0 | } |
677 | 0 | } |
678 | 0 | } |
679 | | |
680 | 0 | auto tmpl = env->lock_query_template(); |
681 | 0 | if (tmpl.IsEmpty()) { |
682 | 0 | static constexpr std::string_view names[] = { |
683 | 0 | "held", |
684 | 0 | "pending", |
685 | 0 | }; |
686 | 0 | tmpl = DictionaryTemplate::New(isolate, names); |
687 | 0 | env->set_lock_query_template(tmpl); |
688 | 0 | } |
689 | |
690 | 0 | MaybeLocal<Value> values[] = { |
691 | 0 | Array::New(isolate, held_list.data(), held_list.size()), |
692 | 0 | Array::New(isolate, pending_list.data(), pending_list.size()), |
693 | 0 | }; |
694 | |
695 | 0 | Local<Object> result; |
696 | 0 | if (NewDictionaryInstance(env->context(), tmpl, values).ToLocal(&result)) { |
697 | | // There's no reason to check IsNothing here since we're just returning. |
698 | 0 | USE(resolver->Resolve(context, result)); |
699 | 0 | } |
700 | 0 | } |
701 | | |
702 | | // Runs after the user callback (or its returned promise) settles. |
703 | | void LockManager::ReleaseLockAndProcessQueue(Environment* env, |
704 | | std::shared_ptr<Lock> lock, |
705 | | Local<Value> callback_result, |
706 | 0 | bool was_rejected) { |
707 | 0 | { |
708 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
709 | 0 | ReleaseLock(lock.get()); |
710 | 0 | } |
711 | |
712 | 0 | Local<Context> context = env->context(); |
713 | | |
714 | | // For stolen locks, the released_promise was already rejected when marked as |
715 | | // stolen. |
716 | 0 | if (!lock->is_stolen()) { |
717 | 0 | if (was_rejected) { |
718 | | // Propagate rejection from the user callback |
719 | 0 | if (lock->released_promise() |
720 | 0 | ->Reject(context, callback_result) |
721 | 0 | .IsNothing()) |
722 | 0 | return; |
723 | 0 | } else { |
724 | | // Propagate fulfillment |
725 | 0 | if (lock->released_promise() |
726 | 0 | ->Resolve(context, callback_result) |
727 | 0 | .IsNothing()) |
728 | 0 | return; |
729 | 0 | } |
730 | 0 | } |
731 | | |
732 | 0 | ProcessQueue(env); |
733 | 0 | } |
734 | | |
735 | | // Remove a lock from held_locks_ when it's no longer needed |
736 | 0 | void LockManager::ReleaseLock(Lock* lock) { |
737 | 0 | const std::u16string& resource_name = lock->name(); |
738 | 0 | auto resource_iter = held_locks_.find(resource_name); |
739 | 0 | if (resource_iter == held_locks_.end()) return; |
740 | | |
741 | 0 | auto& resource_locks = resource_iter->second; |
742 | 0 | for (auto lock_iter = resource_locks.begin(); |
743 | 0 | lock_iter != resource_locks.end(); |
744 | 0 | ++lock_iter) { |
745 | 0 | if (lock_iter->get() == lock) { |
746 | 0 | resource_locks.erase(lock_iter); |
747 | 0 | if (resource_locks.empty()) held_locks_.erase(resource_iter); |
748 | 0 | break; |
749 | 0 | } |
750 | 0 | } |
751 | 0 | } |
752 | | |
753 | | // Wake up the target Environment's event loop |
754 | 0 | void LockManager::WakeEnvironment(Environment* target_env) { |
755 | 0 | if (target_env == nullptr || target_env->is_stopping()) return; |
756 | | |
757 | | // Schedule ProcessQueue in the target Environment on its event loop. |
758 | 0 | target_env->SetImmediateThreadsafe([](Environment* env_to_wake) { |
759 | 0 | if (env_to_wake != nullptr && !env_to_wake->is_stopping()) { |
760 | 0 | LockManager::GetCurrent()->ProcessQueue(env_to_wake); |
761 | 0 | } |
762 | 0 | }); |
763 | 0 | } |
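
SetImmediateThreadsafe hands a callback to another Environment and wakes that Environment's event loop so the callback runs on its own thread. As a rough analogy only (the real mechanism goes through Node's native immediate queue and libuv, not this class), the shape is a mutex-guarded task queue plus a wakeup signal:

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <queue>

    class LoopSketch {
     public:
      // Called from any thread: enqueue a task and wake the loop.
      void Post(std::function<void()> task) {
        {
          std::lock_guard<std::mutex> guard(mutex_);
          tasks_.push(std::move(task));
        }
        cv_.notify_one();
      }

      // Called on the loop's own thread: wait for a task and run it.
      void RunOne() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        std::function<void()> task = std::move(tasks_.front());
        tasks_.pop();
        lock.unlock();
        task();
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      std::queue<std::function<void()>> tasks_;
    };
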
764 | | |
765 | | // Remove all held locks and pending requests that belong to an Environment |
766 | | // that is being destroyed |
767 | 0 | void LockManager::CleanupEnvironment(Environment* env_to_cleanup) { |
768 | 0 | Mutex::ScopedLock scoped_lock(mutex_); |
769 | | |
770 | | // Remove every held lock that belongs to this Environment. |
771 | 0 | for (auto resource_iter = held_locks_.begin(); |
772 | 0 | resource_iter != held_locks_.end();) { |
773 | 0 | auto& resource_locks = resource_iter->second; |
774 | 0 | for (auto lock_iter = resource_locks.begin(); |
775 | 0 | lock_iter != resource_locks.end();) { |
776 | 0 | if ((*lock_iter)->env() == env_to_cleanup) { |
777 | 0 | lock_iter = resource_locks.erase(lock_iter); |
778 | 0 | } else { |
779 | 0 | ++lock_iter; |
780 | 0 | } |
781 | 0 | } |
782 | 0 | if (resource_locks.empty()) { |
783 | 0 | resource_iter = held_locks_.erase(resource_iter); |
784 | 0 | } else { |
785 | 0 | ++resource_iter; |
786 | 0 | } |
787 | 0 | } |
788 | | |
789 | | // Remove every pending request submitted by this Environment. |
790 | 0 | for (auto request_iter = pending_queue_.begin(); |
791 | 0 | request_iter != pending_queue_.end();) { |
792 | 0 | if ((*request_iter)->env() == env_to_cleanup) { |
793 | 0 | request_iter = pending_queue_.erase(request_iter); |
794 | 0 | } else { |
795 | 0 | ++request_iter; |
796 | 0 | } |
797 | 0 | } |
798 | | |
799 | | // Finally, remove it from registered_envs_ |
800 | 0 | registered_envs_.erase(env_to_cleanup); |
801 | 0 | } |
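
The manual erase-while-iterating loops are the pre-C++20 idiom; held_locks_ keeps the hand-written form because empty buckets must be dropped as it goes. For a flat container like the pending queue, the same sweep could be written with C++20 std::erase_if; a sketch with a stand-in request type:

    #include <deque>
    #include <memory>

    struct RequestSketch {
      int owner_id;  // stand-in for the owning Environment*
    };

    void DropRequestsOwnedBy(std::deque<std::unique_ptr<RequestSketch>>& queue,
                             int owner_id) {
      std::erase_if(queue, [owner_id](const std::unique_ptr<RequestSketch>& r) {
        return r->owner_id == owner_id;
      });
    }
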
802 | | |
803 | | // Cleanup hook wrapper |
804 | 0 | void LockManager::OnEnvironmentCleanup(void* arg) { |
805 | 0 | Environment* env = static_cast<Environment*>(arg); |
806 | 0 | LockManager::GetCurrent()->CleanupEnvironment(env); |
807 | 0 | } |
808 | | |
809 | | LockManager LockManager::current_; |
810 | | |
811 | | void CreatePerIsolateProperties(IsolateData* isolate_data, |
812 | 35 | Local<ObjectTemplate> target) { |
813 | 35 | Isolate* isolate = isolate_data->isolate(); |
814 | 35 | SetMethod(isolate, target, "request", LockManager::Request); |
815 | 35 | SetMethod(isolate, target, "query", LockManager::Query); |
816 | | |
817 | 35 | PropertyAttribute read_only = static_cast<PropertyAttribute>( |
818 | 35 | PropertyAttribute::ReadOnly | PropertyAttribute::DontDelete); |
819 | 35 | target->Set(FIXED_ONE_BYTE_STRING(isolate, "LOCK_MODE_SHARED"), |
820 | 35 | FIXED_ONE_BYTE_STRING(isolate, "shared"), |
821 | 35 | read_only); |
822 | 35 | target->Set(FIXED_ONE_BYTE_STRING(isolate, "LOCK_MODE_EXCLUSIVE"), |
823 | 35 | FIXED_ONE_BYTE_STRING(isolate, "exclusive"), |
824 | 35 | read_only); |
825 | 35 | target->Set(FIXED_ONE_BYTE_STRING(isolate, "LOCK_STOLEN_ERROR"), |
826 | 35 | FIXED_ONE_BYTE_STRING(isolate, "LOCK_STOLEN"), |
827 | 35 | read_only); |
828 | 35 | } |
829 | | |
830 | | void CreatePerContextProperties(Local<Object> target, |
831 | | Local<Value> unused, |
832 | | Local<Context> context, |
833 | 0 | void* priv) {} |
834 | | |
835 | 0 | void RegisterExternalReferences(ExternalReferenceRegistry* registry) { |
836 | 0 | registry->Register(LockManager::Request); |
837 | 0 | registry->Register(LockManager::Query); |
838 | 0 | registry->Register(OnLockCallbackFulfilled); |
839 | 0 | registry->Register(OnLockCallbackRejected); |
840 | 0 | registry->Register(OnIfAvailableFulfill); |
841 | 0 | registry->Register(OnIfAvailableReject); |
842 | 0 | } |
843 | | |
844 | 0 | void LockHolder::MemoryInfo(node::MemoryTracker* tracker) const { |
845 | 0 | tracker->TrackField("lock", lock_); |
846 | 0 | } |
847 | | |
848 | | BaseObjectPtr<LockHolder> LockHolder::Create(Environment* env, |
849 | 0 | std::shared_ptr<Lock> lock) { |
850 | 0 | Local<Object> obj; |
851 | 0 | if (!GetConstructorTemplate(env) |
852 | 0 | ->InstanceTemplate() |
853 | 0 | ->NewInstance(env->context()) |
854 | 0 | .ToLocal(&obj)) { |
855 | 0 | return nullptr; |
856 | 0 | } |
857 | | |
858 | 0 | return MakeBaseObject<LockHolder>(env, obj, std::move(lock)); |
859 | 0 | } |
860 | | |
861 | 0 | Local<FunctionTemplate> LockHolder::GetConstructorTemplate(Environment* env) { |
862 | 0 | IsolateData* isolate_data = env->isolate_data(); |
863 | 0 | Local<FunctionTemplate> tmpl = |
864 | 0 | isolate_data->lock_holder_constructor_template(); |
865 | 0 | if (tmpl.IsEmpty()) { |
866 | 0 | Isolate* isolate = isolate_data->isolate(); |
867 | 0 | tmpl = NewFunctionTemplate(isolate, nullptr); |
868 | 0 | tmpl->SetClassName(FIXED_ONE_BYTE_STRING(isolate, "LockHolder")); |
869 | 0 | tmpl->InstanceTemplate()->SetInternalFieldCount( |
870 | 0 | LockHolder::kInternalFieldCount); |
871 | 0 | isolate_data->set_lock_holder_constructor_template(tmpl); |
872 | 0 | } |
873 | 0 | return tmpl; |
874 | 0 | } |
875 | | |
876 | | } // namespace node::worker::locks |
877 | | |
878 | | NODE_BINDING_CONTEXT_AWARE_INTERNAL( |
879 | | locks, node::worker::locks::CreatePerContextProperties) |
880 | | NODE_BINDING_PER_ISOLATE_INIT(locks, |
881 | | node::worker::locks::CreatePerIsolateProperties) |
882 | | NODE_BINDING_EXTERNAL_REFERENCE(locks, |
883 | | node::worker::locks::RegisterExternalReferences) |