/src/mozilla-central/ipc/glue/MessageChannel.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
2 | | * vim: sw=4 ts=4 et : |
3 | | */ |
4 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
5 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
6 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
7 | | |
8 | | #include "mozilla/ipc/MessageChannel.h" |
9 | | |
10 | | #include "mozilla/Assertions.h" |
11 | | #include "mozilla/DebugOnly.h" |
12 | | #include "mozilla/dom/ScriptSettings.h" |
13 | | #include "mozilla/ipc/ProtocolUtils.h" |
14 | | #include "mozilla/Logging.h" |
15 | | #include "mozilla/Move.h" |
16 | | #include "mozilla/Mutex.h" |
17 | | #include "mozilla/ScopeExit.h" |
18 | | #include "mozilla/Sprintf.h" |
19 | | #include "mozilla/Telemetry.h" |
20 | | #include "mozilla/TimeStamp.h" |
21 | | #include "mozilla/UniquePtr.h" |
22 | | #include "nsAppRunner.h" |
23 | | #include "nsAutoPtr.h" |
24 | | #include "nsContentUtils.h" |
25 | | #include "nsDataHashtable.h" |
26 | | #include "nsDebug.h" |
27 | | #include "nsISupportsImpl.h" |
28 | | #include "nsPrintfCString.h" |
29 | | #include <math.h> |
30 | | |
31 | | #ifdef MOZ_TASK_TRACER |
32 | | #include "GeckoTaskTracer.h" |
33 | | using namespace mozilla::tasktracer; |
34 | | #endif |
35 | | |
36 | | // Undo the damage done by mozzconf.h |
37 | | #undef compress |
38 | | |
39 | | static mozilla::LazyLogModule sLogModule("ipc"); |
40 | 0 | #define IPC_LOG(...) MOZ_LOG(sLogModule, LogLevel::Debug, (__VA_ARGS__)) |
41 | | |
42 | | /* |
43 | | * IPC design: |
44 | | * |
45 | | * There are three kinds of messages: async, sync, and intr. Sync and intr |
46 | | * messages are blocking. |
47 | | * |
48 | | * Terminology: To dispatch a message Foo is to run the RecvFoo code for |
49 | | * it. This is also called "handling" the message. |
50 | | * |
51 | | * Sync and async messages can sometimes "nest" inside other sync messages |
52 | | * (i.e., while waiting for the sync reply, we can dispatch the inner |
53 | | * message). Intr messages cannot nest. The three possible nesting levels are |
54 | | * NOT_NESTED, NESTED_INSIDE_SYNC, and NESTED_INSIDE_CPOW. The intended uses |
55 | | * are: |
56 | | * NOT_NESTED - most messages. |
57 | | * NESTED_INSIDE_SYNC - CPOW-related messages, which are always sync |
58 | | * and can go in either direction. |
59 | | * NESTED_INSIDE_CPOW - messages where we don't want to dispatch |
60 | | * incoming CPOWs while waiting for the response. |
61 | | * These nesting levels are ordered: NOT_NESTED, NESTED_INSIDE_SYNC, |
62 | | * NESTED_INSIDE_CPOW. Async messages cannot be NESTED_INSIDE_SYNC but they can |
63 | | * be NESTED_INSIDE_CPOW. |
64 | | * |
65 | | * To avoid jank, the parent process is not allowed to send NOT_NESTED sync messages. |
66 | | * When a process is waiting for a response to a sync message |
67 | | * M0, it will dispatch an incoming message M if: |
68 | | * 1. M has a higher nesting level than M0, or |
69 | | * 2. if M has the same nesting level as M0 and we're in the child, or |
70 | | * 3. if M has the same nesting level as M0 and it was sent by the other side |
71 | | * while dispatching M0. |
72 | | * The idea is that messages with higher nesting should take precedence. The |
73 | | * purpose of rule 2 is to handle a race where both processes send to each other |
74 | | * simultaneously. In this case, we resolve the race in favor of the parent (so |
75 | | * the child dispatches first). |
76 | | * |
77 | | * Messages satisfy the following properties: |
78 | | * A. When waiting for a response to a sync message, we won't dispatch any |
79 | | * messages of a lower nesting level. |
80 | | * B. Messages of the same nesting level will be dispatched roughly in the |
81 | | * order they were sent. The exception is when the parent and child send |
82 | | * sync messages to each other simultaneously. In this case, the parent's |
83 | | * message is dispatched first. While it is dispatched, the child may send |
84 | | * further nested messages, and these messages may be dispatched before the |
85 | | * child's original message. We can consider ordering to be preserved here |
86 | | * because we pretend that the child's original message wasn't sent until |
87 | | * after the parent's message is finished being dispatched. |
88 | | * |
89 | | * When waiting for a sync message reply, we dispatch an async message only if |
90 | | * it is NESTED_INSIDE_CPOW. Normally NESTED_INSIDE_CPOW async |
91 | | * messages are sent only from the child. However, the parent can send |
92 | | * NESTED_INSIDE_CPOW async messages when it is creating a bridged protocol. |
93 | | * |
94 | | * Intr messages are blocking and can nest, but they don't participate in the |
95 | | * nesting levels. While waiting for an intr response, all incoming messages are |
96 | | * dispatched until a response is received. When two intr messages race with |
97 | | * each other, a similar scheme is used to ensure that one side wins. The |
98 | | * winning side is chosen based on the message type. |
99 | | * |
100 | | * Intr messages differ from sync messages in that, while sending an intr |
101 | | * message, we may dispatch an async message. This causes some additional |
102 | | * complexity. One issue is that replies can be received out of order. It's also |
103 | | * more difficult to determine whether one message is nested inside |
104 | | * another. Consequently, intr handling uses mOutOfTurnReplies and |
105 | | * mRemoteStackDepthGuess, which are not needed for sync messages. |
106 | | */ |
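The dispatch rules 1-3 above can be condensed into a small decision function. The following standalone sketch is illustrative only: the enum and ShouldDispatchWhileAwaiting are invented names, not part of MessageChannel, and the real decision also consults transaction state tracked later in this file.

    #include <cassert>

    // Illustrative nesting levels; the real values live in IPC::Message.
    enum NestingLevel { NOT_NESTED = 0, NESTED_INSIDE_SYNC = 1, NESTED_INSIDE_CPOW = 2 };

    // Would we dispatch incoming message M while blocked on sync message M0?
    bool ShouldDispatchWhileAwaiting(NestingLevel m, NestingLevel m0,
                                     bool weAreTheChild, bool sentWhileDispatchingM0)
    {
        if (m > m0)                                   // rule 1: higher nesting wins
            return true;
        if (m == m0 && weAreTheChild)                 // rule 2: race resolved in the parent's favor
            return true;
        if (m == m0 && sentWhileDispatchingM0)        // rule 3: traffic nested inside M0
            return true;
        return false;                                 // otherwise defer until M0 completes
    }

    int main()
    {
        // A CPOW-level message always interrupts a NESTED_INSIDE_SYNC wait.
        assert(ShouldDispatchWhileAwaiting(NESTED_INSIDE_CPOW, NESTED_INSIDE_SYNC, false, false));
        // The parent defers a same-level message that was not sent while dispatching M0.
        assert(!ShouldDispatchWhileAwaiting(NESTED_INSIDE_SYNC, NESTED_INSIDE_SYNC, false, false));
        return 0;
    }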
107 | | |
108 | | using namespace mozilla; |
109 | | using namespace mozilla::ipc; |
110 | | using namespace std; |
111 | | |
112 | | using mozilla::dom::AutoNoJSAPI; |
113 | | using mozilla::dom::ScriptSettingsInitialized; |
114 | | using mozilla::MonitorAutoLock; |
115 | | using mozilla::MonitorAutoUnlock; |
116 | | |
117 | | #define IPC_ASSERT(_cond, ...) \ |
118 | 0 | do { \ |
119 | 0 | if (!(_cond)) \ |
120 | 0 | DebugAbort(__FILE__, __LINE__, #_cond,## __VA_ARGS__); \ |
121 | 0 | } while (0) |
122 | | |
123 | | static MessageChannel* gParentProcessBlocker; |
124 | | |
125 | | namespace mozilla { |
126 | | namespace ipc { |
127 | | |
128 | | static const uint32_t kMinTelemetryMessageSize = 4096; |
129 | | |
130 | | // Note: we round the time we spend to the nearest millisecond. So a min value |
131 | | // of 1 ms actually captures from 500us and above. |
132 | | static const uint32_t kMinTelemetryIPCWriteLatencyMs = 1; |
133 | | |
134 | | // Note: we round the time we spend waiting for a response to the nearest |
135 | | // millisecond. So a min value of 1 ms actually captures from 500us and above. |
136 | | // This is used for both the sending and receiving side telemetry for sync IPC, |
137 | | // (IPC_SYNC_MAIN_LATENCY_MS and IPC_SYNC_RECEIVE_MS). |
138 | | static const uint32_t kMinTelemetrySyncIPCLatencyMs = 1; |
139 | | |
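A quick illustration of the rounding note above; the sample durations and the constant name here are made up, only the round-to-nearest-millisecond behavior matches the comments.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t kMinLatencyMs = 1;             // same role as the constants above
        const double elapsedMs[] = {0.4, 0.5, 1.6};   // made-up sample durations
        for (double ms : elapsedMs) {
            uint32_t rounded = static_cast<uint32_t>(std::round(ms));
            std::printf("%.1f ms -> %u (%s)\n", ms, rounded,
                        rounded >= kMinLatencyMs ? "recorded" : "skipped");
        }
        return 0;   // 0.4 ms is skipped; 0.5 ms and above round to >= 1 and are recorded
    }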
140 | | const int32_t MessageChannel::kNoTimeout = INT32_MIN; |
141 | | |
142 | | // static |
143 | | bool MessageChannel::sIsPumpingMessages = false; |
144 | | |
145 | | enum Direction |
146 | | { |
147 | | IN_MESSAGE, |
148 | | OUT_MESSAGE |
149 | | }; |
150 | | |
151 | | class MessageChannel::InterruptFrame |
152 | | { |
153 | | private: |
154 | | enum Semantics |
155 | | { |
156 | | INTR_SEMS, |
157 | | SYNC_SEMS, |
158 | | ASYNC_SEMS |
159 | | }; |
160 | | |
161 | | public: |
162 | | InterruptFrame(Direction direction, const Message* msg) |
163 | | : mMessageName(msg->name()), |
164 | | mMessageRoutingId(msg->routing_id()), |
165 | | mMesageSemantics(msg->is_interrupt() ? INTR_SEMS : |
166 | | msg->is_sync() ? SYNC_SEMS : |
167 | | ASYNC_SEMS), |
168 | | mDirection(direction), |
169 | | mMoved(false) |
170 | 0 | { |
171 | 0 | MOZ_RELEASE_ASSERT(mMessageName); |
172 | 0 | } |
173 | | |
174 | | InterruptFrame(InterruptFrame&& aOther) |
175 | 0 | { |
176 | 0 | MOZ_RELEASE_ASSERT(aOther.mMessageName); |
177 | 0 | mMessageName = aOther.mMessageName; |
178 | 0 | aOther.mMessageName = nullptr; |
179 | 0 | mMoved = aOther.mMoved; |
180 | 0 | aOther.mMoved = true; |
181 | 0 |
|
182 | 0 | mMessageRoutingId = aOther.mMessageRoutingId; |
183 | 0 | mMesageSemantics = aOther.mMesageSemantics; |
184 | 0 | mDirection = aOther.mDirection; |
185 | 0 | } |
186 | | |
187 | | ~InterruptFrame() |
188 | 0 | { |
189 | 0 | MOZ_RELEASE_ASSERT(mMessageName || mMoved); |
190 | 0 | } |
191 | | |
192 | | InterruptFrame& operator=(InterruptFrame&& aOther) |
193 | 0 | { |
194 | 0 | MOZ_RELEASE_ASSERT(&aOther != this); |
195 | 0 | this->~InterruptFrame(); |
196 | 0 | new (this) InterruptFrame(std::move(aOther)); |
197 | 0 | return *this; |
198 | 0 | } |
199 | | |
200 | | bool IsInterruptIncall() const |
201 | 0 | { |
202 | 0 | return INTR_SEMS == mMesageSemantics && IN_MESSAGE == mDirection; |
203 | 0 | } |
204 | | |
205 | | bool IsInterruptOutcall() const |
206 | 0 | { |
207 | 0 | return INTR_SEMS == mMesageSemantics && OUT_MESSAGE == mDirection; |
208 | 0 | } |
209 | | |
210 | 0 | bool IsOutgoingSync() const { |
211 | 0 | return (mMesageSemantics == INTR_SEMS || mMesageSemantics == SYNC_SEMS) && |
212 | 0 | mDirection == OUT_MESSAGE; |
213 | 0 | } |
214 | | |
215 | | void Describe(int32_t* id, const char** dir, const char** sems, |
216 | | const char** name) const |
217 | 0 | { |
218 | 0 | *id = mMessageRoutingId; |
219 | 0 | *dir = (IN_MESSAGE == mDirection) ? "in" : "out"; |
220 | 0 | *sems = (INTR_SEMS == mMesageSemantics) ? "intr" : |
221 | 0 | (SYNC_SEMS == mMesageSemantics) ? "sync" : |
222 | 0 | "async"; |
223 | 0 | *name = mMessageName; |
224 | 0 | } |
225 | | |
226 | | int32_t GetRoutingId() const |
227 | 0 | { |
228 | 0 | return mMessageRoutingId; |
229 | 0 | } |
230 | | |
231 | | private: |
232 | | const char* mMessageName; |
233 | | int32_t mMessageRoutingId; |
234 | | Semantics mMesageSemantics; |
235 | | Direction mDirection; |
236 | | bool mMoved; |
237 | | |
238 | | // Disable harmful methods. |
239 | | InterruptFrame(const InterruptFrame& aOther) = delete; |
240 | | InterruptFrame& operator=(const InterruptFrame&) = delete; |
241 | | }; |
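InterruptFrame::operator= above uses the destroy-then-placement-new idiom to reuse the existing storage for a moved-in value. A minimal standalone sketch of the same idiom with an invented Frame type; the self-assignment check below stands in for the release assert used above.

    #include <cstdio>
    #include <new>
    #include <utility>

    struct Frame {
        const char* name;                              // non-owning name pointer
        explicit Frame(const char* n) : name(n) {}
        Frame(Frame&& other) : name(other.name) { other.name = nullptr; }
        // Destroy in place, then move-construct into the same storage.
        Frame& operator=(Frame&& other) {
            if (this != &other) {
                this->~Frame();
                new (this) Frame(std::move(other));
            }
            return *this;
        }
        Frame(const Frame&) = delete;
        Frame& operator=(const Frame&) = delete;
    };

    int main()
    {
        Frame a("Msg_A");
        Frame b("Msg_B");
        a = std::move(b);
        std::printf("a=%s b=%s\n", a.name, b.name ? b.name : "(moved-from)");  // a=Msg_B b=(moved-from)
        return 0;
    }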
242 | | |
243 | | class MOZ_STACK_CLASS MessageChannel::CxxStackFrame |
244 | | { |
245 | | public: |
246 | | CxxStackFrame(MessageChannel& that, Direction direction, const Message* msg) |
247 | | : mThat(that) |
248 | 0 | { |
249 | 0 | mThat.AssertWorkerThread(); |
250 | 0 |
|
251 | 0 | if (mThat.mCxxStackFrames.empty()) |
252 | 0 | mThat.EnteredCxxStack(); |
253 | 0 |
|
254 | 0 | if (!mThat.mCxxStackFrames.append(InterruptFrame(direction, msg))) |
255 | 0 | MOZ_CRASH(); |
256 | 0 |
|
257 | 0 | const InterruptFrame& frame = mThat.mCxxStackFrames.back(); |
258 | 0 |
|
259 | 0 | if (frame.IsInterruptIncall()) |
260 | 0 | mThat.EnteredCall(); |
261 | 0 |
|
262 | 0 | if (frame.IsOutgoingSync()) |
263 | 0 | mThat.EnteredSyncSend(); |
264 | 0 |
|
265 | 0 | mThat.mSawInterruptOutMsg |= frame.IsInterruptOutcall(); |
266 | 0 | } |
267 | | |
268 | 0 | ~CxxStackFrame() { |
269 | 0 | mThat.AssertWorkerThread(); |
270 | 0 |
|
271 | 0 | MOZ_RELEASE_ASSERT(!mThat.mCxxStackFrames.empty()); |
272 | 0 |
|
273 | 0 | const InterruptFrame& frame = mThat.mCxxStackFrames.back(); |
274 | 0 | bool exitingSync = frame.IsOutgoingSync(); |
275 | 0 | bool exitingCall = frame.IsInterruptIncall(); |
276 | 0 | mThat.mCxxStackFrames.shrinkBy(1); |
277 | 0 |
|
278 | 0 | bool exitingStack = mThat.mCxxStackFrames.empty(); |
279 | 0 |
|
280 | 0 | // According to how its lifetime is declared, mListener outlives the |
281 | 0 | // MessageChannel itself, so it is expected to still be alive here. |
282 | 0 | // There is nothing to assert: no code path nullifies mListener on |
283 | 0 | // MessageChannel. |
284 | 0 |
|
285 | 0 | if (exitingCall) |
286 | 0 | mThat.ExitedCall(); |
287 | 0 |
|
288 | 0 | if (exitingSync) |
289 | 0 | mThat.ExitedSyncSend(); |
290 | 0 |
|
291 | 0 | if (exitingStack) |
292 | 0 | mThat.ExitedCxxStack(); |
293 | 0 | } |
294 | | private: |
295 | | MessageChannel& mThat; |
296 | | |
297 | | // Disable harmful methods. |
298 | | CxxStackFrame() = delete; |
299 | | CxxStackFrame(const CxxStackFrame&) = delete; |
300 | | CxxStackFrame& operator=(const CxxStackFrame&) = delete; |
301 | | }; |
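CxxStackFrame is an RAII guard whose enter/exit hooks fire only for the outermost frame: the constructor checks the stack is empty before pushing, the destructor checks it is empty after popping. A reduced sketch of that pattern with invented Channel/StackFrame names.

    #include <cstdio>
    #include <vector>

    // Illustrative owner with the hooks that the guard drives.
    struct Channel {
        std::vector<int> frames;
        void EnteredCxxStack() { std::puts("entered C++ stack"); }
        void ExitedCxxStack()  { std::puts("exited C++ stack"); }
    };

    class StackFrame {
        Channel& mChan;
    public:
        explicit StackFrame(Channel& c) : mChan(c) {
            if (mChan.frames.empty()) mChan.EnteredCxxStack();   // outermost frame only
            mChan.frames.push_back(0);
        }
        ~StackFrame() {
            mChan.frames.pop_back();
            if (mChan.frames.empty()) mChan.ExitedCxxStack();    // outermost frame only
        }
        StackFrame(const StackFrame&) = delete;
        StackFrame& operator=(const StackFrame&) = delete;
    };

    int main()
    {
        Channel chan;
        {
            StackFrame outer(chan);        // prints "entered C++ stack"
            { StackFrame inner(chan); }    // nested frame: no hook calls
        }                                  // prints "exited C++ stack"
        return 0;
    }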
302 | | |
303 | | class AutoEnterTransaction |
304 | | { |
305 | | public: |
306 | | explicit AutoEnterTransaction(MessageChannel *aChan, |
307 | | int32_t aMsgSeqno, |
308 | | int32_t aTransactionID, |
309 | | int aNestedLevel) |
310 | | : mChan(aChan), |
311 | | mActive(true), |
312 | | mOutgoing(true), |
313 | | mNestedLevel(aNestedLevel), |
314 | | mSeqno(aMsgSeqno), |
315 | | mTransaction(aTransactionID), |
316 | | mNext(mChan->mTransactionStack) |
317 | 0 | { |
318 | 0 | mChan->mMonitor->AssertCurrentThreadOwns(); |
319 | 0 | mChan->mTransactionStack = this; |
320 | 0 | } |
321 | | |
322 | | explicit AutoEnterTransaction(MessageChannel *aChan, const IPC::Message &aMessage) |
323 | | : mChan(aChan), |
324 | | mActive(true), |
325 | | mOutgoing(false), |
326 | | mNestedLevel(aMessage.nested_level()), |
327 | | mSeqno(aMessage.seqno()), |
328 | | mTransaction(aMessage.transaction_id()), |
329 | | mNext(mChan->mTransactionStack) |
330 | 0 | { |
331 | 0 | mChan->mMonitor->AssertCurrentThreadOwns(); |
332 | 0 |
|
333 | 0 | if (!aMessage.is_sync()) { |
334 | 0 | mActive = false; |
335 | 0 | return; |
336 | 0 | } |
337 | 0 | |
338 | 0 | mChan->mTransactionStack = this; |
339 | 0 | } |
340 | | |
341 | 0 | ~AutoEnterTransaction() { |
342 | 0 | mChan->mMonitor->AssertCurrentThreadOwns(); |
343 | 0 | if (mActive) { |
344 | 0 | mChan->mTransactionStack = mNext; |
345 | 0 | } |
346 | 0 | } |
347 | | |
348 | 0 | void Cancel() { |
349 | 0 | AutoEnterTransaction *cur = mChan->mTransactionStack; |
350 | 0 | MOZ_RELEASE_ASSERT(cur == this); |
351 | 0 | while (cur && cur->mNestedLevel != IPC::Message::NOT_NESTED) { |
352 | 0 | // Note that, in the following situation, we will cancel multiple |
353 | 0 | // transactions: |
354 | 0 | // 1. Parent sends NESTED_INSIDE_SYNC message P1 to child. |
355 | 0 | // 2. Child sends NESTED_INSIDE_SYNC message C1 to the parent. |
356 | 0 | // 3. Child dispatches P1, parent blocks. |
357 | 0 | // 4. Child cancels. |
358 | 0 | // In this case, both P1 and C1 are cancelled. The parent will |
359 | 0 | // remove C1 from its queue when it gets the cancellation message. |
360 | 0 | MOZ_RELEASE_ASSERT(cur->mActive); |
361 | 0 | cur->mActive = false; |
362 | 0 | cur = cur->mNext; |
363 | 0 | } |
364 | 0 |
|
365 | 0 | mChan->mTransactionStack = cur; |
366 | 0 |
|
367 | 0 | MOZ_RELEASE_ASSERT(IsComplete()); |
368 | 0 | } |
369 | | |
370 | 0 | bool AwaitingSyncReply() const { |
371 | 0 | MOZ_RELEASE_ASSERT(mActive); |
372 | 0 | if (mOutgoing) { |
373 | 0 | return true; |
374 | 0 | } |
375 | 0 | return mNext ? mNext->AwaitingSyncReply() : false; |
376 | 0 | } |
377 | | |
378 | 0 | int AwaitingSyncReplyNestedLevel() const { |
379 | 0 | MOZ_RELEASE_ASSERT(mActive); |
380 | 0 | if (mOutgoing) { |
381 | 0 | return mNestedLevel; |
382 | 0 | } |
383 | 0 | return mNext ? mNext->AwaitingSyncReplyNestedLevel() : 0; |
384 | 0 | } |
385 | | |
386 | 0 | bool DispatchingSyncMessage() const { |
387 | 0 | MOZ_RELEASE_ASSERT(mActive); |
388 | 0 | if (!mOutgoing) { |
389 | 0 | return true; |
390 | 0 | } |
391 | 0 | return mNext ? mNext->DispatchingSyncMessage() : false; |
392 | 0 | } |
393 | | |
394 | 0 | int DispatchingSyncMessageNestedLevel() const { |
395 | 0 | MOZ_RELEASE_ASSERT(mActive); |
396 | 0 | if (!mOutgoing) { |
397 | 0 | return mNestedLevel; |
398 | 0 | } |
399 | 0 | return mNext ? mNext->DispatchingSyncMessageNestedLevel() : 0; |
400 | 0 | } |
401 | | |
402 | 0 | int NestedLevel() const { |
403 | 0 | MOZ_RELEASE_ASSERT(mActive); |
404 | 0 | return mNestedLevel; |
405 | 0 | } |
406 | | |
407 | 0 | int32_t SequenceNumber() const { |
408 | 0 | MOZ_RELEASE_ASSERT(mActive); |
409 | 0 | return mSeqno; |
410 | 0 | } |
411 | | |
412 | 0 | int32_t TransactionID() const { |
413 | 0 | MOZ_RELEASE_ASSERT(mActive); |
414 | 0 | return mTransaction; |
415 | 0 | } |
416 | | |
417 | 0 | void ReceivedReply(IPC::Message&& aMessage) { |
418 | 0 | MOZ_RELEASE_ASSERT(aMessage.seqno() == mSeqno); |
419 | 0 | MOZ_RELEASE_ASSERT(aMessage.transaction_id() == mTransaction); |
420 | 0 | MOZ_RELEASE_ASSERT(!mReply); |
421 | 0 | IPC_LOG("Reply received on worker thread: seqno=%d", mSeqno); |
422 | 0 | mReply = MakeUnique<IPC::Message>(std::move(aMessage)); |
423 | 0 | MOZ_RELEASE_ASSERT(IsComplete()); |
424 | 0 | } |
425 | | |
426 | 0 | void HandleReply(IPC::Message&& aMessage) { |
427 | 0 | AutoEnterTransaction *cur = mChan->mTransactionStack; |
428 | 0 | MOZ_RELEASE_ASSERT(cur == this); |
429 | 0 | while (cur) { |
430 | 0 | MOZ_RELEASE_ASSERT(cur->mActive); |
431 | 0 | if (aMessage.seqno() == cur->mSeqno) { |
432 | 0 | cur->ReceivedReply(std::move(aMessage)); |
433 | 0 | break; |
434 | 0 | } |
435 | 0 | cur = cur->mNext; |
436 | 0 | MOZ_RELEASE_ASSERT(cur); |
437 | 0 | } |
438 | 0 | } |
439 | | |
440 | 0 | bool IsComplete() { |
441 | 0 | return !mActive || mReply; |
442 | 0 | } |
443 | | |
444 | 0 | bool IsOutgoing() { |
445 | 0 | return mOutgoing; |
446 | 0 | } |
447 | | |
448 | 0 | bool IsCanceled() { |
449 | 0 | return !mActive; |
450 | 0 | } |
451 | | |
452 | 0 | bool IsBottom() const { |
453 | 0 | return !mNext; |
454 | 0 | } |
455 | | |
456 | 0 | bool IsError() { |
457 | 0 | MOZ_RELEASE_ASSERT(mReply); |
458 | 0 | return mReply->is_reply_error(); |
459 | 0 | } |
460 | | |
461 | 0 | UniquePtr<IPC::Message> GetReply() { |
462 | 0 | return std::move(mReply); |
463 | 0 | } |
464 | | |
465 | | private: |
466 | | MessageChannel *mChan; |
467 | | |
468 | | // Active is true if this transaction is on the mChan->mTransactionStack |
469 | | // stack. Generally we're not on the stack if the transaction was canceled |
470 | | // or if it was for a message that doesn't require transactions (an async |
471 | | // message). |
472 | | bool mActive; |
473 | | |
474 | | // Is this stack frame for an outgoing message? |
475 | | bool mOutgoing; |
476 | | |
477 | | // Properties of the message being sent/received. |
478 | | int mNestedLevel; |
479 | | int32_t mSeqno; |
480 | | int32_t mTransaction; |
481 | | |
482 | | // Next item in mChan->mTransactionStack. |
483 | | AutoEnterTransaction *mNext; |
484 | | |
485 | | // Pointer to the reply received for this message, if one was received. |
486 | | UniquePtr<IPC::Message> mReply; |
487 | | }; |
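The AwaitingSyncReply/DispatchingSyncMessage queries above walk the intrusive mNext chain, treating any outgoing frame as "awaiting a sync reply" and any incoming frame as "dispatching". A stripped-down sketch of that traversal; Txn is an invented stand-in for AutoEnterTransaction.

    #include <cassert>

    struct Txn {
        bool outgoing;      // true for a frame created by an outgoing send
        int  nestedLevel;
        Txn* next;          // next-deeper frame on the transaction stack

        bool AwaitingSyncReply() const {
            return outgoing ? true : (next && next->AwaitingSyncReply());
        }
        int AwaitingSyncReplyNestedLevel() const {
            return outgoing ? nestedLevel
                            : (next ? next->AwaitingSyncReplyNestedLevel() : 0);
        }
    };

    int main()
    {
        // Stack, top first: an incoming level-2 dispatch nested inside an outgoing level-1 send.
        Txn bottom{true, 1, nullptr};
        Txn top{false, 2, &bottom};
        assert(top.AwaitingSyncReply());                    // the outer send is still pending
        assert(top.AwaitingSyncReplyNestedLevel() == 1);    // reported level comes from that send
        return 0;
    }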
488 | | |
489 | | class PendingResponseReporter final : public nsIMemoryReporter |
490 | | { |
491 | 0 | ~PendingResponseReporter() {} |
492 | | public: |
493 | | NS_DECL_THREADSAFE_ISUPPORTS |
494 | | |
495 | | NS_IMETHOD |
496 | | CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, |
497 | | bool aAnonymize) override |
498 | 0 | { |
499 | 0 | MOZ_COLLECT_REPORT( |
500 | 0 | "unresolved-ipc-responses", KIND_OTHER, UNITS_COUNT, MessageChannel::gUnresolvedResponses, |
501 | 0 | "Outstanding IPC async message responses that are still not resolved."); |
502 | 0 | return NS_OK; |
503 | 0 | } |
504 | | }; |
505 | | |
506 | | NS_IMPL_ISUPPORTS(PendingResponseReporter, nsIMemoryReporter) |
507 | | |
508 | | class ChannelCountReporter final : public nsIMemoryReporter |
509 | | { |
510 | | ~ChannelCountReporter() = default; |
511 | | |
512 | | struct ChannelCounts { |
513 | | size_t mNow; |
514 | | size_t mMax; |
515 | | |
516 | 0 | ChannelCounts() : mNow(0), mMax(0) { } |
517 | | |
518 | 0 | void Inc() { |
519 | 0 | ++mNow; |
520 | 0 | if (mMax < mNow) { |
521 | 0 | mMax = mNow; |
522 | 0 | } |
523 | 0 | } |
524 | | |
525 | 0 | void Dec() { |
526 | 0 | MOZ_ASSERT(mNow > 0); |
527 | 0 | --mNow; |
528 | 0 | } |
529 | | }; |
530 | | |
531 | | using CountTable = nsDataHashtable<nsDepCharHashKey, ChannelCounts>; |
532 | | |
533 | | static StaticMutex sChannelCountMutex; |
534 | | static CountTable* sChannelCounts; |
535 | | |
536 | | public: |
537 | | NS_DECL_THREADSAFE_ISUPPORTS |
538 | | |
539 | | NS_IMETHOD |
540 | | CollectReports(nsIHandleReportCallback* aHandleReport, nsISupports* aData, |
541 | | bool aAnonymize) override |
542 | 0 | { |
543 | 0 | StaticMutexAutoLock countLock(sChannelCountMutex); |
544 | 0 | if (!sChannelCounts) { |
545 | 0 | return NS_OK; |
546 | 0 | } |
547 | 0 | for (auto iter = sChannelCounts->Iter(); !iter.Done(); iter.Next()) { |
548 | 0 | nsPrintfCString pathNow("ipc-channels/%s", iter.Key()); |
549 | 0 | nsPrintfCString pathMax("ipc-channels-peak/%s", iter.Key()); |
550 | 0 | nsPrintfCString descNow("Number of IPC channels for" |
551 | 0 | " top-level actor type %s", iter.Key()); |
552 | 0 | nsPrintfCString descMax("Peak number of IPC channels for" |
553 | 0 | " top-level actor type %s", iter.Key()); |
554 | 0 |
|
555 | 0 | aHandleReport->Callback(EmptyCString(), pathNow, KIND_OTHER, |
556 | 0 | UNITS_COUNT, iter.Data().mNow, descNow, |
557 | 0 | aData); |
558 | 0 | aHandleReport->Callback(EmptyCString(), pathMax, KIND_OTHER, |
559 | 0 | UNITS_COUNT, iter.Data().mMax, descMax, |
560 | 0 | aData); |
561 | 0 | } |
562 | 0 | return NS_OK; |
563 | 0 | } |
564 | | |
565 | | static void |
566 | | Increment(const char* aName) |
567 | 0 | { |
568 | 0 | StaticMutexAutoLock countLock(sChannelCountMutex); |
569 | 0 | if (!sChannelCounts) { |
570 | 0 | sChannelCounts = new CountTable; |
571 | 0 | } |
572 | 0 | sChannelCounts->GetOrInsert(aName).Inc(); |
573 | 0 | } |
574 | | |
575 | | static void |
576 | | Decrement(const char* aName) |
577 | 0 | { |
578 | 0 | StaticMutexAutoLock countLock(sChannelCountMutex); |
579 | 0 | MOZ_ASSERT(sChannelCounts); |
580 | 0 | sChannelCounts->GetOrInsert(aName).Dec(); |
581 | 0 | } |
582 | | }; |
583 | | |
584 | | StaticMutex ChannelCountReporter::sChannelCountMutex; |
585 | | ChannelCountReporter::CountTable* ChannelCountReporter::sChannelCounts; |
586 | | |
587 | | NS_IMPL_ISUPPORTS(ChannelCountReporter, nsIMemoryReporter) |
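ChannelCounts tracks a current and a peak count per top-level actor name under a static mutex. A portable sketch of the same bookkeeping, with the standard library standing in for StaticMutex and nsDataHashtable; all names here are invented.

    #include <cstdio>
    #include <map>
    #include <mutex>
    #include <string>

    struct Counts { size_t now = 0, max = 0; };

    static std::mutex gLock;
    static std::map<std::string, Counts> gCounts;

    void Increment(const std::string& name) {
        std::lock_guard<std::mutex> guard(gLock);
        Counts& c = gCounts[name];            // default-constructs {0, 0} on first use
        if (++c.now > c.max) c.max = c.now;   // track the high-water mark
    }

    void Decrement(const std::string& name) {
        std::lock_guard<std::mutex> guard(gLock);
        --gCounts[name].now;
    }

    int main()
    {
        Increment("PContent");
        Increment("PContent");
        Decrement("PContent");
        std::printf("now=%zu peak=%zu\n", gCounts["PContent"].now, gCounts["PContent"].max);  // now=1 peak=2
        return 0;
    }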
588 | | |
589 | | // In child processes, the first MessageChannel is created before |
590 | | // XPCOM is initialized enough to construct the memory reporter |
591 | | // manager. This retries every time a MessageChannel is constructed, |
592 | | // which is good enough in practice. |
593 | | template<class Reporter> |
594 | | static void TryRegisterStrongMemoryReporter() |
595 | 0 | { |
596 | 0 | static Atomic<bool> registered; |
597 | 0 | if (registered.compareExchange(false, true)) { |
598 | 0 | RefPtr<Reporter> reporter = new Reporter(); |
599 | 0 | if (NS_FAILED(RegisterStrongMemoryReporter(reporter))) { |
600 | 0 | registered = false; |
601 | 0 | } |
602 | 0 | } |
603 | 0 | } |
Unexecuted instantiation: Unified_cpp_ipc_glue1.cpp:void mozilla::ipc::TryRegisterStrongMemoryReporter<mozilla::ipc::PendingResponseReporter>()
Unexecuted instantiation: Unified_cpp_ipc_glue1.cpp:void mozilla::ipc::TryRegisterStrongMemoryReporter<mozilla::ipc::ChannelCountReporter>()
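TryRegisterStrongMemoryReporter claims a one-shot flag with compareExchange and resets it if registration fails, so a later MessageChannel constructor retries. A sketch of that idiom using std::atomic; TryRegisterOnce and doRegister are invented names.

    #include <atomic>
    #include <cstdio>

    static std::atomic<bool> gRegistered{false};

    // At most one caller performs registration; a failed attempt releases the
    // flag so a later caller tries again.
    void TryRegisterOnce(bool (*doRegister)())
    {
        bool expected = false;
        if (gRegistered.compare_exchange_strong(expected, true)) {
            if (!doRegister()) {
                gRegistered = false;       // infrastructure not ready yet; retry later
            }
        }
    }

    int main()
    {
        TryRegisterOnce(+[] { return false; });   // too early: flag is reset
        TryRegisterOnce(+[] { return true;  });   // retried and succeeds
        std::printf("registered=%d\n", gRegistered.load());   // registered=1
        return 0;
    }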
604 | | |
605 | | Atomic<size_t> MessageChannel::gUnresolvedResponses; |
606 | | |
607 | | MessageChannel::MessageChannel(const char* aName, |
608 | | IToplevelProtocol *aListener) |
609 | | : mName(aName), |
610 | | mListener(aListener), |
611 | | mChannelState(ChannelClosed), |
612 | | mSide(UnknownSide), |
613 | | mIsCrossProcess(false), |
614 | | mLink(nullptr), |
615 | | mWorkerLoop(nullptr), |
616 | | mChannelErrorTask(nullptr), |
617 | | mWorkerThread(nullptr), |
618 | | mTimeoutMs(kNoTimeout), |
619 | | mInTimeoutSecondHalf(false), |
620 | | mNextSeqno(0), |
621 | | mLastSendError(SyncSendError::SendSuccess), |
622 | | mDispatchingAsyncMessage(false), |
623 | | mDispatchingAsyncMessageNestedLevel(0), |
624 | | mTransactionStack(nullptr), |
625 | | mTimedOutMessageSeqno(0), |
626 | | mTimedOutMessageNestedLevel(0), |
627 | | mMaybeDeferredPendingCount(0), |
628 | | mRemoteStackDepthGuess(0), |
629 | | mSawInterruptOutMsg(false), |
630 | | mIsWaitingForIncoming(false), |
631 | | mAbortOnError(false), |
632 | | mNotifiedChannelDone(false), |
633 | | mFlags(REQUIRE_DEFAULT), |
634 | | mPeerPidSet(false), |
635 | | mPeerPid(-1), |
636 | | mIsPostponingSends(false), |
637 | | mInKillHardShutdown(false), |
638 | | mBuildIDsConfirmedMatch(false) |
639 | 0 | { |
640 | 0 | MOZ_COUNT_CTOR(ipc::MessageChannel); |
641 | 0 |
|
642 | | #ifdef OS_WIN |
643 | | mTopFrame = nullptr; |
644 | | mIsSyncWaitingOnNonMainThread = false; |
645 | | #endif |
646 | |
|
647 | 0 | mOnChannelConnectedTask = NewNonOwningCancelableRunnableMethod( |
648 | 0 | "ipc::MessageChannel::DispatchOnChannelConnected", |
649 | 0 | this, |
650 | 0 | &MessageChannel::DispatchOnChannelConnected); |
651 | 0 |
|
652 | | #ifdef OS_WIN |
653 | | mEvent = CreateEventW(nullptr, TRUE, FALSE, nullptr); |
654 | | MOZ_RELEASE_ASSERT(mEvent, "CreateEvent failed! Nothing is going to work!"); |
655 | | #endif |
656 | |
|
657 | 0 | TryRegisterStrongMemoryReporter<PendingResponseReporter>(); |
658 | 0 | TryRegisterStrongMemoryReporter<ChannelCountReporter>(); |
659 | 0 | } |
660 | | |
661 | | MessageChannel::~MessageChannel() |
662 | 0 | { |
663 | 0 | MOZ_COUNT_DTOR(ipc::MessageChannel); |
664 | 0 | IPC_ASSERT(mCxxStackFrames.empty(), "mismatched CxxStackFrame ctor/dtors"); |
665 | | #ifdef OS_WIN |
666 | | if (mEvent) { |
667 | | BOOL ok = CloseHandle(mEvent); |
668 | | mEvent = nullptr; |
669 | | |
670 | | if (!ok) { |
671 | | gfxDevCrash(mozilla::gfx::LogReason::MessageChannelCloseFailure) << |
672 | | "MessageChannel failed to close. GetLastError: " << |
673 | | GetLastError(); |
674 | | } |
675 | | MOZ_RELEASE_ASSERT(ok); |
676 | | } else { |
677 | | gfxDevCrash(mozilla::gfx::LogReason::MessageChannelCloseFailure) << |
678 | | "MessageChannel destructor ran without an mEvent Handle"; |
679 | | } |
680 | | #endif |
681 | | Clear(); |
682 | 0 | } |
683 | | |
684 | | #ifdef DEBUG |
685 | | void |
686 | | MessageChannel::AssertMaybeDeferredCountCorrect() |
687 | | { |
688 | | size_t count = 0; |
689 | | for (MessageTask* task : mPending) { |
690 | | if (!IsAlwaysDeferred(task->Msg())) { |
691 | | count++; |
692 | | } |
693 | | } |
694 | | |
695 | | MOZ_ASSERT(count == mMaybeDeferredPendingCount); |
696 | | } |
697 | | #endif |
698 | | |
699 | | // This function returns the current transaction ID. Since the notion of a |
700 | | // "current transaction" can be hard to define when messages race with each |
701 | | // other and one gets canceled and the other doesn't, we require that this |
702 | | // function is only called when the current transaction is known to be for a |
703 | | // NESTED_INSIDE_SYNC message. In that case, we know for sure what the caller is |
704 | | // looking for. |
705 | | int32_t |
706 | | MessageChannel::CurrentNestedInsideSyncTransaction() const |
707 | 0 | { |
708 | 0 | mMonitor->AssertCurrentThreadOwns(); |
709 | 0 | if (!mTransactionStack) { |
710 | 0 | return 0; |
711 | 0 | } |
712 | 0 | MOZ_RELEASE_ASSERT(mTransactionStack->NestedLevel() == IPC::Message::NESTED_INSIDE_SYNC); |
713 | 0 | return mTransactionStack->TransactionID(); |
714 | 0 | } |
715 | | |
716 | | bool |
717 | | MessageChannel::AwaitingSyncReply() const |
718 | 0 | { |
719 | 0 | mMonitor->AssertCurrentThreadOwns(); |
720 | 0 | return mTransactionStack ? mTransactionStack->AwaitingSyncReply() : false; |
721 | 0 | } |
722 | | |
723 | | int |
724 | | MessageChannel::AwaitingSyncReplyNestedLevel() const |
725 | 0 | { |
726 | 0 | mMonitor->AssertCurrentThreadOwns(); |
727 | 0 | return mTransactionStack ? mTransactionStack->AwaitingSyncReplyNestedLevel() : 0; |
728 | 0 | } |
729 | | |
730 | | bool |
731 | | MessageChannel::DispatchingSyncMessage() const |
732 | 0 | { |
733 | 0 | mMonitor->AssertCurrentThreadOwns(); |
734 | 0 | return mTransactionStack ? mTransactionStack->DispatchingSyncMessage() : false; |
735 | 0 | } |
736 | | |
737 | | int |
738 | | MessageChannel::DispatchingSyncMessageNestedLevel() const |
739 | 0 | { |
740 | 0 | mMonitor->AssertCurrentThreadOwns(); |
741 | 0 | return mTransactionStack ? mTransactionStack->DispatchingSyncMessageNestedLevel() : 0; |
742 | 0 | } |
743 | | |
744 | | static void |
745 | | PrintErrorMessage(Side side, const char* channelName, const char* msg) |
746 | 0 | { |
747 | 0 | const char *from = (side == ChildSide) |
748 | 0 | ? "Child" |
749 | 0 | : ((side == ParentSide) ? "Parent" : "Unknown"); |
750 | 0 | printf_stderr("\n###!!! [%s][%s] Error: %s\n\n", from, channelName, msg); |
751 | 0 | } |
752 | | |
753 | | bool |
754 | | MessageChannel::Connected() const |
755 | 0 | { |
756 | 0 | mMonitor->AssertCurrentThreadOwns(); |
757 | 0 |
|
758 | 0 | // The transport layer allows us to send messages before |
759 | 0 | // receiving the "connected" ack from the remote side. |
760 | 0 | return (ChannelOpening == mChannelState || ChannelConnected == mChannelState); |
761 | 0 | } |
762 | | |
763 | | bool |
764 | | MessageChannel::CanSend() const |
765 | 0 | { |
766 | 0 | if (!mMonitor) { |
767 | 0 | return false; |
768 | 0 | } |
769 | 0 | MonitorAutoLock lock(*mMonitor); |
770 | 0 | return Connected(); |
771 | 0 | } |
772 | | |
773 | | void |
774 | | MessageChannel::WillDestroyCurrentMessageLoop() |
775 | 0 | { |
776 | | #if defined(DEBUG) |
777 | | CrashReporter::AnnotateCrashReport(CrashReporter::Annotation::IPCFatalErrorProtocol, |
778 | | nsDependentCString(mName)); |
779 | | MOZ_CRASH("MessageLoop destroyed before MessageChannel that's bound to it"); |
780 | | #endif |
781 | |
|
782 | 0 | // Clear mWorkerLoop to avoid posting to it in the future. |
783 | 0 | MonitorAutoLock lock(*mMonitor); |
784 | 0 | mWorkerLoop = nullptr; |
785 | 0 | } |
786 | | |
787 | | void |
788 | | MessageChannel::Clear() |
789 | 0 | { |
790 | 0 | // Don't clear mWorkerThread; we use it in AssertLinkThread() and |
791 | 0 | // AssertWorkerThread(). |
792 | 0 | // |
793 | 0 | // Also don't clear mListener. If we clear it, then sending a message |
794 | 0 | // through this channel after it's Clear()'ed can cause this process to |
795 | 0 | // crash. |
796 | 0 | // |
797 | 0 | // In practice, mListener owns the channel, so the channel gets deleted |
798 | 0 | // before mListener. But just to be safe, mListener is a weak pointer. |
799 | 0 |
|
800 | 0 | #if !defined(ANDROID) |
801 | 0 | // KillHard shutdowns can occur with the channel in connected state. We are |
802 | 0 | // already collecting crash dump data about KillHard shutdowns and we |
803 | 0 | // shouldn't intentionally crash here. |
804 | 0 | if (!Unsound_IsClosed() && !mInKillHardShutdown) { |
805 | 0 | CrashReporter::AnnotateCrashReport( |
806 | 0 | CrashReporter::Annotation::IPCFatalErrorProtocol, nsDependentCString(mName)); |
807 | 0 | switch (mChannelState) { |
808 | 0 | case ChannelOpening: |
809 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed " \ |
810 | 0 | "(mChannelState == ChannelOpening)."); |
811 | 0 | break; |
812 | 0 | case ChannelConnected: |
813 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed " \ |
814 | 0 | "(mChannelState == ChannelConnected)."); |
815 | 0 | break; |
816 | 0 | case ChannelTimeout: |
817 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed " \ |
818 | 0 | "(mChannelState == ChannelTimeout)."); |
819 | 0 | break; |
820 | 0 | case ChannelClosing: |
821 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed " \ |
822 | 0 | "(mChannelState == ChannelClosing)."); |
823 | 0 | break; |
824 | 0 | case ChannelError: |
825 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed " \ |
826 | 0 | "(mChannelState == ChannelError)."); |
827 | 0 | break; |
828 | 0 | default: |
829 | 0 | MOZ_CRASH("MessageChannel destroyed without being closed."); |
830 | 0 | } |
831 | 0 | } |
832 | 0 | #endif |
833 | 0 |
|
834 | 0 | if (gParentProcessBlocker == this) { |
835 | 0 | gParentProcessBlocker = nullptr; |
836 | 0 | } |
837 | 0 |
|
838 | 0 | if (mWorkerLoop) { |
839 | 0 | mWorkerLoop->RemoveDestructionObserver(this); |
840 | 0 | } |
841 | 0 |
|
842 | 0 | gUnresolvedResponses -= mPendingResponses.size(); |
843 | 0 | for (auto& pair : mPendingResponses) { |
844 | 0 | pair.second.get()->Reject(ResponseRejectReason::ChannelClosed); |
845 | 0 | } |
846 | 0 | mPendingResponses.clear(); |
847 | 0 |
|
848 | 0 | mWorkerLoop = nullptr; |
849 | 0 | if (mLink != nullptr && mIsCrossProcess) { |
850 | 0 | ChannelCountReporter::Decrement(mName); |
851 | 0 | } |
852 | 0 | delete mLink; |
853 | 0 | mLink = nullptr; |
854 | 0 |
|
855 | 0 | mOnChannelConnectedTask->Cancel(); |
856 | 0 |
|
857 | 0 | if (mChannelErrorTask) { |
858 | 0 | mChannelErrorTask->Cancel(); |
859 | 0 | mChannelErrorTask = nullptr; |
860 | 0 | } |
861 | 0 |
|
862 | 0 | // Free up any memory used by pending messages. |
863 | 0 | for (MessageTask* task : mPending) { |
864 | 0 | task->Clear(); |
865 | 0 | } |
866 | 0 | mPending.clear(); |
867 | 0 |
|
868 | 0 | mMaybeDeferredPendingCount = 0; |
869 | 0 |
|
870 | 0 | mOutOfTurnReplies.clear(); |
871 | 0 | while (!mDeferred.empty()) { |
872 | 0 | mDeferred.pop(); |
873 | 0 | } |
874 | 0 | } |
875 | | |
876 | | bool |
877 | | MessageChannel::Open(Transport* aTransport, MessageLoop* aIOLoop, Side aSide) |
878 | 0 | { |
879 | 0 | MOZ_ASSERT(!mLink, "Open() called > once"); |
880 | 0 |
|
881 | 0 | mMonitor = new RefCountedMonitor(); |
882 | 0 | mWorkerLoop = MessageLoop::current(); |
883 | 0 | mWorkerThread = GetCurrentVirtualThread(); |
884 | 0 | mWorkerLoop->AddDestructionObserver(this); |
885 | 0 | mListener->SetIsMainThreadProtocol(); |
886 | 0 |
|
887 | 0 | ProcessLink *link = new ProcessLink(this); |
888 | 0 | link->Open(aTransport, aIOLoop, aSide); // :TODO: n.b.: sets mChild |
889 | 0 | mLink = link; |
890 | 0 | mIsCrossProcess = true; |
891 | 0 | ChannelCountReporter::Increment(mName); |
892 | 0 | return true; |
893 | 0 | } |
894 | | |
895 | | bool |
896 | | MessageChannel::Open(MessageChannel *aTargetChan, nsIEventTarget *aEventTarget, Side aSide) |
897 | 0 | { |
898 | 0 | // Opens a connection to another thread in the same process. |
899 | 0 |
|
900 | 0 | // This handshake proceeds as follows: |
901 | 0 | // - Let A be the thread initiating the process (either child or parent) |
902 | 0 | // and B be the other thread. |
903 | 0 | // - A spawns thread for B, obtaining B's message loop |
904 | 0 | // - A creates ProtocolChild and ProtocolParent instances. |
905 | 0 | // Let PA be the one appropriate to A and PB the side for B. |
906 | 0 | // - A invokes PA->Open(PB, ...): |
907 | 0 | // - set state to mChannelOpening |
908 | 0 | // - this will place a work item in B's worker loop (see next bullet) |
909 | 0 | // and then spins until PB->mChannelState becomes mChannelConnected |
910 | 0 | // - meanwhile, on PB's worker loop, the work item is removed and: |
911 | 0 | // - invokes PB->SlaveOpen(PA, ...): |
912 | 0 | // - sets its state and that of PA to Connected |
913 | 0 | MOZ_ASSERT(aTargetChan, "Need a target channel"); |
914 | 0 | MOZ_ASSERT(ChannelClosed == mChannelState, "Not currently closed"); |
915 | 0 |
|
916 | 0 | CommonThreadOpenInit(aTargetChan, aSide); |
917 | 0 |
|
918 | 0 | Side oppSide = UnknownSide; |
919 | 0 | switch(aSide) { |
920 | 0 | case ChildSide: oppSide = ParentSide; break; |
921 | 0 | case ParentSide: oppSide = ChildSide; break; |
922 | 0 | case UnknownSide: break; |
923 | 0 | } |
924 | 0 | |
925 | 0 | mMonitor = new RefCountedMonitor(); |
926 | 0 |
|
927 | 0 | MonitorAutoLock lock(*mMonitor); |
928 | 0 | mChannelState = ChannelOpening; |
929 | 0 | MOZ_ALWAYS_SUCCEEDS(aEventTarget->Dispatch(NewNonOwningRunnableMethod<MessageChannel*, Side>( |
930 | 0 | "ipc::MessageChannel::OnOpenAsSlave", |
931 | 0 | aTargetChan, |
932 | 0 | &MessageChannel::OnOpenAsSlave, |
933 | 0 | this, |
934 | 0 | oppSide))); |
935 | 0 |
|
936 | 0 | while (ChannelOpening == mChannelState) |
937 | 0 | mMonitor->Wait(); |
938 | 0 | MOZ_RELEASE_ASSERT(ChannelConnected == mChannelState, "not connected when awoken"); |
939 | 0 | return (ChannelConnected == mChannelState); |
940 | 0 | } |
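The same-process Open() above sets ChannelOpening under the monitor, hands a task to the other side's thread, and waits until that side flips both channels to ChannelConnected and notifies. A compact sketch of that handshake with a standard mutex and condition variable; State, Shared, and sideB are invented names.

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    enum State { Closed, Opening, Connected };

    struct Shared {
        std::mutex m;
        std::condition_variable cv;
        State a = Closed;   // the opening side
        State b = Closed;   // the other side
    };

    int main()
    {
        Shared s;
        std::unique_lock<std::mutex> lock(s.m);
        s.a = Opening;                               // set before handing work to the other thread
        std::thread sideB([&s] {
            std::lock_guard<std::mutex> g(s.m);      // runs once the opener is waiting
            s.a = Connected;                         // "OnOpenAsSlave": connect both ends
            s.b = Connected;
            s.cv.notify_one();
        });
        s.cv.wait(lock, [&s] { return s.a == Connected; });   // releases the lock while waiting
        std::printf("opener sees: connected\n");
        lock.unlock();
        sideB.join();
        return 0;
    }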
941 | | |
942 | | void |
943 | | MessageChannel::OnOpenAsSlave(MessageChannel *aTargetChan, Side aSide) |
944 | 0 | { |
945 | 0 | // Invoked when the other side has begun the open. |
946 | 0 | MOZ_ASSERT(ChannelClosed == mChannelState, "Not currently closed"); |
947 | 0 | MOZ_ASSERT(ChannelOpening == aTargetChan->mChannelState, |
948 | 0 | "Target channel not in the process of opening"); |
949 | 0 |
|
950 | 0 | CommonThreadOpenInit(aTargetChan, aSide); |
951 | 0 | mMonitor = aTargetChan->mMonitor; |
952 | 0 |
|
953 | 0 | MonitorAutoLock lock(*mMonitor); |
954 | 0 | MOZ_RELEASE_ASSERT(ChannelOpening == aTargetChan->mChannelState, |
955 | 0 | "Target channel not in the process of opening"); |
956 | 0 | mChannelState = ChannelConnected; |
957 | 0 | aTargetChan->mChannelState = ChannelConnected; |
958 | 0 | aTargetChan->mMonitor->Notify(); |
959 | 0 | } |
960 | | |
961 | | void |
962 | | MessageChannel::CommonThreadOpenInit(MessageChannel *aTargetChan, Side aSide) |
963 | 0 | { |
964 | 0 | mWorkerLoop = MessageLoop::current(); |
965 | 0 | mWorkerThread = GetCurrentVirtualThread(); |
966 | 0 | mWorkerLoop->AddDestructionObserver(this); |
967 | 0 | mListener->SetIsMainThreadProtocol(); |
968 | 0 |
|
969 | 0 | mLink = new ThreadLink(this, aTargetChan); |
970 | 0 | mSide = aSide; |
971 | 0 | } |
972 | | |
973 | | bool |
974 | | MessageChannel::Echo(Message* aMsg) |
975 | 0 | { |
976 | 0 | UniquePtr<Message> msg(aMsg); |
977 | 0 | AssertWorkerThread(); |
978 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
979 | 0 | if (MSG_ROUTING_NONE == msg->routing_id()) { |
980 | 0 | ReportMessageRouteError("MessageChannel::Echo"); |
981 | 0 | return false; |
982 | 0 | } |
983 | 0 | |
984 | 0 | MonitorAutoLock lock(*mMonitor); |
985 | 0 |
|
986 | 0 | if (!Connected()) { |
987 | 0 | ReportConnectionError("MessageChannel", msg.get()); |
988 | 0 | return false; |
989 | 0 | } |
990 | 0 | |
991 | 0 | mLink->EchoMessage(msg.release()); |
992 | 0 | return true; |
993 | 0 | } |
994 | | |
995 | | bool |
996 | | MessageChannel::Send(Message* aMsg) |
997 | 0 | { |
998 | 0 | if (aMsg->size() >= kMinTelemetryMessageSize) { |
999 | 0 | Telemetry::Accumulate(Telemetry::IPC_MESSAGE_SIZE2, aMsg->size()); |
1000 | 0 | } |
1001 | 0 |
|
1002 | 0 | // If the message was created by the IPC bindings, the create time will be |
1003 | 0 | // recorded. Use this information to report the IPC_WRITE_MAIN_THREAD_LATENCY_MS (time |
1004 | 0 | // from message creation to it being sent). |
1005 | 0 | if (NS_IsMainThread() && aMsg->create_time()) { |
1006 | 0 | uint32_t latencyMs = round((mozilla::TimeStamp::Now() - aMsg->create_time()).ToMilliseconds()); |
1007 | 0 | if (latencyMs >= kMinTelemetryIPCWriteLatencyMs) { |
1008 | 0 | mozilla::Telemetry::Accumulate(mozilla::Telemetry::IPC_WRITE_MAIN_THREAD_LATENCY_MS, |
1009 | 0 | nsDependentCString(aMsg->name()), |
1010 | 0 | latencyMs); |
1011 | 0 | } |
1012 | 0 | } |
1013 | 0 |
|
1014 | 0 | MOZ_RELEASE_ASSERT(!aMsg->is_sync()); |
1015 | 0 | MOZ_RELEASE_ASSERT(aMsg->nested_level() != IPC::Message::NESTED_INSIDE_SYNC); |
1016 | 0 |
|
1017 | 0 | CxxStackFrame frame(*this, OUT_MESSAGE, aMsg); |
1018 | 0 |
|
1019 | 0 | UniquePtr<Message> msg(aMsg); |
1020 | 0 | AssertWorkerThread(); |
1021 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
1022 | 0 | if (MSG_ROUTING_NONE == msg->routing_id()) { |
1023 | 0 | ReportMessageRouteError("MessageChannel::Send"); |
1024 | 0 | return false; |
1025 | 0 | } |
1026 | 0 | |
1027 | 0 | MonitorAutoLock lock(*mMonitor); |
1028 | 0 | if (!Connected()) { |
1029 | 0 | ReportConnectionError("MessageChannel", msg.get()); |
1030 | 0 | return false; |
1031 | 0 | } |
1032 | 0 | SendMessageToLink(msg.release()); |
1033 | 0 | return true; |
1034 | 0 | } |
1035 | | |
1036 | | void |
1037 | | MessageChannel::SendMessageToLink(Message* aMsg) |
1038 | 0 | { |
1039 | 0 | if (mIsPostponingSends) { |
1040 | 0 | UniquePtr<Message> msg(aMsg); |
1041 | 0 | mPostponedSends.push_back(std::move(msg)); |
1042 | 0 | return; |
1043 | 0 | } |
1044 | 0 | mLink->SendMessage(aMsg); |
1045 | 0 | } |
1046 | | |
1047 | | void |
1048 | | MessageChannel::BeginPostponingSends() |
1049 | 0 | { |
1050 | 0 | AssertWorkerThread(); |
1051 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
1052 | 0 |
|
1053 | 0 | MonitorAutoLock lock(*mMonitor); |
1054 | 0 | { |
1055 | 0 | MOZ_ASSERT(!mIsPostponingSends); |
1056 | 0 | mIsPostponingSends = true; |
1057 | 0 | } |
1058 | 0 | } |
1059 | | |
1060 | | void |
1061 | | MessageChannel::StopPostponingSends() |
1062 | 0 | { |
1063 | 0 | // Note: this can be called from any thread. |
1064 | 0 | MonitorAutoLock lock(*mMonitor); |
1065 | 0 |
|
1066 | 0 | MOZ_ASSERT(mIsPostponingSends); |
1067 | 0 |
|
1068 | 0 | for (UniquePtr<Message>& iter : mPostponedSends) { |
1069 | 0 | mLink->SendMessage(iter.release()); |
1070 | 0 | } |
1071 | 0 |
|
1072 | 0 | // We unset this after SendMessage so we can make correct thread |
1073 | 0 | // assertions in MessageLink. |
1074 | 0 | mIsPostponingSends = false; |
1075 | 0 | mPostponedSends.clear(); |
1076 | 0 | } |
1077 | | |
1078 | | UniquePtr<MessageChannel::UntypedCallbackHolder> |
1079 | | MessageChannel::PopCallback(const Message& aMsg) |
1080 | 0 | { |
1081 | 0 | auto iter = mPendingResponses.find(aMsg.seqno()); |
1082 | 0 | if (iter != mPendingResponses.end()) { |
1083 | 0 | UniquePtr<MessageChannel::UntypedCallbackHolder> ret = std::move(iter->second); |
1084 | 0 | mPendingResponses.erase(iter); |
1085 | 0 | gUnresolvedResponses--; |
1086 | 0 | return ret; |
1087 | 0 | } |
1088 | 0 | return nullptr; |
1089 | 0 | } |
1090 | | |
1091 | | void |
1092 | | MessageChannel::RejectPendingResponsesForActor(ActorIdType aActorId) |
1093 | 0 | { |
1094 | 0 | auto itr = mPendingResponses.begin(); |
1095 | 0 | while (itr != mPendingResponses.end()) { |
1096 | 0 | if (itr->second.get()->mActorId != aActorId) { |
1097 | 0 | ++itr; |
1098 | 0 | continue; |
1099 | 0 | } |
1100 | 0 | itr->second.get()->Reject(ResponseRejectReason::ActorDestroyed); |
1101 | 0 | // Take special care of advancing the iterator since we are |
1102 | 0 | // removing it while iterating. |
1103 | 0 | itr = mPendingResponses.erase(itr); |
1104 | 0 | gUnresolvedResponses--; |
1105 | 0 | } |
1106 | 0 | } |
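The loop above erases map entries while iterating by taking the iterator returned from erase(). The same pattern in isolation; the map contents are made up.

    #include <cstdio>
    #include <map>

    int main()
    {
        // seqno -> actor id
        std::map<int, int> pendingByActor = {{1, 10}, {2, 10}, {3, 11}};
        const int doomedActor = 10;

        for (auto it = pendingByActor.begin(); it != pendingByActor.end();) {
            if (it->second != doomedActor) {
                ++it;                           // keep this entry, advance normally
                continue;
            }
            // Reject(...) would run here in the real code.
            it = pendingByActor.erase(it);      // erase() returns the next valid iterator
        }
        std::printf("remaining=%zu\n", pendingByActor.size());   // remaining=1
        return 0;
    }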
1107 | | |
1108 | | class BuildIDsMatchMessage : public IPC::Message |
1109 | | { |
1110 | | public: |
1111 | | BuildIDsMatchMessage() |
1112 | | : IPC::Message(MSG_ROUTING_NONE, BUILD_IDS_MATCH_MESSAGE_TYPE) |
1113 | 0 | { |
1114 | 0 | } |
1115 | | void Log(const std::string& aPrefix, FILE* aOutf) const |
1116 | 0 | { |
1117 | 0 | fputs("(special `Build IDs match' message)", aOutf); |
1118 | 0 | } |
1119 | | }; |
1120 | | |
1121 | | // Send the parent a special async message to confirm when the parent and child |
1122 | | // are of the same buildID. Skips sending the message and returns false if the |
1123 | | // buildIDs don't match. This is a minor variation on |
1124 | | // MessageChannel::Send(Message* aMsg). |
1125 | | bool |
1126 | | MessageChannel::SendBuildIDsMatchMessage(const char* aParentBuildID) |
1127 | 0 | { |
1128 | 0 | MOZ_ASSERT(!XRE_IsParentProcess()); |
1129 | 0 |
|
1130 | 0 | nsCString parentBuildID(aParentBuildID); |
1131 | 0 | nsCString childBuildID(mozilla::PlatformBuildID()); |
1132 | 0 |
|
1133 | 0 | if (parentBuildID != childBuildID) { |
1134 | 0 | // The build IDs didn't match, usually because an update occurred in the |
1135 | 0 | // background. |
1136 | 0 | return false; |
1137 | 0 | } |
1138 | 0 | |
1139 | 0 | nsAutoPtr<BuildIDsMatchMessage> msg(new BuildIDsMatchMessage()); |
1140 | 0 |
|
1141 | 0 | MOZ_RELEASE_ASSERT(!msg->is_sync()); |
1142 | 0 | MOZ_RELEASE_ASSERT(msg->nested_level() != IPC::Message::NESTED_INSIDE_SYNC); |
1143 | 0 |
|
1144 | 0 | AssertWorkerThread(); |
1145 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
1146 | 0 | // Don't check for MSG_ROUTING_NONE. |
1147 | 0 |
|
1148 | 0 | MonitorAutoLock lock(*mMonitor); |
1149 | 0 | if (!Connected()) { |
1150 | 0 | ReportConnectionError("MessageChannel", msg); |
1151 | 0 | return false; |
1152 | 0 | } |
1153 | 0 | mLink->SendMessage(msg.forget()); |
1154 | 0 | return true; |
1155 | 0 | } |
1156 | | |
1157 | | class CancelMessage : public IPC::Message |
1158 | | { |
1159 | | public: |
1160 | | explicit CancelMessage(int transaction) : |
1161 | | IPC::Message(MSG_ROUTING_NONE, CANCEL_MESSAGE_TYPE) |
1162 | 0 | { |
1163 | 0 | set_transaction_id(transaction); |
1164 | 0 | } |
1165 | 0 | static bool Read(const Message* msg) { |
1166 | 0 | return true; |
1167 | 0 | } |
1168 | 0 | void Log(const std::string& aPrefix, FILE* aOutf) const { |
1169 | 0 | fputs("(special `Cancel' message)", aOutf); |
1170 | 0 | } |
1171 | | }; |
1172 | | |
1173 | | bool |
1174 | | MessageChannel::MaybeInterceptSpecialIOMessage(const Message& aMsg) |
1175 | 0 | { |
1176 | 0 | AssertLinkThread(); |
1177 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1178 | 0 |
|
1179 | 0 | if (MSG_ROUTING_NONE == aMsg.routing_id()) { |
1180 | 0 | if (GOODBYE_MESSAGE_TYPE == aMsg.type()) { |
1181 | 0 | // :TODO: Sort out Close() on this side racing with Close() on the |
1182 | 0 | // other side |
1183 | 0 | mChannelState = ChannelClosing; |
1184 | 0 | if (LoggingEnabled()) { |
1185 | 0 | printf("NOTE: %s process received `Goodbye', closing down\n", |
1186 | 0 | (mSide == ChildSide) ? "child" : "parent"); |
1187 | 0 | } |
1188 | 0 | return true; |
1189 | 0 | } else if (CANCEL_MESSAGE_TYPE == aMsg.type()) { |
1190 | 0 | IPC_LOG("Cancel from message"); |
1191 | 0 | CancelTransaction(aMsg.transaction_id()); |
1192 | 0 | NotifyWorkerThread(); |
1193 | 0 | return true; |
1194 | 0 | } else if (BUILD_IDS_MATCH_MESSAGE_TYPE == aMsg.type()) { |
1195 | 0 | IPC_LOG("Build IDs match message"); |
1196 | 0 | mBuildIDsConfirmedMatch = true; |
1197 | 0 | return true; |
1198 | 0 | } |
1199 | 0 | } |
1200 | 0 | return false; |
1201 | 0 | } |
1202 | | |
1203 | | /* static */ bool |
1204 | | MessageChannel::IsAlwaysDeferred(const Message& aMsg) |
1205 | 0 | { |
1206 | 0 | // If a message is not NESTED_INSIDE_CPOW and not sync, then we always defer |
1207 | 0 | // it. |
1208 | 0 | return aMsg.nested_level() != IPC::Message::NESTED_INSIDE_CPOW && |
1209 | 0 | !aMsg.is_sync(); |
1210 | 0 | } |
1211 | | |
1212 | | bool |
1213 | | MessageChannel::ShouldDeferMessage(const Message& aMsg) |
1214 | 0 | { |
1215 | 0 | // Never defer messages that have the highest nested level, even async |
1216 | 0 | // ones. This is safe because only the child can send these messages, so |
1217 | 0 | // they can never nest. |
1218 | 0 | if (aMsg.nested_level() == IPC::Message::NESTED_INSIDE_CPOW) { |
1219 | 0 | MOZ_ASSERT(!IsAlwaysDeferred(aMsg)); |
1220 | 0 | return false; |
1221 | 0 | } |
1222 | 0 |
|
1223 | 0 | // Unless they're NESTED_INSIDE_CPOW, we always defer async messages. |
1224 | 0 | // Note that we never send an async NESTED_INSIDE_SYNC message. |
1225 | 0 | if (!aMsg.is_sync()) { |
1226 | 0 | MOZ_RELEASE_ASSERT(aMsg.nested_level() == IPC::Message::NOT_NESTED); |
1227 | 0 | MOZ_ASSERT(IsAlwaysDeferred(aMsg)); |
1228 | 0 | return true; |
1229 | 0 | } |
1230 | 0 | |
1231 | 0 | MOZ_ASSERT(!IsAlwaysDeferred(aMsg)); |
1232 | 0 |
|
1233 | 0 | int msgNestedLevel = aMsg.nested_level(); |
1234 | 0 | int waitingNestedLevel = AwaitingSyncReplyNestedLevel(); |
1235 | 0 |
|
1236 | 0 | // Always defer if the nested level of the incoming message is less than the |
1237 | 0 | // nested level of the message we're awaiting. |
1238 | 0 | if (msgNestedLevel < waitingNestedLevel) |
1239 | 0 | return true; |
1240 | 0 | |
1241 | 0 | // Never defer if the message has strictly greater nested level. |
1242 | 0 | if (msgNestedLevel > waitingNestedLevel) |
1243 | 0 | return false; |
1244 | 0 | |
1245 | 0 | // When both sides send sync messages of the same nested level, we resolve the |
1246 | 0 | // race by dispatching in the child and deferring the incoming message in |
1247 | 0 | // the parent. However, the parent still needs to dispatch nested sync |
1248 | 0 | // messages. |
1249 | 0 | // |
1250 | 0 | // Deferring in the parent only sort of breaks message ordering. When the |
1251 | 0 | // child's message comes in, we can pretend the child hasn't quite |
1252 | 0 | // finished sending it yet. Since the message is sync, we know that the |
1253 | 0 | // child hasn't moved on yet. |
1254 | 0 | return mSide == ParentSide && aMsg.transaction_id() != CurrentNestedInsideSyncTransaction(); |
1255 | 0 | } |
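Condensed, the deferral decision above looks like the following sketch; ShouldDefer and its parameters are invented stand-ins for the real Message accessors and transaction queries.

    #include <cassert>

    enum Level { NOT_NESTED = 0, NESTED_INSIDE_SYNC = 1, NESTED_INSIDE_CPOW = 2 };

    bool ShouldDefer(bool isSync, Level msgLevel, Level awaitingLevel,
                     bool isParent, bool sameTransaction)
    {
        if (msgLevel == NESTED_INSIDE_CPOW) return false;  // highest level: never defer
        if (!isSync)                        return true;   // async (NOT_NESTED): always defer
        if (msgLevel < awaitingLevel)       return true;
        if (msgLevel > awaitingLevel)       return false;
        // Same level: the parent defers unless the message belongs to the
        // transaction it is currently blocked on.
        return isParent && !sameTransaction;
    }

    int main()
    {
        // Child blocked at NESTED_INSIDE_SYNC: the parent's same-level sync message is dispatched.
        assert(!ShouldDefer(true, NESTED_INSIDE_SYNC, NESTED_INSIDE_SYNC, /*isParent*/false, false));
        // Parent blocked at NESTED_INSIDE_SYNC: an unrelated same-level message is deferred.
        assert(ShouldDefer(true, NESTED_INSIDE_SYNC, NESTED_INSIDE_SYNC, /*isParent*/true, false));
        return 0;
    }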
1256 | | |
1257 | | void |
1258 | | MessageChannel::OnMessageReceivedFromLink(Message&& aMsg) |
1259 | 0 | { |
1260 | 0 | AssertLinkThread(); |
1261 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1262 | 0 |
|
1263 | 0 | if (MaybeInterceptSpecialIOMessage(aMsg)) |
1264 | 0 | return; |
1265 | 0 | |
1266 | 0 | #ifdef EARLY_BETA_OR_EARLIER |
1267 | 0 | mListener->OnChannelReceivedMessage(aMsg); |
1268 | 0 | #endif |
1269 | 0 |
|
1270 | 0 | // Regardless of the Interrupt stack, if we're awaiting a sync reply, |
1271 | 0 | // we know that it needs to be immediately handled to unblock us. |
1272 | 0 | if (aMsg.is_sync() && aMsg.is_reply()) { |
1273 | 0 | IPC_LOG("Received reply seqno=%d xid=%d", aMsg.seqno(), aMsg.transaction_id()); |
1274 | 0 |
|
1275 | 0 | if (aMsg.seqno() == mTimedOutMessageSeqno) { |
1276 | 0 | // Drop the message, but allow future sync messages to be sent. |
1277 | 0 | IPC_LOG("Received reply to timed-out message; ignoring; xid=%d", mTimedOutMessageSeqno); |
1278 | 0 | EndTimeout(); |
1279 | 0 | return; |
1280 | 0 | } |
1281 | 0 |
|
1282 | 0 | MOZ_RELEASE_ASSERT(AwaitingSyncReply()); |
1283 | 0 | MOZ_RELEASE_ASSERT(!mTimedOutMessageSeqno); |
1284 | 0 |
|
1285 | 0 | mTransactionStack->HandleReply(std::move(aMsg)); |
1286 | 0 | NotifyWorkerThread(); |
1287 | 0 | return; |
1288 | 0 | } |
1289 | 0 | |
1290 | 0 | // Nested messages cannot be compressed. |
1291 | 0 | MOZ_RELEASE_ASSERT(aMsg.compress_type() == IPC::Message::COMPRESSION_NONE || |
1292 | 0 | aMsg.nested_level() == IPC::Message::NOT_NESTED); |
1293 | 0 |
|
1294 | 0 | bool reuseTask = false; |
1295 | 0 | if (aMsg.compress_type() == IPC::Message::COMPRESSION_ENABLED) { |
1296 | 0 | bool compress = (!mPending.isEmpty() && |
1297 | 0 | mPending.getLast()->Msg().type() == aMsg.type() && |
1298 | 0 | mPending.getLast()->Msg().routing_id() == aMsg.routing_id()); |
1299 | 0 | if (compress) { |
1300 | 0 | // This message type has compression enabled, and the back of the |
1301 | 0 | // queue was the same message type and routed to the same destination. |
1302 | 0 | // Replace it with the newer message. |
1303 | 0 | MOZ_RELEASE_ASSERT(mPending.getLast()->Msg().compress_type() == |
1304 | 0 | IPC::Message::COMPRESSION_ENABLED); |
1305 | 0 | mPending.getLast()->Msg() = std::move(aMsg); |
1306 | 0 |
|
1307 | 0 | reuseTask = true; |
1308 | 0 | } |
1309 | 0 | } else if (aMsg.compress_type() == IPC::Message::COMPRESSION_ALL && !mPending.isEmpty()) { |
1310 | 0 | for (MessageTask* p = mPending.getLast(); p; p = p->getPrevious()) { |
1311 | 0 | if (p->Msg().type() == aMsg.type() && |
1312 | 0 | p->Msg().routing_id() == aMsg.routing_id()) |
1313 | 0 | { |
1314 | 0 | // This message type has compression enabled, and the queue |
1315 | 0 | // holds a message with the same message type and routed to the |
1316 | 0 | // same destination. Erase it. Note that, since we always |
1317 | 0 | // compress these redundancies, There Can Be Only One. |
1318 | 0 | MOZ_RELEASE_ASSERT(p->Msg().compress_type() == IPC::Message::COMPRESSION_ALL); |
1319 | 0 | MOZ_RELEASE_ASSERT(IsAlwaysDeferred(p->Msg())); |
1320 | 0 | p->remove(); |
1321 | 0 | break; |
1322 | 0 | } |
1323 | 0 | } |
1324 | 0 | } |
1325 | 0 |
|
1326 | 0 | bool alwaysDeferred = IsAlwaysDeferred(aMsg); |
1327 | 0 |
|
1328 | 0 | bool wakeUpSyncSend = AwaitingSyncReply() && !ShouldDeferMessage(aMsg); |
1329 | 0 |
|
1330 | 0 | bool shouldWakeUp = AwaitingInterruptReply() || |
1331 | 0 | wakeUpSyncSend || |
1332 | 0 | AwaitingIncomingMessage(); |
1333 | 0 |
|
1334 | 0 | // Although we usually don't need to post a message task if |
1335 | 0 | // shouldWakeUp is true, it's easier to post anyway than to have to |
1336 | 0 | // guarantee that every Send call processes everything it's supposed to |
1337 | 0 | // before returning. |
1338 | 0 | bool shouldPostTask = !shouldWakeUp || wakeUpSyncSend; |
1339 | 0 |
|
1340 | 0 | IPC_LOG("Receive on link thread; seqno=%d, xid=%d, shouldWakeUp=%d", |
1341 | 0 | aMsg.seqno(), aMsg.transaction_id(), shouldWakeUp); |
1342 | 0 |
|
1343 | 0 | if (reuseTask) { |
1344 | 0 | return; |
1345 | 0 | } |
1346 | 0 | |
1347 | 0 | // There are three cases we're concerned about, relating to the state of the |
1348 | 0 | // main thread: |
1349 | 0 | // |
1350 | 0 | // (1) We are waiting on a sync reply - main thread is blocked on the |
1351 | 0 | // IPC monitor. |
1352 | 0 | // - If the message is NESTED_INSIDE_SYNC, we wake up the main thread to |
1353 | 0 | // deliver the message depending on ShouldDeferMessage. Otherwise, we |
1354 | 0 | // leave it in the mPending queue, posting a task to the main event |
1355 | 0 | // loop, where it will be processed once the synchronous reply has been |
1356 | 0 | // received. |
1357 | 0 | // |
1358 | 0 | // (2) We are waiting on an Interrupt reply - main thread is blocked on the |
1359 | 0 | // IPC monitor. |
1360 | 0 | // - Always notify and wake up the main thread. |
1361 | 0 | // |
1362 | 0 | // (3) We are not waiting on a reply. |
1363 | 0 | // - We post a task to the main event loop. |
1364 | 0 | // |
1365 | 0 | // Note that, we may notify the main thread even though the monitor is not |
1366 | 0 | // blocked. This is okay, since we always check for pending events before |
1367 | 0 | // blocking again. |
1368 | 0 | |
1369 | | #ifdef MOZ_TASK_TRACER |
1370 | | aMsg.TaskTracerDispatch(); |
1371 | | #endif |
1372 | 0 | RefPtr<MessageTask> task = new MessageTask(this, std::move(aMsg)); |
1373 | 0 | mPending.insertBack(task); |
1374 | 0 |
|
1375 | 0 | if (!alwaysDeferred) { |
1376 | 0 | mMaybeDeferredPendingCount++; |
1377 | 0 | } |
1378 | 0 |
|
1379 | 0 | if (shouldWakeUp) { |
1380 | 0 | NotifyWorkerThread(); |
1381 | 0 | } |
1382 | 0 |
|
1383 | 0 | if (shouldPostTask) { |
1384 | 0 | task->Post(); |
1385 | 0 | } |
1386 | 0 | } |
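The two compression paths above either overwrite the tail of mPending in place (COMPRESSION_ENABLED) or erase the single older duplicate anywhere in the queue before the new task is appended (COMPRESSION_ALL). A self-contained sketch of both behaviors on a plain std::list; Msg and Enqueue are invented names.

    #include <cstdio>
    #include <iterator>
    #include <list>

    enum Compression { NONE, ENABLED, ALL };

    struct Msg { int type; int routingId; Compression comp; int payload; };

    void Enqueue(std::list<Msg>& pending, Msg msg)
    {
        if (msg.comp == ENABLED && !pending.empty() &&
            pending.back().type == msg.type &&
            pending.back().routingId == msg.routingId) {
            pending.back() = msg;                         // replace the tail in place
            return;
        }
        if (msg.comp == ALL) {
            for (auto it = pending.rbegin(); it != pending.rend(); ++it) {
                if (it->type == msg.type && it->routingId == msg.routingId) {
                    pending.erase(std::next(it).base());  // at most one duplicate can exist
                    break;
                }
            }
        }
        pending.push_back(msg);
    }

    int main()
    {
        std::list<Msg> pending;
        Enqueue(pending, {1, 7, ENABLED, 10});
        Enqueue(pending, {1, 7, ENABLED, 11});   // tail matches: replaced, still one entry
        Enqueue(pending, {2, 7, ALL, 20});
        Enqueue(pending, {2, 7, ALL, 21});       // older type-2 entry erased, newest appended
        std::printf("queued=%zu\n", pending.size());   // queued=2
        return 0;
    }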
1387 | | |
1388 | | void |
1389 | | MessageChannel::PeekMessages(const std::function<bool(const Message& aMsg)>& aInvoke) |
1390 | 0 | { |
1391 | 0 | // FIXME: We shouldn't be holding the lock for aInvoke! |
1392 | 0 | MonitorAutoLock lock(*mMonitor); |
1393 | 0 |
|
1394 | 0 | for (MessageTask* it : mPending) { |
1395 | 0 | const Message &msg = it->Msg(); |
1396 | 0 | if (!aInvoke(msg)) { |
1397 | 0 | break; |
1398 | 0 | } |
1399 | 0 | } |
1400 | 0 | } |
1401 | | |
1402 | | void |
1403 | | MessageChannel::ProcessPendingRequests(AutoEnterTransaction& aTransaction) |
1404 | 0 | { |
1405 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1406 | 0 |
|
1407 | 0 | AssertMaybeDeferredCountCorrect(); |
1408 | 0 | if (mMaybeDeferredPendingCount == 0) { |
1409 | 0 | return; |
1410 | 0 | } |
1411 | 0 | |
1412 | 0 | IPC_LOG("ProcessPendingRequests for seqno=%d, xid=%d", |
1413 | 0 | aTransaction.SequenceNumber(), aTransaction.TransactionID()); |
1414 | 0 |
|
1415 | 0 | // Loop until there aren't any more nested messages to process. |
1416 | 0 | for (;;) { |
1417 | 0 | // If we canceled during ProcessPendingRequest, then we need to leave |
1418 | 0 | // immediately because the results of ShouldDeferMessage will be |
1419 | 0 | // operating with weird state (as if no Send is in progress). That could |
1420 | 0 | // cause even NOT_NESTED sync messages to be processed (but not |
1421 | 0 | // NOT_NESTED async messages), which would break message ordering. |
1422 | 0 | if (aTransaction.IsCanceled()) { |
1423 | 0 | return; |
1424 | 0 | } |
1425 | 0 | |
1426 | 0 | mozilla::Vector<Message> toProcess; |
1427 | 0 |
|
1428 | 0 | for (MessageTask* p = mPending.getFirst(); p; ) { |
1429 | 0 | Message &msg = p->Msg(); |
1430 | 0 |
|
1431 | 0 | MOZ_RELEASE_ASSERT(!aTransaction.IsCanceled(), |
1432 | 0 | "Calling ShouldDeferMessage when cancelled"); |
1433 | 0 | bool defer = ShouldDeferMessage(msg); |
1434 | 0 |
|
1435 | 0 | // Only log the interesting messages. |
1436 | 0 | if (msg.is_sync() || msg.nested_level() == IPC::Message::NESTED_INSIDE_CPOW) { |
1437 | 0 | IPC_LOG("ShouldDeferMessage(seqno=%d) = %d", msg.seqno(), defer); |
1438 | 0 | } |
1439 | 0 |
|
1440 | 0 | if (!defer) { |
1441 | 0 | MOZ_ASSERT(!IsAlwaysDeferred(msg)); |
1442 | 0 |
|
1443 | 0 | if (!toProcess.append(std::move(msg))) |
1444 | 0 | MOZ_CRASH(); |
1445 | 0 |
|
1446 | 0 | mMaybeDeferredPendingCount--; |
1447 | 0 |
|
1448 | 0 | p = p->removeAndGetNext(); |
1449 | 0 | continue; |
1450 | 0 | } |
1451 | 0 | p = p->getNext(); |
1452 | 0 | } |
1453 | 0 |
|
1454 | 0 | if (toProcess.empty()) { |
1455 | 0 | break; |
1456 | 0 | } |
1457 | 0 | |
1458 | 0 | // Processing these messages could result in more messages, so we |
1459 | 0 | // loop around to check for more afterwards. |
1460 | 0 | |
1461 | 0 | for (auto it = toProcess.begin(); it != toProcess.end(); it++) { |
1462 | 0 | ProcessPendingRequest(std::move(*it)); |
1463 | 0 | } |
1464 | 0 | } |
1465 | 0 |
|
1466 | 0 | AssertMaybeDeferredCountCorrect(); |
1467 | 0 | } |
1468 | | |
1469 | | bool |
1470 | | MessageChannel::Send(Message* aMsg, Message* aReply) |
1471 | 0 | { |
1472 | 0 | mozilla::TimeStamp start = TimeStamp::Now(); |
1473 | 0 | if (aMsg->size() >= kMinTelemetryMessageSize) { |
1474 | 0 | Telemetry::Accumulate(Telemetry::IPC_MESSAGE_SIZE2, aMsg->size()); |
1475 | 0 | } |
1476 | 0 |
|
1477 | 0 | UniquePtr<Message> msg(aMsg); |
1478 | 0 |
|
1479 | 0 | // Sanity checks. |
1480 | 0 | AssertWorkerThread(); |
1481 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
1482 | 0 |
|
1483 | | #ifdef OS_WIN |
1484 | | SyncStackFrame frame(this, false); |
1485 | | NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION); |
1486 | | #endif |
1487 | | #ifdef MOZ_TASK_TRACER |
1488 | | AutoScopedLabel autolabel("sync message %s", aMsg->name()); |
1489 | | #endif |
1490 | |
|
1491 | 0 | CxxStackFrame f(*this, OUT_MESSAGE, msg.get()); |
1492 | 0 |
|
1493 | 0 | MonitorAutoLock lock(*mMonitor); |
1494 | 0 |
|
1495 | 0 | if (mTimedOutMessageSeqno) { |
1496 | 0 | // Don't bother sending another sync message if a previous one timed out |
1497 | 0 | // and we haven't received a reply for it. Once the original timed-out |
1498 | 0 | // message receives a reply, we'll be able to send more sync messages |
1499 | 0 | // again. |
1500 | 0 | IPC_LOG("Send() failed due to previous timeout"); |
1501 | 0 | mLastSendError = SyncSendError::PreviousTimeout; |
1502 | 0 | return false; |
1503 | 0 | } |
1504 | 0 |
|
1505 | 0 | if (DispatchingSyncMessageNestedLevel() == IPC::Message::NOT_NESTED && |
1506 | 0 | msg->nested_level() > IPC::Message::NOT_NESTED) |
1507 | 0 | { |
1508 | 0 | // Don't allow sending CPOWs while we're dispatching a sync message. |
1509 | 0 | // If you want to do that, use sendRpcMessage instead. |
1510 | 0 | IPC_LOG("Nested level forbids send"); |
1511 | 0 | mLastSendError = SyncSendError::SendingCPOWWhileDispatchingSync; |
1512 | 0 | return false; |
1513 | 0 | } |
1514 | 0 |
|
1515 | 0 | if (DispatchingSyncMessageNestedLevel() == IPC::Message::NESTED_INSIDE_CPOW || |
1516 | 0 | DispatchingAsyncMessageNestedLevel() == IPC::Message::NESTED_INSIDE_CPOW) |
1517 | 0 | { |
1518 | 0 | // Generally only the parent dispatches urgent messages. And the only |
1519 | 0 | // sync messages it can send are NESTED_INSIDE_SYNC. Mainly we want to ensure |
1520 | 0 | // here that we don't return false for non-CPOW messages. |
1521 | 0 | MOZ_RELEASE_ASSERT(msg->nested_level() == IPC::Message::NESTED_INSIDE_SYNC); |
1522 | 0 | IPC_LOG("Sending while dispatching urgent message"); |
1523 | 0 | mLastSendError = SyncSendError::SendingCPOWWhileDispatchingUrgent; |
1524 | 0 | return false; |
1525 | 0 | } |
1526 | 0 | |
1527 | 0 | if (msg->nested_level() < DispatchingSyncMessageNestedLevel() || |
1528 | 0 | msg->nested_level() < AwaitingSyncReplyNestedLevel()) |
1529 | 0 | { |
1530 | 0 | MOZ_RELEASE_ASSERT(DispatchingSyncMessage() || DispatchingAsyncMessage()); |
1531 | 0 | MOZ_RELEASE_ASSERT(!mIsPostponingSends); |
1532 | 0 | IPC_LOG("Cancel from Send"); |
1533 | 0 | CancelMessage *cancel = new CancelMessage(CurrentNestedInsideSyncTransaction()); |
1534 | 0 | CancelTransaction(CurrentNestedInsideSyncTransaction()); |
1535 | 0 | mLink->SendMessage(cancel); |
1536 | 0 | } |
1537 | 0 |
|
1538 | 0 | IPC_ASSERT(msg->is_sync(), "can only Send() sync messages here"); |
1539 | 0 |
|
1540 | 0 | IPC_ASSERT(msg->nested_level() >= DispatchingSyncMessageNestedLevel(), |
1541 | 0 | "can't send sync message of a lesser nested level than what's being dispatched"); |
1542 | 0 | IPC_ASSERT(AwaitingSyncReplyNestedLevel() <= msg->nested_level(), |
1543 | 0 | "nested sync message sends must be of increasing nested level"); |
1544 | 0 | IPC_ASSERT(DispatchingSyncMessageNestedLevel() != IPC::Message::NESTED_INSIDE_CPOW, |
1545 | 0 | "not allowed to send messages while dispatching urgent messages"); |
1546 | 0 |
|
1547 | 0 | IPC_ASSERT(DispatchingAsyncMessageNestedLevel() != IPC::Message::NESTED_INSIDE_CPOW, |
1548 | 0 | "not allowed to send messages while dispatching urgent messages"); |
1549 | 0 |
|
1550 | 0 | if (!Connected()) { |
1551 | 0 | ReportConnectionError("MessageChannel::SendAndWait", msg.get()); |
1552 | 0 | mLastSendError = SyncSendError::NotConnectedBeforeSend; |
1553 | 0 | return false; |
1554 | 0 | } |
1555 | 0 | |
1556 | 0 | msg->set_seqno(NextSeqno()); |
1557 | 0 |
|
1558 | 0 | int32_t seqno = msg->seqno(); |
1559 | 0 | int nestedLevel = msg->nested_level(); |
1560 | 0 | msgid_t replyType = msg->type() + 1; |
1561 | 0 |
|
1562 | 0 | AutoEnterTransaction *stackTop = mTransactionStack; |
1563 | 0 |
|
1564 | 0 | // If the most recent message on the stack is NESTED_INSIDE_SYNC, then our |
1565 | 0 | // message should nest inside that and we use the same transaction |
1566 | 0 | // ID. Otherwise we need a new transaction ID (so we use the seqno of the |
1567 | 0 | // message we're sending). |
1568 | 0 | bool nest = stackTop && stackTop->NestedLevel() == IPC::Message::NESTED_INSIDE_SYNC; |
1569 | 0 | int32_t transaction = nest ? stackTop->TransactionID() : seqno; |
1570 | 0 | msg->set_transaction_id(transaction); |
1571 | 0 |
|
1572 | 0 | bool handleWindowsMessages = mListener->HandleWindowsMessages(*aMsg); |
1573 | 0 | AutoEnterTransaction transact(this, seqno, transaction, nestedLevel); |
1574 | 0 |
|
1575 | 0 | IPC_LOG("Send seqno=%d, xid=%d", seqno, transaction); |
1576 | 0 |
|
1577 | 0 | // msg will be destroyed soon, but name() is not owned by msg. |
1578 | 0 | const char* msgName = msg->name(); |
1579 | 0 |
|
1580 | 0 | SendMessageToLink(msg.release()); |
1581 | 0 |
|
1582 | 0 | while (true) { |
1583 | 0 | MOZ_RELEASE_ASSERT(!transact.IsCanceled()); |
1584 | 0 | ProcessPendingRequests(transact); |
1585 | 0 | if (transact.IsComplete()) { |
1586 | 0 | break; |
1587 | 0 | } |
1588 | 0 | if (!Connected()) { |
1589 | 0 | ReportConnectionError("MessageChannel::Send"); |
1590 | 0 | mLastSendError = SyncSendError::DisconnectedDuringSend; |
1591 | 0 | return false; |
1592 | 0 | } |
1593 | 0 | |
1594 | 0 | MOZ_RELEASE_ASSERT(!mTimedOutMessageSeqno); |
1595 | 0 | MOZ_RELEASE_ASSERT(!transact.IsComplete()); |
1596 | 0 | MOZ_RELEASE_ASSERT(mTransactionStack == &transact); |
1597 | 0 |
|
1598 | 0 | bool maybeTimedOut = !WaitForSyncNotify(handleWindowsMessages); |
1599 | 0 |
|
1600 | 0 | if (mListener->NeedArtificialSleep()) { |
1601 | 0 | MonitorAutoUnlock unlock(*mMonitor); |
1602 | 0 | mListener->ArtificialSleep(); |
1603 | 0 | } |
1604 | 0 |
|
1605 | 0 | if (!Connected()) { |
1606 | 0 | ReportConnectionError("MessageChannel::SendAndWait"); |
1607 | 0 | mLastSendError = SyncSendError::DisconnectedDuringSend; |
1608 | 0 | return false; |
1609 | 0 | } |
1610 | 0 | |
1611 | 0 | if (transact.IsCanceled()) { |
1612 | 0 | break; |
1613 | 0 | } |
1614 | 0 | |
1615 | 0 | MOZ_RELEASE_ASSERT(mTransactionStack == &transact); |
1616 | 0 |
|
1617 | 0 | // We only time out a message if it initiated a new transaction (i.e., |
1618 | 0 | // if neither side has any other message Sends on the stack). |
1619 | 0 | bool canTimeOut = transact.IsBottom(); |
1620 | 0 | if (maybeTimedOut && canTimeOut && !ShouldContinueFromTimeout()) { |
1621 | 0 | // Since ShouldContinueFromTimeout drops the lock, we need to |
1622 | 0 | // re-check all our conditions here. We shouldn't time out if any of |
1623 | 0 | // these things happen because there won't be a reply to the timed |
1624 | 0 | // out message in these cases. |
1625 | 0 | if (transact.IsComplete()) { |
1626 | 0 | break; |
1627 | 0 | } |
1628 | 0 | |
1629 | 0 | IPC_LOG("Timing out Send: xid=%d", transaction); |
1630 | 0 |
|
1631 | 0 | mTimedOutMessageSeqno = seqno; |
1632 | 0 | mTimedOutMessageNestedLevel = nestedLevel; |
1633 | 0 | mLastSendError = SyncSendError::TimedOut; |
1634 | 0 | return false; |
1635 | 0 | } |
1636 | 0 | |
1637 | 0 | if (transact.IsCanceled()) { |
1638 | 0 | break; |
1639 | 0 | } |
1640 | 0 | } |
1641 | 0 |
|
1642 | 0 | if (transact.IsCanceled()) { |
1643 | 0 | IPC_LOG("Other side canceled seqno=%d, xid=%d", seqno, transaction); |
1644 | 0 | mLastSendError = SyncSendError::CancelledAfterSend; |
1645 | 0 | return false; |
1646 | 0 | } |
1647 | 0 |
|
1648 | 0 | if (transact.IsError()) { |
1649 | 0 | IPC_LOG("Error: seqno=%d, xid=%d", seqno, transaction); |
1650 | 0 | mLastSendError = SyncSendError::ReplyError; |
1651 | 0 | return false; |
1652 | 0 | } |
1653 | 0 |
|
1654 | 0 | uint32_t latencyMs = round((TimeStamp::Now() - start).ToMilliseconds()); |
1655 | 0 | IPC_LOG("Got reply: seqno=%d, xid=%d, msgName=%s, latency=%ums", |
1656 | 0 | seqno, transaction, msgName, latencyMs); |
1657 | 0 |
|
1658 | 0 | UniquePtr<Message> reply = transact.GetReply(); |
1659 | 0 |
|
1660 | 0 | MOZ_RELEASE_ASSERT(reply); |
1661 | 0 | MOZ_RELEASE_ASSERT(reply->is_reply(), "expected reply"); |
1662 | 0 | MOZ_RELEASE_ASSERT(!reply->is_reply_error()); |
1663 | 0 | MOZ_RELEASE_ASSERT(reply->seqno() == seqno); |
1664 | 0 | MOZ_RELEASE_ASSERT(reply->type() == replyType, "wrong reply type"); |
1665 | 0 | MOZ_RELEASE_ASSERT(reply->is_sync()); |
1666 | 0 |
|
1667 | 0 | *aReply = std::move(*reply); |
1668 | 0 | if (aReply->size() >= kMinTelemetryMessageSize) { |
1669 | 0 | Telemetry::Accumulate(Telemetry::IPC_REPLY_SIZE, |
1670 | 0 | nsDependentCString(msgName), aReply->size()); |
1671 | 0 | } |
1672 | 0 |
|
1673 | 0 | // NOTE: Only collect IPC_SYNC_MAIN_LATENCY_MS on the main thread (bug 1343729) |
1674 | 0 | if (NS_IsMainThread() && latencyMs >= kMinTelemetrySyncIPCLatencyMs) { |
1675 | 0 | Telemetry::Accumulate(Telemetry::IPC_SYNC_MAIN_LATENCY_MS, |
1676 | 0 | nsDependentCString(msgName), latencyMs); |
1677 | 0 | } |
1678 | 0 | return true; |
1679 | 0 | } |
1680 | | |
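// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): the transaction-ID rule
// used by Send() above, pulled out as a hypothetical helper. A new sync send
// joins the transaction already on top of the stack only if that transaction
// is NESTED_INSIDE_SYNC; otherwise it starts a new transaction keyed by the
// outgoing message's own sequence number.
static int32_t
SketchPickTransactionId(const AutoEnterTransaction* aStackTop, int32_t aSeqno)
{
    bool nest = aStackTop &&
                aStackTop->NestedLevel() == IPC::Message::NESTED_INSIDE_SYNC;
    return nest ? aStackTop->TransactionID() : aSeqno;
}
// ---------------------------------------------------------------------------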
1681 | | bool |
1682 | | MessageChannel::Call(Message* aMsg, Message* aReply) |
1683 | 0 | { |
1684 | 0 | UniquePtr<Message> msg(aMsg); |
1685 | 0 | AssertWorkerThread(); |
1686 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
1687 | 0 |
|
1688 | | #ifdef OS_WIN |
1689 | | SyncStackFrame frame(this, true); |
1690 | | #endif |
1691 | | #ifdef MOZ_TASK_TRACER |
1692 | | AutoScopedLabel autolabel("sync message %s", aMsg->name()); |
1693 | | #endif |
1694 | |
|
1695 | 0 | // This must come before MonitorAutoLock, as its destructor acquires the |
1696 | 0 | // monitor lock. |
1697 | 0 | CxxStackFrame cxxframe(*this, OUT_MESSAGE, msg.get()); |
1698 | 0 |
|
1699 | 0 | MonitorAutoLock lock(*mMonitor); |
1700 | 0 | if (!Connected()) { |
1701 | 0 | ReportConnectionError("MessageChannel::Call", msg.get()); |
1702 | 0 | return false; |
1703 | 0 | } |
1704 | 0 | |
1705 | 0 | // Sanity checks. |
1706 | 0 | IPC_ASSERT(!AwaitingSyncReply(), |
1707 | 0 | "cannot issue Interrupt call while blocked on sync request"); |
1708 | 0 | IPC_ASSERT(!DispatchingSyncMessage(), |
1709 | 0 | "violation of sync handler invariant"); |
1710 | 0 | IPC_ASSERT(msg->is_interrupt(), "can only Call() Interrupt messages here"); |
1711 | 0 | IPC_ASSERT(!mIsPostponingSends, "not postponing sends"); |
1712 | 0 |
|
1713 | 0 | msg->set_seqno(NextSeqno()); |
1714 | 0 | msg->set_interrupt_remote_stack_depth_guess(mRemoteStackDepthGuess); |
1715 | 0 | msg->set_interrupt_local_stack_depth(1 + InterruptStackDepth()); |
1716 | 0 | mInterruptStack.push(MessageInfo(*msg)); |
1717 | 0 | mLink->SendMessage(msg.release()); |
1718 | 0 |
|
1719 | 0 | while (true) { |
1720 | 0 | // if a handler invoked by *Dispatch*() spun a nested event |
1721 | 0 | // loop, and the connection was broken during that loop, we |
1722 | 0 | // might have already processed the OnError event. if so, |
1723 | 0 | // trying another loop iteration will be futile because |
1724 | 0 | // channel state will have been cleared |
1725 | 0 | if (!Connected()) { |
1726 | 0 | ReportConnectionError("MessageChannel::Call"); |
1727 | 0 | return false; |
1728 | 0 | } |
1729 | 0 | |
1730 | | #ifdef OS_WIN |
1732 | | // We need to limit the scope of neuteredRgn to this spot in the code.
1732 | | // Window neutering can't be enabled during some plugin calls because |
1733 | | // we then risk the neutered window procedure being subclassed by a |
1734 | | // plugin. |
1735 | | { |
1736 | | NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION); |
1737 | | /* We should pump messages at this point to ensure that the IPC peer |
1738 | | does not become deadlocked on a pending inter-thread SendMessage() */ |
1739 | | neuteredRgn.PumpOnce(); |
1740 | | } |
1741 | | #endif |
1742 | | |
1743 | 0 | // Now might be the time to process a message deferred because of race |
1744 | 0 | // resolution. |
1745 | 0 | MaybeUndeferIncall(); |
1746 | 0 |
|
1747 | 0 | // Wait for an event to occur. |
1748 | 0 | while (!InterruptEventOccurred()) { |
1749 | 0 | bool maybeTimedOut = !WaitForInterruptNotify(); |
1750 | 0 |
|
1751 | 0 | // We might have received a "subtly deferred" message in a nested |
1752 | 0 | // loop that it's now time to process. |
1753 | 0 | if (InterruptEventOccurred() || |
1754 | 0 | (!maybeTimedOut && (!mDeferred.empty() || !mOutOfTurnReplies.empty()))) |
1755 | 0 | { |
1756 | 0 | break; |
1757 | 0 | } |
1758 | 0 | |
1759 | 0 | if (maybeTimedOut && !ShouldContinueFromTimeout()) |
1760 | 0 | return false; |
1761 | 0 | } |
1762 | 0 |
|
1763 | 0 | Message recvd; |
1764 | 0 | MessageMap::iterator it; |
1765 | 0 |
|
1766 | 0 | if ((it = mOutOfTurnReplies.find(mInterruptStack.top().seqno())) |
1767 | 0 | != mOutOfTurnReplies.end()) |
1768 | 0 | { |
1769 | 0 | recvd = std::move(it->second); |
1770 | 0 | mOutOfTurnReplies.erase(it); |
1771 | 0 | } else if (!mPending.isEmpty()) { |
1772 | 0 | RefPtr<MessageTask> task = mPending.popFirst(); |
1773 | 0 | recvd = std::move(task->Msg()); |
1774 | 0 | if (!IsAlwaysDeferred(recvd)) { |
1775 | 0 | mMaybeDeferredPendingCount--; |
1776 | 0 | } |
1777 | 0 | } else { |
1778 | 0 | // because of subtleties with nested event loops, it's possible |
1779 | 0 | // that we got here and nothing happened. or, we might have a |
1780 | 0 | // deferred in-call that needs to be processed. either way, we |
1781 | 0 | // won't break the inner while loop again until something new |
1782 | 0 | // happens. |
1783 | 0 | continue; |
1784 | 0 | } |
1785 | 0 | |
1786 | 0 | // If the message is not Interrupt, we can dispatch it as normal. |
1787 | 0 | if (!recvd.is_interrupt()) { |
1788 | 0 | DispatchMessage(std::move(recvd)); |
1789 | 0 | if (!Connected()) { |
1790 | 0 | ReportConnectionError("MessageChannel::DispatchMessage"); |
1791 | 0 | return false; |
1792 | 0 | } |
1793 | 0 | continue; |
1794 | 0 | } |
1795 | 0 | |
1796 | 0 | // If the message is an Interrupt reply, either process it as a reply to our |
1797 | 0 | // call, or add it to the list of out-of-turn replies we've received. |
1798 | 0 | if (recvd.is_reply()) { |
1799 | 0 | IPC_ASSERT(!mInterruptStack.empty(), "invalid Interrupt stack"); |
1800 | 0 |
|
1801 | 0 | // If this is not a reply to the call we've initiated, add it to our
1802 | 0 | // out-of-turn replies and keep polling for events.
1803 | 0 | { |
1804 | 0 | const MessageInfo &outcall = mInterruptStack.top(); |
1805 | 0 |
|
1806 | 0 | // Note: in the parent, sequence numbers increase from 0, and
1807 | 0 | // in the child, they decrease from 0. |
1808 | 0 | if ((mSide == ChildSide && recvd.seqno() > outcall.seqno()) || |
1809 | 0 | (mSide != ChildSide && recvd.seqno() < outcall.seqno())) |
1810 | 0 | { |
1811 | 0 | mOutOfTurnReplies[recvd.seqno()] = std::move(recvd); |
1812 | 0 | continue; |
1813 | 0 | } |
1814 | 0 | |
1815 | 0 | IPC_ASSERT(recvd.is_reply_error() || |
1816 | 0 | (recvd.type() == (outcall.type() + 1) && |
1817 | 0 | recvd.seqno() == outcall.seqno()), |
1818 | 0 | "somebody's misbehavin'", true); |
1819 | 0 | } |
1820 | 0 |
|
1821 | 0 | // We received a reply to our most recent outstanding call. Pop |
1822 | 0 | // this frame and return the reply. |
1823 | 0 | mInterruptStack.pop(); |
1824 | 0 |
|
1825 | 0 | bool is_reply_error = recvd.is_reply_error(); |
1826 | 0 | if (!is_reply_error) { |
1827 | 0 | *aReply = std::move(recvd); |
1828 | 0 | } |
1829 | 0 |
|
1830 | 0 | // If we have no more pending out calls waiting on replies, then |
1831 | 0 | // the reply queue should be empty. |
1832 | 0 | IPC_ASSERT(!mInterruptStack.empty() || mOutOfTurnReplies.empty(), |
1833 | 0 | "still have pending replies with no pending out-calls", |
1834 | 0 | true); |
1835 | 0 |
|
1836 | 0 | return !is_reply_error; |
1837 | 0 | } |
1838 | 0 |
|
1839 | 0 | // Dispatch an Interrupt in-call. Snapshot the current stack depth while we |
1840 | 0 | // own the monitor. |
1841 | 0 | size_t stackDepth = InterruptStackDepth(); |
1842 | 0 | { |
1843 | | #ifdef MOZ_TASK_TRACER |
1844 | | Message::AutoTaskTracerRun tasktracerRun(recvd); |
1845 | | #endif |
1846 | | MonitorAutoUnlock unlock(*mMonitor); |
1847 | 0 |
|
1848 | 0 | CxxStackFrame frame(*this, IN_MESSAGE, &recvd); |
1849 | 0 | DispatchInterruptMessage(std::move(recvd), stackDepth); |
1850 | 0 | } |
1851 | 0 | if (!Connected()) { |
1852 | 0 | ReportConnectionError("MessageChannel::DispatchInterruptMessage"); |
1853 | 0 | return false; |
1854 | 0 | } |
1855 | 0 | } |
1856 | 0 |
|
1857 | 0 | return true; |
1858 | 0 | } |
1859 | | |
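// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): the out-of-turn test
// from Call() above as a standalone helper (the name is hypothetical).
// Because the parent allocates sequence numbers counting up from 0 and the
// child counts down from 0, a reply to an *earlier* outstanding call (one
// deeper in mInterruptStack than the top) carries a larger seqno on the child
// side and a smaller seqno on the parent side.
static bool
SketchIsOutOfTurnInterruptReply(bool aIsChildSide,
                                int32_t aReplySeqno,
                                int32_t aTopOutcallSeqno)
{
    return aIsChildSide ? aReplySeqno > aTopOutcallSeqno
                        : aReplySeqno < aTopOutcallSeqno;
}
// ---------------------------------------------------------------------------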
1860 | | bool |
1861 | | MessageChannel::WaitForIncomingMessage() |
1862 | 0 | { |
1863 | | #ifdef OS_WIN |
1864 | | SyncStackFrame frame(this, true); |
1865 | | NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION); |
1866 | | #endif |
1867 | |
|
1868 | 0 | MonitorAutoLock lock(*mMonitor); |
1869 | 0 | AutoEnterWaitForIncoming waitingForIncoming(*this); |
1870 | 0 | if (mChannelState != ChannelConnected) { |
1871 | 0 | return false; |
1872 | 0 | } |
1873 | 0 | if (!HasPendingEvents()) { |
1874 | 0 | return WaitForInterruptNotify(); |
1875 | 0 | } |
1876 | 0 | |
1877 | 0 | MOZ_RELEASE_ASSERT(!mPending.isEmpty()); |
1878 | 0 | RefPtr<MessageTask> task = mPending.getFirst(); |
1879 | 0 | RunMessage(*task); |
1880 | 0 | return true; |
1881 | 0 | } |
1882 | | |
1883 | | bool |
1884 | | MessageChannel::HasPendingEvents() |
1885 | 0 | { |
1886 | 0 | AssertWorkerThread(); |
1887 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1888 | 0 | return Connected() && !mPending.isEmpty(); |
1889 | 0 | } |
1890 | | |
1891 | | bool |
1892 | | MessageChannel::InterruptEventOccurred() |
1893 | 0 | { |
1894 | 0 | AssertWorkerThread(); |
1895 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1896 | 0 | IPC_ASSERT(InterruptStackDepth() > 0, "not in wait loop"); |
1897 | 0 |
|
1898 | 0 | return (!Connected() || |
1899 | 0 | !mPending.isEmpty() || |
1900 | 0 | (!mOutOfTurnReplies.empty() && |
1901 | 0 | mOutOfTurnReplies.find(mInterruptStack.top().seqno()) != |
1902 | 0 | mOutOfTurnReplies.end())); |
1903 | 0 | } |
1904 | | |
1905 | | bool |
1906 | | MessageChannel::ProcessPendingRequest(Message &&aUrgent) |
1907 | 0 | { |
1908 | 0 | AssertWorkerThread(); |
1909 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1910 | 0 |
|
1911 | 0 | IPC_LOG("Process pending: seqno=%d, xid=%d", aUrgent.seqno(), aUrgent.transaction_id()); |
1912 | 0 |
|
1913 | 0 | DispatchMessage(std::move(aUrgent)); |
1914 | 0 | if (!Connected()) { |
1915 | 0 | ReportConnectionError("MessageChannel::ProcessPendingRequest"); |
1916 | 0 | return false; |
1917 | 0 | } |
1918 | 0 | |
1919 | 0 | return true; |
1920 | 0 | } |
1921 | | |
1922 | | bool |
1923 | | MessageChannel::ShouldRunMessage(const Message& aMsg) |
1924 | 0 | { |
1925 | 0 | if (!mTimedOutMessageSeqno) { |
1926 | 0 | return true; |
1927 | 0 | } |
1928 | 0 | |
1929 | 0 | // If we've timed out a message and we're awaiting the reply to the timed |
1930 | 0 | // out message, we have to be careful what messages we process. Here's what |
1931 | 0 | // can go wrong: |
1932 | 0 | // 1. child sends a NOT_NESTED sync message S |
1933 | 0 | // 2. parent sends a NESTED_INSIDE_SYNC sync message H at the same time |
1934 | 0 | // 3. parent times out H |
1935 | 0 | // 4. child starts processing H and sends a NESTED_INSIDE_SYNC message H' nested |
1936 | 0 | // within the same transaction |
1937 | 0 | // 5. parent dispatches S and sends reply |
1938 | 0 | // 6. child asserts because it instead expected a reply to H'. |
1939 | 0 | // |
1940 | 0 | // To solve this, we refuse to process S in the parent until we get a reply |
1941 | 0 | // to H. More generally, let the timed out message be M. We don't process a |
1942 | 0 | // message unless the child would need the response to that message in order |
1943 | 0 | // to process M. Those messages are the ones that have a higher nested level |
1944 | 0 | // than M or that are part of the same transaction as M. |
1945 | 0 | if (aMsg.nested_level() < mTimedOutMessageNestedLevel || |
1946 | 0 | (aMsg.nested_level() == mTimedOutMessageNestedLevel |
1947 | 0 | && aMsg.transaction_id() != mTimedOutMessageSeqno)) |
1948 | 0 | { |
1949 | 0 | return false; |
1950 | 0 | } |
1951 | 0 | |
1952 | 0 | return true; |
1953 | 0 | } |
1954 | | |
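// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): the filtering rule from
// ShouldRunMessage() above, restated as a pure predicate over plain integers
// (the helper name is hypothetical). While a timed-out sync message M still
// awaits its reply, we only run messages the other side needs answered before
// it can finish M: strictly higher nesting, or the same nesting level within
// M's own transaction.
static bool
SketchRunWhileAwaitingTimedOutReply(int aMsgNestedLevel,
                                    int32_t aMsgTransactionId,
                                    int aTimedOutNestedLevel,
                                    int32_t aTimedOutSeqno)
{
    if (aMsgNestedLevel > aTimedOutNestedLevel) {
        return true;
    }
    return aMsgNestedLevel == aTimedOutNestedLevel &&
           aMsgTransactionId == aTimedOutSeqno;
}
// ---------------------------------------------------------------------------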
1955 | | void |
1956 | | MessageChannel::RunMessage(MessageTask& aTask) |
1957 | 0 | { |
1958 | 0 | AssertWorkerThread(); |
1959 | 0 | mMonitor->AssertCurrentThreadOwns(); |
1960 | 0 |
|
1961 | 0 | Message& msg = aTask.Msg(); |
1962 | 0 |
|
1963 | 0 | if (!Connected()) { |
1964 | 0 | ReportConnectionError("RunMessage"); |
1965 | 0 | return; |
1966 | 0 | } |
1967 | 0 | |
1968 | 0 | // Check that we're going to run the first message that's valid to run. |
1969 | | #if 0 |
1970 | | #ifdef DEBUG |
1971 | | nsCOMPtr<nsIEventTarget> messageTarget = |
1972 | | mListener->GetMessageEventTarget(msg); |
1973 | | |
1974 | | for (MessageTask* task : mPending) { |
1975 | | if (task == &aTask) { |
1976 | | break; |
1977 | | } |
1978 | | |
1979 | | nsCOMPtr<nsIEventTarget> taskTarget = |
1980 | | mListener->GetMessageEventTarget(task->Msg()); |
1981 | | |
1982 | | MOZ_ASSERT(!ShouldRunMessage(task->Msg()) || |
1983 | | taskTarget != messageTarget || |
1984 | | aTask.Msg().priority() != task->Msg().priority()); |
1985 | | |
1986 | | } |
1987 | | #endif |
1988 | | #endif |
1989 | | |
1990 | 0 | if (!mDeferred.empty()) { |
1991 | 0 | MaybeUndeferIncall(); |
1992 | 0 | } |
1993 | 0 |
|
1994 | 0 | if (!ShouldRunMessage(msg)) { |
1995 | 0 | return; |
1996 | 0 | } |
1997 | 0 | |
1998 | 0 | MOZ_RELEASE_ASSERT(aTask.isInList()); |
1999 | 0 | aTask.remove(); |
2000 | 0 |
|
2001 | 0 | if (!IsAlwaysDeferred(msg)) { |
2002 | 0 | mMaybeDeferredPendingCount--; |
2003 | 0 | } |
2004 | 0 |
|
2005 | 0 | if (IsOnCxxStack() && msg.is_interrupt() && msg.is_reply()) { |
2006 | 0 | // We probably just received a reply in a nested loop for an |
2007 | 0 | // Interrupt call sent before entering that loop. |
2008 | 0 | mOutOfTurnReplies[msg.seqno()] = std::move(msg); |
2009 | 0 | return; |
2010 | 0 | } |
2011 | 0 | |
2012 | 0 | DispatchMessage(std::move(msg)); |
2013 | 0 | } |
2014 | | |
2015 | | NS_IMPL_ISUPPORTS_INHERITED(MessageChannel::MessageTask, CancelableRunnable, nsIRunnablePriority) |
2016 | | |
2017 | | MessageChannel::MessageTask::MessageTask(MessageChannel* aChannel, Message&& aMessage) |
2018 | | : CancelableRunnable(aMessage.name()) |
2019 | | , mChannel(aChannel) |
2020 | | , mMessage(std::move(aMessage)) |
2021 | | , mScheduled(false) |
2022 | 0 | { |
2023 | 0 | } |
2024 | | |
2025 | | nsresult |
2026 | | MessageChannel::MessageTask::Run() |
2027 | 0 | { |
2028 | 0 | if (!mChannel) { |
2029 | 0 | return NS_OK; |
2030 | 0 | } |
2031 | 0 | |
2032 | 0 | mChannel->AssertWorkerThread(); |
2033 | 0 | mChannel->mMonitor->AssertNotCurrentThreadOwns(); |
2034 | 0 |
|
2035 | 0 | MonitorAutoLock lock(*mChannel->mMonitor); |
2036 | 0 |
|
2037 | 0 | // In case we choose not to run this message, we may need to be able to Post |
2038 | 0 | // it again. |
2039 | 0 | mScheduled = false; |
2040 | 0 |
|
2041 | 0 | if (!isInList()) { |
2042 | 0 | return NS_OK; |
2043 | 0 | } |
2044 | 0 | |
2045 | 0 | mChannel->RunMessage(*this); |
2046 | 0 | return NS_OK; |
2047 | 0 | } |
2048 | | |
2049 | | // Warning: This method removes the receiver from whatever list it might be in. |
2050 | | nsresult |
2051 | | MessageChannel::MessageTask::Cancel() |
2052 | 0 | { |
2053 | 0 | if (!mChannel) { |
2054 | 0 | return NS_OK; |
2055 | 0 | } |
2056 | 0 | |
2057 | 0 | mChannel->AssertWorkerThread(); |
2058 | 0 | mChannel->mMonitor->AssertNotCurrentThreadOwns(); |
2059 | 0 |
|
2060 | 0 | MonitorAutoLock lock(*mChannel->mMonitor); |
2061 | 0 |
|
2062 | 0 | if (!isInList()) { |
2063 | 0 | return NS_OK; |
2064 | 0 | } |
2065 | 0 | remove(); |
2066 | 0 |
|
2067 | 0 | if (!IsAlwaysDeferred(Msg())) { |
2068 | 0 | mChannel->mMaybeDeferredPendingCount--; |
2069 | 0 | } |
2070 | 0 |
|
2071 | 0 | return NS_OK; |
2072 | 0 | } |
2073 | | |
2074 | | void |
2075 | | MessageChannel::MessageTask::Post() |
2076 | 0 | { |
2077 | 0 | MOZ_RELEASE_ASSERT(!mScheduled); |
2078 | 0 | MOZ_RELEASE_ASSERT(isInList()); |
2079 | 0 |
|
2080 | 0 | mScheduled = true; |
2081 | 0 |
|
2082 | 0 | RefPtr<MessageTask> self = this; |
2083 | 0 | nsCOMPtr<nsIEventTarget> eventTarget = |
2084 | 0 | mChannel->mListener->GetMessageEventTarget(mMessage); |
2085 | 0 |
|
2086 | 0 | if (eventTarget) { |
2087 | 0 | eventTarget->Dispatch(self.forget(), NS_DISPATCH_NORMAL); |
2088 | 0 | } else if (mChannel->mWorkerLoop) { |
2089 | 0 | mChannel->mWorkerLoop->PostTask(self.forget()); |
2090 | 0 | } |
2091 | 0 | } |
2092 | | |
2093 | | void |
2094 | | MessageChannel::MessageTask::Clear() |
2095 | 0 | { |
2096 | 0 | mChannel->AssertWorkerThread(); |
2097 | 0 |
|
2098 | 0 | mChannel = nullptr; |
2099 | 0 | } |
2100 | | |
2101 | | NS_IMETHODIMP |
2102 | | MessageChannel::MessageTask::GetPriority(uint32_t* aPriority) |
2103 | 0 | { |
2104 | 0 | if (recordreplay::IsRecordingOrReplaying()) { |
2105 | 0 | // Ignore message priorities in recording/replaying processes. Incoming |
2106 | 0 | // messages were sorted in the middleman process according to their |
2107 | 0 | // priority before being forwarded here, and reordering them again in this |
2108 | 0 | // process can cause problems such as dispatching messages for an actor |
2109 | 0 | // before the constructor for that actor. |
2110 | 0 | *aPriority = PRIORITY_NORMAL; |
2111 | 0 | return NS_OK; |
2112 | 0 | } |
2113 | 0 | switch (mMessage.priority()) { |
2114 | 0 | case Message::NORMAL_PRIORITY: |
2115 | 0 | *aPriority = PRIORITY_NORMAL; |
2116 | 0 | break; |
2117 | 0 | case Message::INPUT_PRIORITY: |
2118 | 0 | *aPriority = PRIORITY_INPUT; |
2119 | 0 | break; |
2120 | 0 | case Message::HIGH_PRIORITY: |
2121 | 0 | *aPriority = PRIORITY_HIGH; |
2122 | 0 | break; |
2123 | 0 | default: |
2124 | 0 | MOZ_ASSERT(false); |
2125 | 0 | break; |
2126 | 0 | } |
2127 | 0 | return NS_OK; |
2128 | 0 | } |
2129 | | |
2130 | | bool |
2131 | | MessageChannel::MessageTask::GetAffectedSchedulerGroups(SchedulerGroupSet& aGroups) |
2132 | 0 | { |
2133 | 0 | if (!mChannel) { |
2134 | 0 | return false; |
2135 | 0 | } |
2136 | 0 | |
2137 | 0 | mChannel->AssertWorkerThread(); |
2138 | 0 | return mChannel->mListener->GetMessageSchedulerGroups(mMessage, aGroups); |
2139 | 0 | } |
2140 | | |
2141 | | void |
2142 | | MessageChannel::DispatchMessage(Message &&aMsg) |
2143 | 0 | { |
2144 | 0 | AssertWorkerThread(); |
2145 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2146 | 0 |
|
2147 | 0 | Maybe<AutoNoJSAPI> nojsapi; |
2148 | 0 | if (ScriptSettingsInitialized() && NS_IsMainThread()) |
2149 | 0 | nojsapi.emplace(); |
2150 | 0 |
|
2151 | 0 | nsAutoPtr<Message> reply; |
2152 | 0 |
|
2153 | 0 | IPC_LOG("DispatchMessage: seqno=%d, xid=%d", aMsg.seqno(), aMsg.transaction_id()); |
2154 | 0 |
|
2155 | 0 | { |
2156 | 0 | AutoEnterTransaction transaction(this, aMsg); |
2157 | 0 |
|
2158 | 0 | int id = aMsg.transaction_id(); |
2159 | 0 | MOZ_RELEASE_ASSERT(!aMsg.is_sync() || id == transaction.TransactionID()); |
2160 | 0 |
|
2161 | 0 | { |
2162 | | #ifdef MOZ_TASK_TRACER |
2163 | | Message::AutoTaskTracerRun tasktracerRun(aMsg); |
2164 | | #endif |
2165 | | MonitorAutoUnlock unlock(*mMonitor); |
2166 | 0 | CxxStackFrame frame(*this, IN_MESSAGE, &aMsg); |
2167 | 0 |
|
2168 | 0 | mListener->ArtificialSleep(); |
2169 | 0 |
|
2170 | 0 | if (aMsg.is_sync()) |
2171 | 0 | DispatchSyncMessage(aMsg, *getter_Transfers(reply)); |
2172 | 0 | else if (aMsg.is_interrupt()) |
2173 | 0 | DispatchInterruptMessage(std::move(aMsg), 0); |
2174 | 0 | else |
2175 | 0 | DispatchAsyncMessage(aMsg); |
2176 | 0 |
|
2177 | 0 | mListener->ArtificialSleep(); |
2178 | 0 | } |
2179 | 0 |
|
2180 | 0 | if (reply && transaction.IsCanceled()) { |
2181 | 0 | // The transaction has been canceled. Don't send a reply. |
2182 | 0 | IPC_LOG("Nulling out reply due to cancellation, seqno=%d, xid=%d", aMsg.seqno(), id); |
2183 | 0 | reply = nullptr; |
2184 | 0 | } |
2185 | 0 | } |
2186 | 0 |
|
2187 | 0 | if (reply && ChannelConnected == mChannelState) { |
2188 | 0 | IPC_LOG("Sending reply seqno=%d, xid=%d", aMsg.seqno(), aMsg.transaction_id()); |
2189 | 0 | mLink->SendMessage(reply.forget()); |
2190 | 0 | } |
2191 | 0 | } |
2192 | | |
2193 | | void |
2194 | | MessageChannel::DispatchSyncMessage(const Message& aMsg, Message*& aReply) |
2195 | 0 | { |
2196 | 0 | AssertWorkerThread(); |
2197 | 0 |
|
2198 | 0 | mozilla::TimeStamp start = TimeStamp::Now(); |
2199 | 0 |
|
2200 | 0 | int nestedLevel = aMsg.nested_level(); |
2201 | 0 |
|
2202 | 0 | MOZ_RELEASE_ASSERT(nestedLevel == IPC::Message::NOT_NESTED || |
2203 | 0 | NS_IsMainThread() || |
2204 | 0 | // Middleman processes forward sync messages on a non-main thread. |
2205 | 0 | recordreplay::IsMiddleman()); |
2206 | | #ifdef MOZ_TASK_TRACER |
2207 | | AutoScopedLabel autolabel("sync message %s", aMsg.name()); |
2208 | | #endif |
2209 | |
|
2210 | 0 | MessageChannel* dummy; |
2211 | 0 | MessageChannel*& blockingVar = mSide == ChildSide && NS_IsMainThread() ? gParentProcessBlocker : dummy; |
2212 | 0 |
|
2213 | 0 | Result rv; |
2214 | 0 | { |
2215 | 0 | AutoSetValue<MessageChannel*> blocked(blockingVar, this); |
2216 | 0 | rv = mListener->OnMessageReceived(aMsg, aReply); |
2217 | 0 | } |
2218 | 0 |
|
2219 | 0 | uint32_t latencyMs = round((TimeStamp::Now() - start).ToMilliseconds()); |
2220 | 0 | if (latencyMs >= kMinTelemetrySyncIPCLatencyMs) { |
2221 | 0 | Telemetry::Accumulate(Telemetry::IPC_SYNC_RECEIVE_MS, |
2222 | 0 | nsDependentCString(aMsg.name()), |
2223 | 0 | latencyMs); |
2224 | 0 | } |
2225 | 0 |
|
2226 | 0 | if (!MaybeHandleError(rv, aMsg, "DispatchSyncMessage")) { |
2227 | 0 | aReply = Message::ForSyncDispatchError(aMsg.nested_level()); |
2228 | 0 | } |
2229 | 0 | aReply->set_seqno(aMsg.seqno()); |
2230 | 0 | aReply->set_transaction_id(aMsg.transaction_id()); |
2231 | 0 | } |
2232 | | |
2233 | | void |
2234 | | MessageChannel::DispatchAsyncMessage(const Message& aMsg) |
2235 | 0 | { |
2236 | 0 | AssertWorkerThread(); |
2237 | 0 | MOZ_RELEASE_ASSERT(!aMsg.is_interrupt() && !aMsg.is_sync()); |
2238 | 0 |
|
2239 | 0 | if (aMsg.routing_id() == MSG_ROUTING_NONE) { |
2240 | 0 | MOZ_CRASH("unhandled special message!"); |
2241 | 0 | } |
2242 | 0 |
|
2243 | 0 | Result rv; |
2244 | 0 | { |
2245 | 0 | int nestedLevel = aMsg.nested_level(); |
2246 | 0 | AutoSetValue<bool> async(mDispatchingAsyncMessage, true); |
2247 | 0 | AutoSetValue<int> nestedLevelSet(mDispatchingAsyncMessageNestedLevel, nestedLevel); |
2248 | 0 | rv = mListener->OnMessageReceived(aMsg); |
2249 | 0 | } |
2250 | 0 | MaybeHandleError(rv, aMsg, "DispatchAsyncMessage"); |
2251 | 0 | } |
2252 | | |
2253 | | void |
2254 | | MessageChannel::DispatchInterruptMessage(Message&& aMsg, size_t stackDepth) |
2255 | 0 | { |
2256 | 0 | AssertWorkerThread(); |
2257 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
2258 | 0 |
|
2259 | 0 | IPC_ASSERT(aMsg.is_interrupt() && !aMsg.is_reply(), "wrong message type"); |
2260 | 0 |
|
2261 | 0 | if (ShouldDeferInterruptMessage(aMsg, stackDepth)) { |
2262 | 0 | // We now know the other side's stack has one more frame |
2263 | 0 | // than we thought. |
2264 | 0 | ++mRemoteStackDepthGuess; // decremented in MaybeUndeferIncall()
2265 | 0 | mDeferred.push(std::move(aMsg)); |
2266 | 0 | return; |
2267 | 0 | } |
2268 | 0 | |
2269 | 0 | // If we "lost" a race and need to process the other side's in-call, we |
2270 | 0 | // don't need to fix up the mRemoteStackDepthGuess here, because we're just |
2271 | 0 | // about to increment it, which will make it correct again. |
2272 | 0 | |
2273 | | #ifdef OS_WIN |
2274 | | SyncStackFrame frame(this, true); |
2275 | | #endif |
2276 | | |
2277 | 0 | nsAutoPtr<Message> reply; |
2278 | 0 |
|
2279 | 0 | ++mRemoteStackDepthGuess; |
2280 | 0 | Result rv = mListener->OnCallReceived(aMsg, *getter_Transfers(reply)); |
2281 | 0 | --mRemoteStackDepthGuess; |
2282 | 0 |
|
2283 | 0 | if (!MaybeHandleError(rv, aMsg, "DispatchInterruptMessage")) { |
2284 | 0 | reply = Message::ForInterruptDispatchError(); |
2285 | 0 | } |
2286 | 0 | reply->set_seqno(aMsg.seqno()); |
2287 | 0 |
|
2288 | 0 | MonitorAutoLock lock(*mMonitor); |
2289 | 0 | if (ChannelConnected == mChannelState) { |
2290 | 0 | mLink->SendMessage(reply.forget()); |
2291 | 0 | } |
2292 | 0 | } |
2293 | | |
2294 | | bool |
2295 | | MessageChannel::ShouldDeferInterruptMessage(const Message& aMsg, size_t aStackDepth) |
2296 | 0 | { |
2297 | 0 | AssertWorkerThread(); |
2298 | 0 |
|
2299 | 0 | // We may or may not own the lock in this function, so don't access any |
2300 | 0 | // channel state. |
2301 | 0 |
|
2302 | 0 | IPC_ASSERT(aMsg.is_interrupt() && !aMsg.is_reply(), "wrong message type"); |
2303 | 0 |
|
2304 | 0 | // Race detection: see the long comment near mRemoteStackDepthGuess in |
2305 | 0 | // MessageChannel.h. "Remote" stack depth means our side, and "local" means |
2306 | 0 | // the other side. |
2307 | 0 | if (aMsg.interrupt_remote_stack_depth_guess() == RemoteViewOfStackDepth(aStackDepth)) { |
2308 | 0 | return false; |
2309 | 0 | } |
2310 | 0 | |
2311 | 0 | // Interrupt in-calls have raced. The winner, if there is one, gets to defer |
2312 | 0 | // processing of the other side's in-call. |
2313 | 0 | bool defer; |
2314 | 0 | const char* winner; |
2315 | 0 | const MessageInfo parentMsgInfo = |
2316 | 0 | (mSide == ChildSide) ? MessageInfo(aMsg) : mInterruptStack.top(); |
2317 | 0 | const MessageInfo childMsgInfo = |
2318 | 0 | (mSide == ChildSide) ? mInterruptStack.top() : MessageInfo(aMsg); |
2319 | 0 | switch (mListener->MediateInterruptRace(parentMsgInfo, childMsgInfo)) |
2320 | 0 | { |
2321 | 0 | case RIPChildWins: |
2322 | 0 | winner = "child"; |
2323 | 0 | defer = (mSide == ChildSide); |
2324 | 0 | break; |
2325 | 0 | case RIPParentWins: |
2326 | 0 | winner = "parent"; |
2327 | 0 | defer = (mSide != ChildSide); |
2328 | 0 | break; |
2329 | 0 | case RIPError: |
2330 | 0 | MOZ_CRASH("NYI: 'Error' Interrupt race policy"); |
2331 | 0 | default: |
2332 | 0 | MOZ_CRASH("not reached"); |
2333 | 0 | } |
2334 | 0 |
|
2335 | 0 | IPC_LOG("race in %s: %s won", |
2336 | 0 | (mSide == ChildSide) ? "child" : "parent", |
2337 | 0 | winner); |
2338 | 0 |
|
2339 | 0 | return defer; |
2340 | 0 | } |
2341 | | |
2342 | | void |
2343 | | MessageChannel::MaybeUndeferIncall() |
2344 | 0 | { |
2345 | 0 | AssertWorkerThread(); |
2346 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2347 | 0 |
|
2348 | 0 | if (mDeferred.empty()) |
2349 | 0 | return; |
2350 | 0 | |
2351 | 0 | size_t stackDepth = InterruptStackDepth(); |
2352 | 0 |
|
2353 | 0 | Message& deferred = mDeferred.top(); |
2354 | 0 |
|
2355 | 0 | // the other side can only *under*-estimate our actual stack depth |
2356 | 0 | IPC_ASSERT(deferred.interrupt_remote_stack_depth_guess() <= stackDepth, |
2357 | 0 | "fatal logic error"); |
2358 | 0 |
|
2359 | 0 | if (ShouldDeferInterruptMessage(deferred, stackDepth)) { |
2360 | 0 | return; |
2361 | 0 | } |
2362 | 0 | |
2363 | 0 | // maybe time to process this message |
2364 | 0 | Message call(std::move(deferred)); |
2365 | 0 | mDeferred.pop(); |
2366 | 0 |
|
2367 | 0 | // fix up fudge factor we added to account for race |
2368 | 0 | IPC_ASSERT(0 < mRemoteStackDepthGuess, "fatal logic error"); |
2369 | 0 | --mRemoteStackDepthGuess; |
2370 | 0 |
|
2371 | 0 | MOZ_RELEASE_ASSERT(call.nested_level() == IPC::Message::NOT_NESTED); |
2372 | 0 | RefPtr<MessageTask> task = new MessageTask(this, std::move(call)); |
2373 | 0 | mPending.insertBack(task); |
2374 | 0 | MOZ_ASSERT(IsAlwaysDeferred(task->Msg())); |
2375 | 0 | task->Post(); |
2376 | 0 | } |
2377 | | |
2378 | | void |
2379 | | MessageChannel::EnteredCxxStack() |
2380 | 0 | { |
2381 | 0 | mListener->EnteredCxxStack(); |
2382 | 0 | } |
2383 | | |
2384 | | void |
2385 | | MessageChannel::ExitedCxxStack() |
2386 | 0 | { |
2387 | 0 | mListener->ExitedCxxStack(); |
2388 | 0 | if (mSawInterruptOutMsg) { |
2389 | 0 | MonitorAutoLock lock(*mMonitor); |
2390 | 0 | // see long comment in OnMaybeDequeueOne() |
2391 | 0 | EnqueuePendingMessages(); |
2392 | 0 | mSawInterruptOutMsg = false; |
2393 | 0 | } |
2394 | 0 | } |
2395 | | |
2396 | | void |
2397 | | MessageChannel::EnteredCall() |
2398 | 0 | { |
2399 | 0 | mListener->EnteredCall(); |
2400 | 0 | } |
2401 | | |
2402 | | void |
2403 | | MessageChannel::ExitedCall() |
2404 | 0 | { |
2405 | 0 | mListener->ExitedCall(); |
2406 | 0 | } |
2407 | | |
2408 | | void |
2409 | | MessageChannel::EnteredSyncSend() |
2410 | 0 | { |
2411 | 0 | mListener->OnEnteredSyncSend(); |
2412 | 0 | } |
2413 | | |
2414 | | void |
2415 | | MessageChannel::ExitedSyncSend() |
2416 | 0 | { |
2417 | 0 | mListener->OnExitedSyncSend(); |
2418 | 0 | } |
2419 | | |
2420 | | void |
2421 | | MessageChannel::EnqueuePendingMessages() |
2422 | 0 | { |
2423 | 0 | AssertWorkerThread(); |
2424 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2425 | 0 |
|
2426 | 0 | MaybeUndeferIncall(); |
2427 | 0 |
|
2428 | 0 | // XXX performance tuning knob: could process all or k pending |
2429 | 0 | // messages here, rather than enqueuing for later processing |
2430 | 0 |
|
2431 | 0 | RepostAllMessages(); |
2432 | 0 | } |
2433 | | |
2434 | | bool |
2435 | | MessageChannel::WaitResponse(bool aWaitTimedOut) |
2436 | 0 | { |
2437 | 0 | if (aWaitTimedOut) { |
2438 | 0 | if (mInTimeoutSecondHalf) { |
2439 | 0 | // We've really timed out this time. |
2440 | 0 | return false; |
2441 | 0 | } |
2442 | 0 | // Try a second time. |
2443 | 0 | mInTimeoutSecondHalf = true; |
2444 | 0 | } else { |
2445 | 0 | mInTimeoutSecondHalf = false; |
2446 | 0 | } |
2447 | 0 | return true; |
2448 | 0 | } |
2449 | | |
2450 | | #ifndef OS_WIN |
2451 | | bool |
2452 | | MessageChannel::WaitForSyncNotify(bool /* aHandleWindowsMessages */) |
2453 | 0 | { |
2454 | | #ifdef DEBUG |
2455 | | // WARNING: We don't release the lock here. We can't because the link thread |
2456 | | // could signal at this time and we would miss it. Instead we require |
2457 | | // ArtificialTimeout() to be extremely simple. |
2458 | | if (mListener->ArtificialTimeout()) { |
2459 | | return false; |
2460 | | } |
2461 | | #endif |
2462 | |
|
2463 | 0 | TimeDuration timeout = (kNoTimeout == mTimeoutMs) ? |
2464 | 0 | TimeDuration::Forever() : |
2465 | 0 | TimeDuration::FromMilliseconds(mTimeoutMs); |
2466 | 0 | CVStatus status = mMonitor->Wait(timeout); |
2467 | 0 |
|
2468 | 0 | // If the timeout didn't expire, we know we received an event. The |
2469 | 0 | // converse is not true. |
2470 | 0 | return WaitResponse(status == CVStatus::Timeout); |
2471 | 0 | } |
2472 | | |
2473 | | bool |
2474 | | MessageChannel::WaitForInterruptNotify() |
2475 | 0 | { |
2476 | 0 | return WaitForSyncNotify(true); |
2477 | 0 | } |
2478 | | |
2479 | | void |
2480 | | MessageChannel::NotifyWorkerThread() |
2481 | 0 | { |
2482 | 0 | mMonitor->Notify(); |
2483 | 0 | } |
2484 | | #endif |
2485 | | |
2486 | | bool |
2487 | | MessageChannel::ShouldContinueFromTimeout() |
2488 | 0 | { |
2489 | 0 | AssertWorkerThread(); |
2490 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2491 | 0 |
|
2492 | 0 | bool cont; |
2493 | 0 | { |
2494 | 0 | MonitorAutoUnlock unlock(*mMonitor); |
2495 | 0 | cont = mListener->ShouldContinueFromReplyTimeout(); |
2496 | 0 | mListener->ArtificialSleep(); |
2497 | 0 | } |
2498 | 0 |
|
2499 | 0 | static enum { UNKNOWN, NOT_DEBUGGING, DEBUGGING } sDebuggingChildren = UNKNOWN; |
2500 | 0 |
|
2501 | 0 | if (sDebuggingChildren == UNKNOWN) { |
2502 | 0 | sDebuggingChildren = getenv("MOZ_DEBUG_CHILD_PROCESS") || |
2503 | 0 | getenv("MOZ_DEBUG_CHILD_PAUSE") |
2504 | 0 | ? DEBUGGING |
2505 | 0 | : NOT_DEBUGGING; |
2506 | 0 | } |
2507 | 0 | if (sDebuggingChildren == DEBUGGING) { |
2508 | 0 | return true; |
2509 | 0 | } |
2510 | 0 | |
2511 | 0 | return cont; |
2512 | 0 | } |
2513 | | |
2514 | | void |
2515 | | MessageChannel::SetReplyTimeoutMs(int32_t aTimeoutMs) |
2516 | 0 | { |
2517 | 0 | // Set the channel timeout value. Since the wait is broken up into
2518 | 0 | // two periods, the minimum timeout value is 2ms.
2519 | 0 | AssertWorkerThread(); |
2520 | 0 | mTimeoutMs = (aTimeoutMs <= 0) |
2521 | 0 | ? kNoTimeout |
2522 | 0 | : (int32_t)ceil((double)aTimeoutMs / 2.0); |
2523 | 0 | } |
2524 | | |
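// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): how the halved value
// stored by SetReplyTimeoutMs() above combines with the two-phase retry in
// WaitResponse(). WaitForSyncNotify() waits for one half-period; if that
// expires, WaitResponse() grants a second half-period, and only the next
// expiry is treated as a real timeout. The total wait therefore approximates
// the caller's requested timeout and is never less than 2 ms. The helper name
// below is hypothetical.
static int32_t
SketchEffectiveTimeoutMs(int32_t aRequestedTimeoutMs)
{
    int32_t half = (int32_t)ceil((double)aRequestedTimeoutMs / 2.0);
    return 2 * half;  // >= aRequestedTimeoutMs for positive input
}
// ---------------------------------------------------------------------------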
2525 | | void |
2526 | | MessageChannel::OnChannelConnected(int32_t peer_id) |
2527 | 0 | { |
2528 | 0 | MOZ_RELEASE_ASSERT(!mPeerPidSet); |
2529 | 0 | mPeerPidSet = true; |
2530 | 0 | mPeerPid = peer_id; |
2531 | 0 | RefPtr<CancelableRunnable> task = mOnChannelConnectedTask; |
2532 | 0 | if (mWorkerLoop) { |
2533 | 0 | mWorkerLoop->PostTask(task.forget()); |
2534 | 0 | } |
2535 | 0 | } |
2536 | | |
2537 | | void |
2538 | | MessageChannel::DispatchOnChannelConnected() |
2539 | 0 | { |
2540 | 0 | AssertWorkerThread(); |
2541 | 0 | MOZ_RELEASE_ASSERT(mPeerPidSet); |
2542 | 0 | mListener->OnChannelConnected(mPeerPid); |
2543 | 0 | } |
2544 | | |
2545 | | void |
2546 | | MessageChannel::ReportMessageRouteError(const char* channelName) const |
2547 | 0 | { |
2548 | 0 | PrintErrorMessage(mSide, channelName, "Need a route"); |
2549 | 0 | mListener->ProcessingError(MsgRouteError, "MsgRouteError"); |
2550 | 0 | } |
2551 | | |
2552 | | void |
2553 | | MessageChannel::ReportConnectionError(const char* aChannelName, Message* aMsg) const |
2554 | 0 | { |
2555 | 0 | AssertWorkerThread(); |
2556 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2557 | 0 |
|
2558 | 0 | const char* errorMsg = nullptr; |
2559 | 0 | switch (mChannelState) { |
2560 | 0 | case ChannelClosed: |
2561 | 0 | errorMsg = "Closed channel: cannot send/recv"; |
2562 | 0 | break; |
2563 | 0 | case ChannelOpening: |
2564 | 0 | errorMsg = "Opening channel: not yet ready for send/recv"; |
2565 | 0 | break; |
2566 | 0 | case ChannelTimeout: |
2567 | 0 | errorMsg = "Channel timeout: cannot send/recv"; |
2568 | 0 | break; |
2569 | 0 | case ChannelClosing: |
2570 | 0 | errorMsg = "Channel closing: too late to send/recv, messages will be lost"; |
2571 | 0 | break; |
2572 | 0 | case ChannelError: |
2573 | 0 | errorMsg = "Channel error: cannot send/recv"; |
2574 | 0 | break; |
2575 | 0 |
|
2576 | 0 | default: |
2577 | 0 | MOZ_CRASH("unreached"); |
2578 | 0 | } |
2579 | 0 |
|
2580 | 0 | if (aMsg) { |
2581 | 0 | char reason[512]; |
2582 | 0 | SprintfLiteral(reason,"(msgtype=0x%X,name=%s) %s", |
2583 | 0 | aMsg->type(), aMsg->name(), errorMsg); |
2584 | 0 |
|
2585 | 0 | PrintErrorMessage(mSide, aChannelName, reason); |
2586 | 0 | } else { |
2587 | 0 | PrintErrorMessage(mSide, aChannelName, errorMsg); |
2588 | 0 | } |
2589 | 0 |
|
2590 | 0 | MonitorAutoUnlock unlock(*mMonitor); |
2591 | 0 | mListener->ProcessingError(MsgDropped, errorMsg); |
2592 | 0 | } |
2593 | | |
2594 | | bool |
2595 | | MessageChannel::MaybeHandleError(Result code, const Message& aMsg, const char* channelName) |
2596 | 0 | { |
2597 | 0 | if (MsgProcessed == code) |
2598 | 0 | return true; |
2599 | 0 | |
2600 | 0 | const char* errorMsg = nullptr; |
2601 | 0 | switch (code) { |
2602 | 0 | case MsgNotKnown: |
2603 | 0 | errorMsg = "Unknown message: not processed"; |
2604 | 0 | break; |
2605 | 0 | case MsgNotAllowed: |
2606 | 0 | errorMsg = "Message not allowed: cannot be sent/recvd in this state"; |
2607 | 0 | break; |
2608 | 0 | case MsgPayloadError: |
2609 | 0 | errorMsg = "Payload error: message could not be deserialized"; |
2610 | 0 | break; |
2611 | 0 | case MsgProcessingError: |
2612 | 0 | errorMsg = "Processing error: message was deserialized, but the handler returned false (indicating failure)"; |
2613 | 0 | break; |
2614 | 0 | case MsgRouteError: |
2615 | 0 | errorMsg = "Route error: message sent to unknown actor ID"; |
2616 | 0 | break; |
2617 | 0 | case MsgValueError: |
2618 | 0 | errorMsg = "Value error: message was deserialized, but contained an illegal value"; |
2619 | 0 | break; |
2620 | 0 |
|
2621 | 0 | default: |
2622 | 0 | MOZ_CRASH("unknown Result code"); |
2623 | 0 | return false; |
2624 | 0 | } |
2625 | 0 | |
2626 | 0 | char reason[512]; |
2627 | 0 | const char* msgname = aMsg.name(); |
2628 | 0 | if (msgname[0] == '?') { |
2629 | 0 | SprintfLiteral(reason,"(msgtype=0x%X) %s", aMsg.type(), errorMsg); |
2630 | 0 | } else { |
2631 | 0 | SprintfLiteral(reason,"%s %s", msgname, errorMsg); |
2632 | 0 | } |
2633 | 0 |
|
2634 | 0 | PrintErrorMessage(mSide, channelName, reason); |
2635 | 0 |
|
2636 | 0 | // Error handled in mozilla::ipc::IPCResult. |
2637 | 0 | if (code == MsgProcessingError) { |
2638 | 0 | return false; |
2639 | 0 | } |
2640 | 0 | |
2641 | 0 | mListener->ProcessingError(code, reason); |
2642 | 0 |
|
2643 | 0 | return false; |
2644 | 0 | } |
2645 | | |
2646 | | void |
2647 | | MessageChannel::OnChannelErrorFromLink() |
2648 | 0 | { |
2649 | 0 | AssertLinkThread(); |
2650 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2651 | 0 |
|
2652 | 0 | IPC_LOG("OnChannelErrorFromLink"); |
2653 | 0 |
|
2654 | 0 | if (InterruptStackDepth() > 0) |
2655 | 0 | NotifyWorkerThread(); |
2656 | 0 |
|
2657 | 0 | if (AwaitingSyncReply() || AwaitingIncomingMessage()) |
2658 | 0 | NotifyWorkerThread(); |
2659 | 0 |
|
2660 | 0 | if (ChannelClosing != mChannelState) { |
2661 | 0 | if (mAbortOnError) { |
2662 | 0 | MOZ_CRASH("Aborting on channel error."); |
2663 | 0 | } |
2664 | 0 | mChannelState = ChannelError; |
2665 | 0 | mMonitor->Notify(); |
2666 | 0 | } |
2667 | 0 |
|
2668 | 0 | PostErrorNotifyTask(); |
2669 | 0 | } |
2670 | | |
2671 | | void |
2672 | | MessageChannel::NotifyMaybeChannelError() |
2673 | 0 | { |
2674 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
2675 | 0 |
|
2676 | 0 | // TODO sort out Close() on this side racing with Close() on the other side |
2677 | 0 | if (ChannelClosing == mChannelState) { |
2678 | 0 | // the channel closed, but we received a "Goodbye" message warning us |
2679 | 0 | // about it. no worries |
2680 | 0 | mChannelState = ChannelClosed; |
2681 | 0 | NotifyChannelClosed(); |
2682 | 0 | return; |
2683 | 0 | } |
2684 | 0 | |
2685 | 0 | Clear(); |
2686 | 0 |
|
2687 | 0 | // Oops, error! Let the listener know about it. |
2688 | 0 | mChannelState = ChannelError; |
2689 | 0 |
|
2690 | 0 | // IPDL assumes these notifications do not fire twice, so we do not let |
2691 | 0 | // that happen. |
2692 | 0 | if (mNotifiedChannelDone) { |
2693 | 0 | return; |
2694 | 0 | } |
2695 | 0 | mNotifiedChannelDone = true; |
2696 | 0 |
|
2697 | 0 | // After this, the channel may be deleted. Based on the premise that |
2698 | 0 | // mListener owns this channel, any calls back to this class that may |
2699 | 0 | // work with mListener should still work on living objects. |
2700 | 0 | mListener->OnChannelError(); |
2701 | 0 | } |
2702 | | |
2703 | | void |
2704 | | MessageChannel::OnNotifyMaybeChannelError() |
2705 | 0 | { |
2706 | 0 | AssertWorkerThread(); |
2707 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
2708 | 0 |
|
2709 | 0 | mChannelErrorTask = nullptr; |
2710 | 0 |
|
2711 | 0 | // OnChannelError holds mMonitor when it posts this task, and this
2712 | 0 | // task cannot be allowed to run until OnChannelError has exited. We
2713 | 0 | // enforce that order by grabbing the mutex here, which we can only
2714 | 0 | // acquire once OnChannelError has released it.
2715 | 0 | { |
2716 | 0 | MonitorAutoLock lock(*mMonitor); |
2717 | 0 | // nothing to do here |
2718 | 0 | } |
2719 | 0 |
|
2720 | 0 | if (IsOnCxxStack()) { |
2721 | 0 | mChannelErrorTask = NewNonOwningCancelableRunnableMethod( |
2722 | 0 | "ipc::MessageChannel::OnNotifyMaybeChannelError", |
2723 | 0 | this, |
2724 | 0 | &MessageChannel::OnNotifyMaybeChannelError); |
2725 | 0 | RefPtr<Runnable> task = mChannelErrorTask; |
2726 | 0 | // 10 ms delay is completely arbitrary |
2727 | 0 | if (mWorkerLoop) { |
2728 | 0 | mWorkerLoop->PostDelayedTask(task.forget(), 10); |
2729 | 0 | } |
2730 | 0 | return; |
2731 | 0 | } |
2732 | 0 |
|
2733 | 0 | NotifyMaybeChannelError(); |
2734 | 0 | } |
2735 | | |
2736 | | void |
2737 | | MessageChannel::PostErrorNotifyTask() |
2738 | 0 | { |
2739 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2740 | 0 |
|
2741 | 0 | if (mChannelErrorTask || !mWorkerLoop) |
2742 | 0 | return; |
2743 | 0 | |
2744 | 0 | // This must be the last code that runs on this thread! |
2745 | 0 | mChannelErrorTask = NewNonOwningCancelableRunnableMethod( |
2746 | 0 | "ipc::MessageChannel::OnNotifyMaybeChannelError", |
2747 | 0 | this, |
2748 | 0 | &MessageChannel::OnNotifyMaybeChannelError); |
2749 | 0 | RefPtr<Runnable> task = mChannelErrorTask; |
2750 | 0 | mWorkerLoop->PostTask(task.forget()); |
2751 | 0 | } |
2752 | | |
2753 | | // Special async message. |
2754 | | class GoodbyeMessage : public IPC::Message |
2755 | | { |
2756 | | public: |
2757 | | GoodbyeMessage() : |
2758 | | IPC::Message(MSG_ROUTING_NONE, GOODBYE_MESSAGE_TYPE) |
2759 | 0 | { |
2760 | 0 | } |
2761 | 0 | static bool Read(const Message* msg) { |
2762 | 0 | return true; |
2763 | 0 | } |
2764 | 0 | void Log(const std::string& aPrefix, FILE* aOutf) const { |
2765 | 0 | fputs("(special `Goodbye' message)", aOutf); |
2766 | 0 | } |
2767 | | }; |
2768 | | |
2769 | | void |
2770 | | MessageChannel::SynchronouslyClose() |
2771 | 0 | { |
2772 | 0 | AssertWorkerThread(); |
2773 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2774 | 0 | mLink->SendClose(); |
2775 | 0 | while (ChannelClosed != mChannelState) |
2776 | 0 | mMonitor->Wait(); |
2777 | 0 | } |
2778 | | |
2779 | | void |
2780 | | MessageChannel::CloseWithError() |
2781 | 0 | { |
2782 | 0 | AssertWorkerThread(); |
2783 | 0 |
|
2784 | 0 | MonitorAutoLock lock(*mMonitor); |
2785 | 0 | if (ChannelConnected != mChannelState) { |
2786 | 0 | return; |
2787 | 0 | } |
2788 | 0 | SynchronouslyClose(); |
2789 | 0 | mChannelState = ChannelError; |
2790 | 0 | PostErrorNotifyTask(); |
2791 | 0 | } |
2792 | | |
2793 | | void |
2794 | | MessageChannel::CloseWithTimeout() |
2795 | 0 | { |
2796 | 0 | AssertWorkerThread(); |
2797 | 0 |
|
2798 | 0 | MonitorAutoLock lock(*mMonitor); |
2799 | 0 | if (ChannelConnected != mChannelState) { |
2800 | 0 | return; |
2801 | 0 | } |
2802 | 0 | SynchronouslyClose(); |
2803 | 0 | mChannelState = ChannelTimeout; |
2804 | 0 | } |
2805 | | |
2806 | | void |
2807 | | MessageChannel::Close() |
2808 | 0 | { |
2809 | 0 | AssertWorkerThread(); |
2810 | 0 |
|
2811 | 0 | { |
2812 | 0 | // We don't use MonitorAutoLock here as that causes some sort of |
2813 | 0 | // deadlock in the error/timeout-with-a-listener state below when |
2814 | 0 | // compiling an optimized msvc build. |
2815 | 0 | mMonitor->Lock(); |
2816 | 0 |
|
2817 | 0 | // Instead just use a ScopeExit to manage the unlock. |
2818 | 0 | RefPtr<RefCountedMonitor> monitor(mMonitor); |
2819 | 0 | auto exit = MakeScopeExit([m = std::move(monitor)] () { |
2820 | 0 | m->Unlock(); |
2821 | 0 | }); |
2822 | 0 |
|
2823 | 0 | if (ChannelError == mChannelState || ChannelTimeout == mChannelState) { |
2824 | 0 | // See bug 538586: if the listener gets deleted while the |
2825 | 0 | // IO thread's NotifyChannelError event is still enqueued |
2826 | 0 | // and subsequently deletes us, then the error event will |
2827 | 0 | // also be deleted and the listener will never be notified |
2828 | 0 | // of the channel error. |
2829 | 0 | if (mListener) { |
2830 | 0 | exit.release(); // Explicitly unlocking, clear scope exit. |
2831 | 0 | mMonitor->Unlock(); |
2832 | 0 | NotifyMaybeChannelError(); |
2833 | 0 | } |
2834 | 0 | return; |
2835 | 0 | } |
2836 | 0 |
|
2837 | 0 | if (ChannelOpening == mChannelState) { |
2838 | 0 | // SynchronouslyClose() waits for an ack from the other side, so |
2839 | 0 | // the opening sequence should complete before this returns. |
2840 | 0 | SynchronouslyClose(); |
2841 | 0 | mChannelState = ChannelError; |
2842 | 0 | NotifyMaybeChannelError(); |
2843 | 0 | return; |
2844 | 0 | } |
2845 | 0 | |
2846 | 0 | if (ChannelClosed == mChannelState) { |
2847 | 0 | // XXX be strict about this until there's a compelling reason |
2848 | 0 | // to relax |
2849 | 0 | MOZ_CRASH("Close() called on closed channel!"); |
2850 | 0 | } |
2851 | 0 |
|
2852 | 0 | // Notify the other side that we're about to close our socket. If we've |
2853 | 0 | // already received a Goodbye from the other side (and our state is |
2854 | 0 | // ChannelClosing), there's no reason to send one. |
2855 | 0 | if (ChannelConnected == mChannelState) { |
2856 | 0 | mLink->SendMessage(new GoodbyeMessage()); |
2857 | 0 | } |
2858 | 0 | SynchronouslyClose(); |
2859 | 0 | } |
2860 | 0 |
|
2861 | 0 | NotifyChannelClosed(); |
2862 | 0 | } |
2863 | | |
2864 | | void |
2865 | | MessageChannel::NotifyChannelClosed() |
2866 | 0 | { |
2867 | 0 | mMonitor->AssertNotCurrentThreadOwns(); |
2868 | 0 |
|
2869 | 0 | if (ChannelClosed != mChannelState) |
2870 | 0 | MOZ_CRASH("channel should have been closed!"); |
2871 | 0 |
|
2872 | 0 | Clear(); |
2873 | 0 |
|
2874 | 0 | // IPDL assumes these notifications do not fire twice, so we do not let |
2875 | 0 | // that happen. |
2876 | 0 | if (mNotifiedChannelDone) { |
2877 | 0 | return; |
2878 | 0 | } |
2879 | 0 | mNotifiedChannelDone = true; |
2880 | 0 |
|
2881 | 0 | // OK, the IO thread just closed the channel normally. Let the |
2882 | 0 | // listener know about it. After this point the channel may be |
2883 | 0 | // deleted. |
2884 | 0 | mListener->OnChannelClose(); |
2885 | 0 | } |
2886 | | |
2887 | | void |
2888 | | MessageChannel::DebugAbort(const char* file, int line, const char* cond, |
2889 | | const char* why, |
2890 | | bool reply) |
2891 | 0 | { |
2892 | 0 | printf_stderr("###!!! [MessageChannel][%s][%s:%d] " |
2893 | 0 | "Assertion (%s) failed. %s %s\n", |
2894 | 0 | mSide == ChildSide ? "Child" : "Parent", |
2895 | 0 | file, line, cond, |
2896 | 0 | why, |
2897 | 0 | reply ? "(reply)" : ""); |
2898 | 0 | // technically we need the mutex for this, but we're dying anyway |
2899 | 0 | DumpInterruptStack(" "); |
2900 | 0 | printf_stderr(" remote Interrupt stack guess: %zu\n", |
2901 | 0 | mRemoteStackDepthGuess); |
2902 | 0 | printf_stderr(" deferred stack size: %zu\n", |
2903 | 0 | mDeferred.size()); |
2904 | 0 | printf_stderr(" out-of-turn Interrupt replies stack size: %zu\n", |
2905 | 0 | mOutOfTurnReplies.size()); |
2906 | 0 |
|
2907 | 0 | MessageQueue pending = std::move(mPending); |
2908 | 0 | while (!pending.isEmpty()) { |
2909 | 0 | printf_stderr(" [ %s%s ]\n", |
2910 | 0 | pending.getFirst()->Msg().is_interrupt() ? "intr" : |
2911 | 0 | (pending.getFirst()->Msg().is_sync() ? "sync" : "async"), |
2912 | 0 | pending.getFirst()->Msg().is_reply() ? "reply" : ""); |
2913 | 0 | pending.popFirst(); |
2914 | 0 | } |
2915 | 0 |
|
2916 | 0 | MOZ_CRASH_UNSAFE_OOL(why); |
2917 | 0 | } |
2918 | | |
2919 | | void |
2920 | | MessageChannel::DumpInterruptStack(const char* const pfx) const |
2921 | 0 | { |
2922 | 0 | NS_WARNING_ASSERTION( |
2923 | 0 | MessageLoop::current() != mWorkerLoop, |
2924 | 0 | "The worker thread had better be paused in a debugger!"); |
2925 | 0 |
|
2926 | 0 | printf_stderr("%sMessageChannel 'backtrace':\n", pfx); |
2927 | 0 |
|
2928 | 0 | // print a python-style backtrace, first frame to last |
2929 | 0 | for (uint32_t i = 0; i < mCxxStackFrames.length(); ++i) { |
2930 | 0 | int32_t id; |
2931 | 0 | const char* dir; |
2932 | 0 | const char* sems; |
2933 | 0 | const char* name; |
2934 | 0 | mCxxStackFrames[i].Describe(&id, &dir, &sems, &name); |
2935 | 0 |
|
2936 | 0 | printf_stderr("%s[(%u) %s %s %s(actor=%d) ]\n", pfx, |
2937 | 0 | i, dir, sems, name, id); |
2938 | 0 | } |
2939 | 0 | } |
2940 | | |
2941 | | int32_t |
2942 | | MessageChannel::GetTopmostMessageRoutingId() const |
2943 | 0 | { |
2944 | 0 | MOZ_RELEASE_ASSERT(MessageLoop::current() == mWorkerLoop); |
2945 | 0 | if (mCxxStackFrames.empty()) { |
2946 | 0 | return MSG_ROUTING_NONE; |
2947 | 0 | } |
2948 | 0 | const InterruptFrame& frame = mCxxStackFrames.back(); |
2949 | 0 | return frame.GetRoutingId(); |
2950 | 0 | } |
2951 | | |
2952 | | void |
2953 | | MessageChannel::EndTimeout() |
2954 | 0 | { |
2955 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2956 | 0 |
2957 | 0 | IPC_LOG("Ending timeout of seqno=%d", mTimedOutMessageSeqno); |
2958 | 0 | mTimedOutMessageSeqno = 0; |
2959 | 0 | mTimedOutMessageNestedLevel = 0; |
2960 | 0 |
2961 | 0 | RepostAllMessages(); |
2962 | 0 | } |
2963 | | |
2964 | | void |
2965 | | MessageChannel::RepostAllMessages() |
2966 | 0 | { |
2967 | 0 | bool needRepost = false; |
2968 | 0 | for (MessageTask* task : mPending) { |
2969 | 0 | if (!task->IsScheduled()) { |
2970 | 0 | needRepost = true; |
2971 | 0 | break; |
2972 | 0 | } |
2973 | 0 | } |
2974 | 0 | if (!needRepost) { |
2975 | 0 | // If everything is already scheduled to run, do nothing. |
2976 | 0 | return; |
2977 | 0 | } |
2978 | 0 | |
2979 | 0 | // In some cases we may have deferred dispatch of some messages in the |
2980 | 0 | // queue. Now we want to run them again. However, we can't just re-post |
2981 | 0 | // those messages since the messages after them in mPending would then be |
2982 | 0 | // before them in the event queue. So instead we cancel everything and |
2983 | 0 | // re-post all messages in the correct order. |
2984 | 0 | MessageQueue queue = std::move(mPending); |
2985 | 0 | while (RefPtr<MessageTask> task = queue.popFirst()) { |
2986 | 0 | RefPtr<MessageTask> newTask = new MessageTask(this, std::move(task->Msg())); |
2987 | 0 | mPending.insertBack(newTask); |
2988 | 0 | newTask->Post(); |
2989 | 0 | } |
2990 | 0 |
2991 | 0 | AssertMaybeDeferredCountCorrect(); |
2992 | 0 | } |
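The comment in RepostAllMessages explains why the unscheduled (deferred) tasks cannot simply be posted on their own: they would land behind tasks that sit after them in mPending. The standalone sketch below (not Mozilla code; the queue simulation and names are assumptions for illustration only) shows the reordering problem and the clear-and-repost fix.

    // Standalone sketch of the ordering problem RepostAllMessages avoids.
    // "Posting" a task appends it to a FIFO event queue; a deferred task was
    // never posted. Posting only the deferred task would run it after tasks
    // that were queued behind it in mPending, so everything is re-posted.
    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        // mPending order: A (posted), B (deferred, never posted), C (posted).
        std::vector<std::string> pending = {"A", "B", "C"};
        std::deque<std::string> eventQueue = {"A", "C"};   // B was deferred.

        // Naive fix: post only the unscheduled task B.
        std::deque<std::string> naive = eventQueue;
        naive.push_back("B");                              // runs A, C, B -- wrong order.

        // What RepostAllMessages does: drop the stale posts and re-post
        // everything in mPending order.
        std::deque<std::string> repost(pending.begin(), pending.end());  // runs A, B, C.

        for (const auto& m : naive)  std::cout << m << ' ';  // A C B
        std::cout << "(naive)\n";
        for (const auto& m : repost) std::cout << m << ' ';  // A B C
        std::cout << "(repost all)\n";
        return 0;
    }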
2993 | | |
2994 | | void |
2995 | | MessageChannel::CancelTransaction(int transaction) |
2996 | 0 | { |
2997 | 0 | mMonitor->AssertCurrentThreadOwns(); |
2998 | 0 |
2999 | 0 | // When we cancel a transaction, we need to behave as if there's no longer |
3000 | 0 | // any IPC on the stack. Anything we were dispatching or sending will get |
3001 | 0 | // canceled. Consequently, we have to update the state variables below. |
3002 | 0 | // |
3003 | 0 | // We also need to ensure that when any IPC functions on the stack return, |
3004 | 0 | // they don't reset these values using an RAII class like AutoSetValue. To |
3005 | 0 | // avoid that, these RAII classes check if the variable they set has been |
3006 | 0 | // tampered with (by us). If so, they don't reset the variable to the old |
3007 | 0 | // value. |
3008 | 0 |
3009 | 0 | IPC_LOG("CancelTransaction: xid=%d", transaction); |
3010 | 0 |
3011 | 0 | // An unusual case: We timed out a transaction which the other side then |
3012 | 0 |     //     cancelled. In this case we just leave the timed-out state and try to
3013 | 0 | // forget this ever happened. |
3014 | 0 | if (transaction == mTimedOutMessageSeqno) { |
3015 | 0 | IPC_LOG("Cancelled timed out message %d", mTimedOutMessageSeqno); |
3016 | 0 | EndTimeout(); |
3017 | 0 |
3018 | 0 | // Normally mCurrentTransaction == 0 here. But it can be non-zero if: |
3019 | 0 | // 1. Parent sends NESTED_INSIDE_SYNC message H. |
3020 | 0 | // 2. Parent times out H. |
3021 | 0 | // 3. Child dispatches H and sends nested message H' (same transaction). |
3022 | 0 | // 4. Parent dispatches H' and cancels. |
3023 | 0 | MOZ_RELEASE_ASSERT(!mTransactionStack || mTransactionStack->TransactionID() == transaction); |
3024 | 0 | if (mTransactionStack) { |
3025 | 0 | mTransactionStack->Cancel(); |
3026 | 0 | } |
3027 | 0 | } else { |
3028 | 0 | MOZ_RELEASE_ASSERT(mTransactionStack->TransactionID() == transaction); |
3029 | 0 | mTransactionStack->Cancel(); |
3030 | 0 | } |
3031 | 0 |
3032 | 0 | bool foundSync = false; |
3033 | 0 | for (MessageTask* p = mPending.getFirst(); p; ) { |
3034 | 0 | Message &msg = p->Msg(); |
3035 | 0 |
3036 | 0 | // If there was a race between the parent and the child, then we may |
3037 | 0 | // have a queued sync message. We want to drop this message from the |
3038 | 0 |         // queue since it will get cancelled along with the transaction being
3039 | 0 | // cancelled. This happens if the message in the queue is NESTED_INSIDE_SYNC. |
3040 | 0 | if (msg.is_sync() && msg.nested_level() != IPC::Message::NOT_NESTED) { |
3041 | 0 | MOZ_RELEASE_ASSERT(!foundSync); |
3042 | 0 | MOZ_RELEASE_ASSERT(msg.transaction_id() != transaction); |
3043 | 0 | IPC_LOG("Removing msg from queue seqno=%d xid=%d", msg.seqno(), msg.transaction_id()); |
3044 | 0 | foundSync = true; |
3045 | 0 | if (!IsAlwaysDeferred(msg)) { |
3046 | 0 | mMaybeDeferredPendingCount--; |
3047 | 0 | } |
3048 | 0 | p = p->removeAndGetNext(); |
3049 | 0 | continue; |
3050 | 0 | } |
3051 | 0 |
3052 | 0 | p = p->getNext(); |
3053 | 0 | } |
3054 | 0 |
3055 | 0 | AssertMaybeDeferredCountCorrect(); |
3056 | 0 | } |
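The comment at the top of CancelTransaction refers to RAII helpers (AutoSetValue and friends, defined elsewhere in this file) that skip restoring a state variable when cancellation has already overwritten it. Those classes are not shown in this section; the sketch below is only an illustrative reconstruction of that tamper-check idea, with hypothetical names, not the actual implementation.

    // Illustrative-only sketch of the "don't restore if tampered with" RAII
    // pattern described above. Names and details are assumptions.
    template <typename T>
    class TamperAwareAutoSet {
    public:
        TamperAwareAutoSet(T& aVar, const T& aNewValue)
            : mVar(aVar), mOldValue(aVar), mNewValue(aNewValue) {
            mVar = aNewValue;
        }
        ~TamperAwareAutoSet() {
            // If cancellation (or anything else) overwrote the variable while
            // this frame was on the stack, leave that value alone instead of
            // clobbering it with the saved one.
            if (mVar == mNewValue) {
                mVar = mOldValue;
            }
        }
    private:
        T& mVar;
        T mOldValue;
        T mNewValue;
    };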
3057 | | |
3058 | | bool |
3059 | | MessageChannel::IsInTransaction() const |
3060 | 0 | { |
3061 | 0 | MonitorAutoLock lock(*mMonitor); |
3062 | 0 | return !!mTransactionStack; |
3063 | 0 | } |
3064 | | |
3065 | | void |
3066 | | MessageChannel::CancelCurrentTransaction() |
3067 | 0 | { |
3068 | 0 | MonitorAutoLock lock(*mMonitor); |
3069 | 0 | if (DispatchingSyncMessageNestedLevel() >= IPC::Message::NESTED_INSIDE_SYNC) { |
3070 | 0 | if (DispatchingSyncMessageNestedLevel() == IPC::Message::NESTED_INSIDE_CPOW || |
3071 | 0 | DispatchingAsyncMessageNestedLevel() == IPC::Message::NESTED_INSIDE_CPOW) |
3072 | 0 | { |
3073 | 0 | mListener->IntentionalCrash(); |
3074 | 0 | } |
3075 | 0 |
3076 | 0 | IPC_LOG("Cancel requested: current xid=%d", CurrentNestedInsideSyncTransaction()); |
3077 | 0 | MOZ_RELEASE_ASSERT(DispatchingSyncMessage()); |
3078 | 0 | CancelMessage *cancel = new CancelMessage(CurrentNestedInsideSyncTransaction()); |
3079 | 0 | CancelTransaction(CurrentNestedInsideSyncTransaction()); |
3080 | 0 | mLink->SendMessage(cancel); |
3081 | 0 | } |
3082 | 0 | } |
3083 | | |
3084 | | void |
3085 | | CancelCPOWs() |
3086 | 0 | { |
3087 | 0 | if (gParentProcessBlocker) { |
3088 | 0 | mozilla::Telemetry::Accumulate(mozilla::Telemetry::IPC_TRANSACTION_CANCEL, true); |
3089 | 0 | gParentProcessBlocker->CancelCurrentTransaction(); |
3090 | 0 | } |
3091 | 0 | } |
3092 | | |
3093 | | } // namespace ipc |
3094 | | } // namespace mozilla |