/src/mozilla-central/netwerk/base/nsSocketTransportService2.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | // vim:set sw=4 sts=4 et cin: |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "nsSocketTransportService2.h" |
7 | | #include "nsSocketTransport2.h" |
8 | | #include "IOActivityMonitor.h" |
9 | | #include "mozilla/IntegerPrintfMacros.h" |
10 | | #include "mozilla/Preferences.h" |
11 | | #include "nsIOService.h" |
12 | | #include "nsASocketHandler.h" |
13 | | #include "nsError.h" |
14 | | #include "prnetdb.h" |
15 | | #include "prerror.h" |
16 | | #include "nsIPrefService.h" |
17 | | #include "nsIPrefBranch.h" |
18 | | #include "nsServiceManagerUtils.h" |
19 | | #include "nsIObserverService.h" |
20 | | #include "mozilla/Atomics.h" |
21 | | #include "mozilla/Services.h" |
22 | | #include "mozilla/Likely.h" |
23 | | #include "mozilla/PublicSSL.h" |
24 | | #include "mozilla/ChaosMode.h" |
25 | | #include "mozilla/PodOperations.h" |
26 | | #include "mozilla/Telemetry.h" |
27 | | #include "nsThreadUtils.h" |
28 | | #include "nsIFile.h" |
29 | | #include "nsIWidget.h" |
30 | | |
31 | | #ifdef MOZ_TASK_TRACER |
32 | | #include "GeckoTaskTracer.h" |
33 | | #endif |
34 | | |
35 | | namespace mozilla { |
36 | | namespace net { |
37 | | |
38 | | LazyLogModule gSocketTransportLog("nsSocketTransport"); |
39 | | LazyLogModule gUDPSocketLog("UDPSocket"); |
40 | | LazyLogModule gTCPSocketLog("TCPSocket"); |
41 | | |
42 | | nsSocketTransportService *gSocketTransportService = nullptr; |
43 | | static Atomic<PRThread*, Relaxed> gSocketThread; |
44 | | |
45 | 3 | #define SEND_BUFFER_PREF "network.tcp.sendbuffer" |
46 | 3 | #define KEEPALIVE_ENABLED_PREF "network.tcp.keepalive.enabled" |
47 | 3 | #define KEEPALIVE_IDLE_TIME_PREF "network.tcp.keepalive.idle_time" |
48 | 3 | #define KEEPALIVE_RETRY_INTERVAL_PREF "network.tcp.keepalive.retry_interval" |
49 | 3 | #define KEEPALIVE_PROBE_COUNT_PREF "network.tcp.keepalive.probe_count" |
50 | 6 | #define SOCKET_LIMIT_TARGET 1000U |
51 | 3 | #define SOCKET_LIMIT_MIN 50U |
52 | 3 | #define MAX_TIME_BETWEEN_TWO_POLLS "network.sts.max_time_for_events_between_two_polls" |
53 | 3 | #define POLL_BUSY_WAIT_PERIOD "network.sts.poll_busy_wait_period" |
54 | 3 | #define POLL_BUSY_WAIT_PERIOD_TIMEOUT "network.sts.poll_busy_wait_period_timeout" |
55 | 3 | #define TELEMETRY_PREF "toolkit.telemetry.enabled" |
56 | 3 | #define MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN "network.sts.max_time_for_pr_close_during_shutdown" |
57 | 3 | #define POLLABLE_EVENT_TIMEOUT "network.sts.pollable_event_timeout" |
58 | 3 | #define ESNI_ENABLED "network.security.esni.enabled" |
59 | 3 | #define ESNI_DISABLED_MITM "security.pki.mitm_detected" |
60 | | |
61 | | #define REPAIR_POLLABLE_EVENT_TIME 10 |
62 | | |
63 | | uint32_t nsSocketTransportService::gMaxCount; |
64 | | PRCallOnceType nsSocketTransportService::gMaxCountInitOnce; |
65 | | |
66 | | // Utility functions |
67 | | bool |
68 | | OnSocketThread() |
69 | 12 | { |
70 | 12 | return PR_GetCurrentThread() == gSocketThread; |
71 | 12 | } |
72 | | |
73 | | //----------------------------------------------------------------------------- |
74 | | |
75 | | bool |
76 | | nsSocketTransportService::SocketContext::IsTimedOut(PRIntervalTime now) const |
77 | 0 | { |
78 | 0 | return TimeoutIn(now) == 0; |
79 | 0 | } |
80 | | |
81 | | void |
82 | | nsSocketTransportService::SocketContext::EnsureTimeout(PRIntervalTime now) |
83 | 0 | { |
84 | 0 | SOCKET_LOG(("SocketContext::EnsureTimeout socket=%p", mHandler)); |
85 | 0 | if (!mPollStartEpoch) { |
86 | 0 | SOCKET_LOG((" engaging")); |
87 | 0 | mPollStartEpoch = now; |
88 | 0 | } |
89 | 0 | } |
90 | | |
91 | | void |
92 | | nsSocketTransportService::SocketContext::DisengageTimeout() |
93 | 0 | { |
94 | 0 | SOCKET_LOG(("SocketContext::DisengageTimeout socket=%p", mHandler)); |
95 | 0 | mPollStartEpoch = 0; |
96 | 0 | } |
97 | | |
98 | | PRIntervalTime |
99 | | nsSocketTransportService::SocketContext::TimeoutIn(PRIntervalTime now) const |
100 | 0 | { |
101 | 0 | SOCKET_LOG(("SocketContext::TimeoutIn socket=%p, timeout=%us", |
102 | 0 | mHandler, mHandler->mPollTimeout)); |
103 | 0 |
|
104 | 0 | if (mHandler->mPollTimeout == UINT16_MAX || !mPollStartEpoch) { |
105 | 0 | SOCKET_LOG((" not engaged")); |
106 | 0 | return NS_SOCKET_POLL_TIMEOUT; |
107 | 0 | } |
108 | 0 |
|
109 | 0 | PRIntervalTime elapsed = (now - mPollStartEpoch); |
110 | 0 | PRIntervalTime timeout = PR_SecondsToInterval(mHandler->mPollTimeout); |
111 | 0 |
|
112 | 0 | if (elapsed >= timeout) { |
113 | 0 | SOCKET_LOG((" timed out!")); |
114 | 0 | return 0; |
115 | 0 | } |
116 | 0 | SOCKET_LOG((" remains %us", PR_IntervalToSeconds(timeout - elapsed))); |
117 | 0 | return timeout - elapsed; |
118 | 0 | } |
119 | | |
120 | | void |
121 | | nsSocketTransportService::SocketContext::MaybeResetEpoch() |
122 | 0 | { |
123 | 0 | if (mPollStartEpoch && mHandler->mPollTimeout == UINT16_MAX) { |
124 | 0 | mPollStartEpoch = 0; |
125 | 0 | } |
126 | 0 | } |
127 | | |
128 | | //----------------------------------------------------------------------------- |
129 | | // ctor/dtor (called on the main/UI thread by the service manager) |
130 | | |
131 | | nsSocketTransportService::nsSocketTransportService() |
132 | | : mThread(nullptr) |
133 | | , mLock("nsSocketTransportService::mLock") |
134 | | , mInitialized(false) |
135 | | , mShuttingDown(false) |
136 | | , mOffline(false) |
137 | | , mGoingOffline(false) |
138 | | , mRawThread(nullptr) |
139 | | , mActiveListSize(SOCKET_LIMIT_MIN) |
140 | | , mIdleListSize(SOCKET_LIMIT_MIN) |
141 | | , mActiveCount(0) |
142 | | , mIdleCount(0) |
143 | | , mSentBytesCount(0) |
144 | | , mReceivedBytesCount(0) |
145 | | , mSendBufferSize(0) |
146 | | , mKeepaliveIdleTimeS(600) |
147 | | , mKeepaliveRetryIntervalS(1) |
148 | | , mKeepaliveProbeCount(kDefaultTCPKeepCount) |
149 | | , mKeepaliveEnabledPref(false) |
150 | | , mPollableEventTimeout(TimeDuration::FromSeconds(6)) |
151 | | , mServingPendingQueue(false) |
152 | | , mMaxTimePerPollIter(100) |
153 | | , mTelemetryEnabledPref(false) |
154 | | , mMaxTimeForPrClosePref(PR_SecondsToInterval(5)) |
155 | | , mLastNetworkLinkChangeTime(0) |
156 | | , mNetworkLinkChangeBusyWaitPeriod(PR_SecondsToInterval(50)) |
157 | | , mNetworkLinkChangeBusyWaitTimeout(PR_SecondsToInterval(7)) |
158 | | , mSleepPhase(false) |
159 | | , mProbedMaxCount(false) |
160 | | #if defined(XP_WIN) |
161 | | , mPolling(false) |
162 | | #endif |
163 | | , mEsniEnabled(false) |
164 | | , mTrustedMitmDetected(false) |
165 | | , mNotTrustedMitmDetected(false) |
166 | 3 | { |
167 | 3 | NS_ASSERTION(NS_IsMainThread(), "wrong thread"); |
168 | 3 | |
169 | 3 | PR_CallOnce(&gMaxCountInitOnce, DiscoverMaxCount); |
170 | 3 | mActiveList = (SocketContext *) |
171 | 3 | moz_xmalloc(sizeof(SocketContext) * mActiveListSize); |
172 | 3 | mIdleList = (SocketContext *) |
173 | 3 | moz_xmalloc(sizeof(SocketContext) * mIdleListSize); |
174 | 3 | mPollList = (PRPollDesc *) |
175 | 3 | moz_xmalloc(sizeof(PRPollDesc) * (mActiveListSize + 1)); |
176 | 3 | |
177 | 3 | NS_ASSERTION(!gSocketTransportService, "must not instantiate twice"); |
178 | 3 | gSocketTransportService = this; |
179 | 3 | } |
180 | | |
181 | | nsSocketTransportService::~nsSocketTransportService() |
182 | 0 | { |
183 | 0 | NS_ASSERTION(NS_IsMainThread(), "wrong thread"); |
184 | 0 | NS_ASSERTION(!mInitialized, "not shutdown properly"); |
185 | 0 |
|
186 | 0 | free(mActiveList); |
187 | 0 | free(mIdleList); |
188 | 0 | free(mPollList); |
189 | 0 | gSocketTransportService = nullptr; |
190 | 0 | } |
191 | | |
192 | | //----------------------------------------------------------------------------- |
193 | | // event queue (any thread) |
194 | | |
195 | | already_AddRefed<nsIThread> |
196 | | nsSocketTransportService::GetThreadSafely() |
197 | 8 | { |
198 | 8 | MutexAutoLock lock(mLock); |
199 | 8 | nsCOMPtr<nsIThread> result = mThread; |
200 | 8 | return result.forget(); |
201 | 8 | } |
202 | | |
203 | | NS_IMETHODIMP |
204 | | nsSocketTransportService::DispatchFromScript(nsIRunnable *event, uint32_t flags) |
205 | 0 | { |
206 | 0 | nsCOMPtr<nsIRunnable> event_ref(event); |
207 | 0 | return Dispatch(event_ref.forget(), flags); |
208 | 0 | } |
209 | | |
210 | | NS_IMETHODIMP |
211 | | nsSocketTransportService::Dispatch(already_AddRefed<nsIRunnable> event, uint32_t flags) |
212 | 8 | { |
213 | 8 | nsCOMPtr<nsIRunnable> event_ref(event); |
214 | 8 | SOCKET_LOG(("STS dispatch [%p]\n", event_ref.get())); |
215 | 8 | |
216 | 8 | nsCOMPtr<nsIThread> thread = GetThreadSafely(); |
217 | 8 | nsresult rv; |
218 | 8 | rv = thread ? thread->Dispatch(event_ref.forget(), flags) : NS_ERROR_NOT_INITIALIZED; |
219 | 8 | if (rv == NS_ERROR_UNEXPECTED) { |
220 | 0 | // Thread is no longer accepting events. We must have just shut it |
221 | 0 | // down on the main thread. Pretend we never saw it. |
222 | 0 | rv = NS_ERROR_NOT_INITIALIZED; |
223 | 0 | } |
224 | 8 | return rv; |
225 | 8 | } |
226 | | |
227 | | NS_IMETHODIMP |
228 | | nsSocketTransportService::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) |
229 | 0 | { |
230 | 0 | return NS_ERROR_NOT_IMPLEMENTED; |
231 | 0 | } |
232 | | |
233 | | NS_IMETHODIMP |
234 | | nsSocketTransportService::IsOnCurrentThread(bool *result) |
235 | 0 | { |
236 | 0 | nsCOMPtr<nsIThread> thread = GetThreadSafely(); |
237 | 0 | NS_ENSURE_TRUE(thread, NS_ERROR_NOT_INITIALIZED); |
238 | 0 | return thread->IsOnCurrentThread(result); |
239 | 0 | } |
240 | | |
241 | | NS_IMETHODIMP_(bool) |
242 | | nsSocketTransportService::IsOnCurrentThreadInfallible() |
243 | 0 | { |
244 | 0 | nsCOMPtr<nsIThread> thread = GetThreadSafely(); |
245 | 0 | NS_ENSURE_TRUE(thread, false); |
246 | 0 | return thread->IsOnCurrentThread(); |
247 | 0 | } |
248 | | |
249 | | //----------------------------------------------------------------------------- |
250 | | // socket api (socket thread only) |
251 | | |
252 | | NS_IMETHODIMP |
253 | | nsSocketTransportService::NotifyWhenCanAttachSocket(nsIRunnable *event) |
254 | 0 | { |
255 | 0 | SOCKET_LOG(("nsSocketTransportService::NotifyWhenCanAttachSocket\n")); |
256 | 0 |
|
257 | 0 | MOZ_ASSERT(OnSocketThread(), "not on socket thread"); |
258 | 0 |
|
259 | 0 | if (CanAttachSocket()) { |
260 | 0 | return Dispatch(event, NS_DISPATCH_NORMAL); |
261 | 0 | } |
262 | 0 |
|
263 | 0 | auto *runnable = new LinkedRunnableEvent(event); |
264 | 0 | mPendingSocketQueue.insertBack(runnable); |
265 | 0 | return NS_OK; |
266 | 0 | } |
267 | | |
268 | | NS_IMETHODIMP |
269 | | nsSocketTransportService::AttachSocket(PRFileDesc *fd, nsASocketHandler *handler) |
270 | 0 | { |
271 | 0 | SOCKET_LOG(("nsSocketTransportService::AttachSocket [handler=%p]\n", handler)); |
272 | 0 |
|
273 | 0 | MOZ_ASSERT(OnSocketThread(), "not on socket thread"); |
274 | 0 |
|
275 | 0 | if (!CanAttachSocket()) { |
276 | 0 | return NS_ERROR_NOT_AVAILABLE; |
277 | 0 | } |
278 | 0 | |
279 | 0 | SocketContext sock; |
280 | 0 | sock.mFD = fd; |
281 | 0 | sock.mHandler = handler; |
282 | 0 | sock.mPollStartEpoch = 0; |
283 | 0 |
|
284 | 0 | nsresult rv = AddToIdleList(&sock); |
285 | 0 | if (NS_SUCCEEDED(rv)) |
286 | 0 | NS_ADDREF(handler); |
287 | 0 | return rv; |
288 | 0 | } |
289 | | |
290 | | // the number of sockets that can be attached at any given time is |
291 | | // limited. this is done because some operating systems (e.g., Win9x) |
292 | | // limit the number of sockets that can be created by an application. |
293 | | // AttachSocket will fail if the limit is exceeded. consumers should |
294 | | // call CanAttachSocket and check the result before creating a socket. |
295 | | |
296 | | bool |
297 | | nsSocketTransportService::CanAttachSocket() |
298 | 0 | { |
299 | 0 | static bool reported900FDLimit = false; |
300 | 0 |
|
301 | 0 | uint32_t total = mActiveCount + mIdleCount; |
302 | 0 | bool rv = total < gMaxCount; |
303 | 0 |
|
304 | 0 | if (mTelemetryEnabledPref && |
305 | 0 | (((total >= 900) || !rv) && !reported900FDLimit)) { |
306 | 0 | reported900FDLimit = true; |
307 | 0 | Telemetry::Accumulate(Telemetry::NETWORK_SESSION_AT_900FD, true); |
308 | 0 | } |
309 | 0 |
|
310 | 0 | return rv; |
311 | 0 | } |
312 | | |
313 | | nsresult |
314 | | nsSocketTransportService::DetachSocket(SocketContext *listHead, SocketContext *sock) |
315 | 0 | { |
316 | 0 | SOCKET_LOG(("nsSocketTransportService::DetachSocket [handler=%p]\n", sock->mHandler)); |
317 | 0 | MOZ_ASSERT((listHead == mActiveList) || (listHead == mIdleList), |
318 | 0 | "DetachSocket invalid head"); |
319 | 0 |
|
320 | 0 | { |
321 | | #ifdef MOZ_TASK_TRACER |
322 | | tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO); |
323 | | #endif |
324 | | // inform the handler that this socket is going away |
325 | 0 | sock->mHandler->OnSocketDetached(sock->mFD); |
326 | 0 | } |
327 | 0 | mSentBytesCount += sock->mHandler->ByteCountSent(); |
328 | 0 | mReceivedBytesCount += sock->mHandler->ByteCountReceived(); |
329 | 0 |
|
330 | 0 | // cleanup |
331 | 0 | sock->mFD = nullptr; |
332 | 0 | NS_RELEASE(sock->mHandler); |
333 | 0 |
|
334 | 0 | if (listHead == mActiveList) |
335 | 0 | RemoveFromPollList(sock); |
336 | 0 | else |
337 | 0 | RemoveFromIdleList(sock); |
338 | 0 |
|
339 | 0 | // NOTE: sock is now an invalid pointer |
340 | 0 |
|
341 | 0 | // |
342 | 0 | // notify the first element on the pending socket queue... |
343 | 0 | // |
344 | 0 | nsCOMPtr<nsIRunnable> event; |
345 | 0 | LinkedRunnableEvent *runnable = mPendingSocketQueue.getFirst(); |
346 | 0 | if (runnable) { |
347 | 0 | event = runnable->TakeEvent(); |
348 | 0 | runnable->remove(); |
349 | 0 | delete runnable; |
350 | 0 | } |
351 | 0 | if (event) { |
352 | 0 | // move event from pending queue to dispatch queue |
353 | 0 | return Dispatch(event, NS_DISPATCH_NORMAL); |
354 | 0 | } |
355 | 0 | return NS_OK; |
356 | 0 | } |
357 | | |
358 | | nsresult |
359 | | nsSocketTransportService::AddToPollList(SocketContext *sock) |
360 | 0 | { |
361 | 0 | MOZ_ASSERT(!(static_cast<uint32_t>(sock - mActiveList) < mActiveListSize), |
362 | 0 | "AddToPollList Socket Already Active"); |
363 | 0 |
|
364 | 0 | SOCKET_LOG(("nsSocketTransportService::AddToPollList [handler=%p]\n", sock->mHandler)); |
365 | 0 | if (mActiveCount == mActiveListSize) { |
366 | 0 | SOCKET_LOG((" Active List size of %d met\n", mActiveCount)); |
367 | 0 | if (!GrowActiveList()) { |
368 | 0 | NS_ERROR("too many active sockets"); |
369 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
370 | 0 | } |
371 | 0 | } |
372 | 0 |
|
373 | 0 | uint32_t newSocketIndex = mActiveCount; |
374 | 0 | if (ChaosMode::isActive(ChaosFeature::NetworkScheduling)) { |
375 | 0 | newSocketIndex = ChaosMode::randomUint32LessThan(mActiveCount + 1); |
376 | 0 | PodMove(mActiveList + newSocketIndex + 1, mActiveList + newSocketIndex, |
377 | 0 | mActiveCount - newSocketIndex); |
378 | 0 | PodMove(mPollList + newSocketIndex + 2, mPollList + newSocketIndex + 1, |
379 | 0 | mActiveCount - newSocketIndex); |
380 | 0 | } |
381 | 0 |
|
382 | 0 | sock->EnsureTimeout(PR_IntervalNow()); |
383 | 0 | mActiveList[newSocketIndex] = *sock; |
384 | 0 | mActiveCount++; |
385 | 0 |
|
386 | 0 | mPollList[newSocketIndex + 1].fd = sock->mFD; |
387 | 0 | mPollList[newSocketIndex + 1].in_flags = sock->mHandler->mPollFlags; |
388 | 0 | mPollList[newSocketIndex + 1].out_flags = 0; |
389 | 0 |
|
390 | 0 | SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount)); |
391 | 0 | return NS_OK; |
392 | 0 | } |
393 | | |
394 | | void |
395 | | nsSocketTransportService::RemoveFromPollList(SocketContext *sock) |
396 | 0 | { |
397 | 0 | SOCKET_LOG(("nsSocketTransportService::RemoveFromPollList [handler=%p]\n", sock->mHandler)); |
398 | 0 |
|
399 | 0 | uint32_t index = sock - mActiveList; |
400 | 0 | MOZ_ASSERT(index < mActiveListSize, "invalid index"); |
401 | 0 |
|
402 | 0 | SOCKET_LOG((" index=%u mActiveCount=%u\n", index, mActiveCount)); |
403 | 0 |
|
404 | 0 | if (index != mActiveCount-1) { |
405 | 0 | mActiveList[index] = mActiveList[mActiveCount-1]; |
406 | 0 | mPollList[index+1] = mPollList[mActiveCount]; |
407 | 0 | } |
408 | 0 | mActiveCount--; |
409 | 0 |
|
410 | 0 | SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount)); |
411 | 0 | } |
412 | | |
413 | | nsresult |
414 | | nsSocketTransportService::AddToIdleList(SocketContext *sock) |
415 | 0 | { |
416 | 0 | MOZ_ASSERT(!(static_cast<uint32_t>(sock - mIdleList) < mIdleListSize), |
417 | 0 | "AddToIdlelList Socket Already Idle"); |
418 | 0 |
|
419 | 0 | SOCKET_LOG(("nsSocketTransportService::AddToIdleList [handler=%p]\n", sock->mHandler)); |
420 | 0 | if (mIdleCount == mIdleListSize) { |
421 | 0 | SOCKET_LOG((" Idle List size of %d met\n", mIdleCount)); |
422 | 0 | if (!GrowIdleList()) { |
423 | 0 | NS_ERROR("too many idle sockets"); |
424 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
425 | 0 | } |
426 | 0 | } |
427 | 0 |
|
428 | 0 | mIdleList[mIdleCount] = *sock; |
429 | 0 | mIdleCount++; |
430 | 0 |
|
431 | 0 | SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount)); |
432 | 0 | return NS_OK; |
433 | 0 | } |
434 | | |
435 | | void |
436 | | nsSocketTransportService::RemoveFromIdleList(SocketContext *sock) |
437 | 0 | { |
438 | 0 | SOCKET_LOG(("nsSocketTransportService::RemoveFromIdleList [handler=%p]\n", sock->mHandler)); |
439 | 0 |
|
440 | 0 | uint32_t index = sock - mIdleList; |
441 | 0 | NS_ASSERTION(index < mIdleListSize, "invalid index in idle list"); |
442 | 0 |
|
443 | 0 | if (index != mIdleCount-1) |
444 | 0 | mIdleList[index] = mIdleList[mIdleCount-1]; |
445 | 0 | mIdleCount--; |
446 | 0 |
|
447 | 0 | SOCKET_LOG((" active=%u idle=%u\n", mActiveCount, mIdleCount)); |
448 | 0 | } |
449 | | |
450 | | void |
451 | | nsSocketTransportService::MoveToIdleList(SocketContext *sock) |
452 | 0 | { |
453 | 0 | nsresult rv = AddToIdleList(sock); |
454 | 0 | if (NS_FAILED(rv)) |
455 | 0 | DetachSocket(mActiveList, sock); |
456 | 0 | else |
457 | 0 | RemoveFromPollList(sock); |
458 | 0 | } |
459 | | |
460 | | void |
461 | | nsSocketTransportService::MoveToPollList(SocketContext *sock) |
462 | 0 | { |
463 | 0 | nsresult rv = AddToPollList(sock); |
464 | 0 | if (NS_FAILED(rv)) |
465 | 0 | DetachSocket(mIdleList, sock); |
466 | 0 | else |
467 | 0 | RemoveFromIdleList(sock); |
468 | 0 | } |
469 | | |
470 | | bool |
471 | | nsSocketTransportService::GrowActiveList() |
472 | 0 | { |
473 | 0 | int32_t toAdd = gMaxCount - mActiveListSize; |
474 | 0 | if (toAdd > 100) { |
475 | 0 | toAdd = 100; |
476 | 0 | } else if (toAdd < 1) { |
477 | 0 | MOZ_ASSERT(false, "CanAttachSocket() should prevent this"); |
478 | 0 | return false; |
479 | 0 | } |
480 | 0 |
|
481 | 0 | mActiveListSize += toAdd; |
482 | 0 | mActiveList = (SocketContext *) |
483 | 0 | moz_xrealloc(mActiveList, sizeof(SocketContext) * mActiveListSize); |
484 | 0 | mPollList = (PRPollDesc *) |
485 | 0 | moz_xrealloc(mPollList, sizeof(PRPollDesc) * (mActiveListSize + 1)); |
486 | 0 | return true; |
487 | 0 | } |
488 | | |
489 | | bool |
490 | | nsSocketTransportService::GrowIdleList() |
491 | 0 | { |
492 | 0 | int32_t toAdd = gMaxCount - mIdleListSize; |
493 | 0 | if (toAdd > 100) { |
494 | 0 | toAdd = 100; |
495 | 0 | } else if (toAdd < 1) { |
496 | 0 | MOZ_ASSERT(false, "CanAttachSocket() should prevent this"); |
497 | 0 | return false; |
498 | 0 | } |
499 | 0 |
|
500 | 0 | mIdleListSize += toAdd; |
501 | 0 | mIdleList = (SocketContext *) |
502 | 0 | moz_xrealloc(mIdleList, sizeof(SocketContext) * mIdleListSize); |
503 | 0 | return true; |
504 | 0 | } |
505 | | |
506 | | PRIntervalTime |
507 | | nsSocketTransportService::PollTimeout(PRIntervalTime now) |
508 | 4 | { |
509 | 4 | if (mActiveCount == 0) { |
510 | 4 | return NS_SOCKET_POLL_TIMEOUT; |
511 | 4 | } |
512 | 0 | |
513 | 0 | // compute minimum time before any socket timeout expires. |
514 | 0 | PRIntervalTime minR = NS_SOCKET_POLL_TIMEOUT; |
515 | 0 | for (uint32_t i=0; i<mActiveCount; ++i) { |
516 | 0 | const SocketContext &s = mActiveList[i]; |
517 | 0 | PRIntervalTime r = s.TimeoutIn(now); |
518 | 0 | if (r < minR) { |
519 | 0 | minR = r; |
520 | 0 | } |
521 | 0 | } |
522 | 0 | if (minR == NS_SOCKET_POLL_TIMEOUT) { |
523 | 0 | SOCKET_LOG(("poll timeout: none\n")); |
524 | 0 | return NS_SOCKET_POLL_TIMEOUT; |
525 | 0 | } |
526 | 0 | SOCKET_LOG(("poll timeout: %" PRIu32 "\n", PR_IntervalToSeconds(minR))); |
527 | 0 | return minR; |
528 | 0 | } |
529 | | |
530 | | int32_t |
531 | | nsSocketTransportService::Poll(TimeDuration *pollDuration, |
532 | | PRIntervalTime ts) |
533 | 7 | { |
534 | 7 | PRPollDesc *pollList; |
535 | 7 | uint32_t pollCount; |
536 | 7 | PRIntervalTime pollTimeout; |
537 | 7 | *pollDuration = nullptr; |
538 | 7 | |
539 | 7 | // If there are pending events for this thread then |
540 | 7 | // DoPollIteration() should service the network without blocking. |
541 | 7 | bool pendingEvents = false; |
542 | 7 | mRawThread->HasPendingEvents(&pendingEvents); |
543 | 7 | |
544 | 7 | if (mPollList[0].fd) { |
545 | 7 | mPollList[0].out_flags = 0; |
546 | 7 | pollList = mPollList; |
547 | 7 | pollCount = mActiveCount + 1; |
548 | 7 | pollTimeout = pendingEvents ? PR_INTERVAL_NO_WAIT : PollTimeout(ts); |
549 | 7 | } |
550 | 0 | else { |
551 | 0 | // no pollable event, so busy wait... |
552 | 0 | pollCount = mActiveCount; |
553 | 0 | if (pollCount) |
554 | 0 | pollList = &mPollList[1]; |
555 | 0 | else |
556 | 0 | pollList = nullptr; |
557 | 0 | pollTimeout = |
558 | 0 | pendingEvents ? PR_INTERVAL_NO_WAIT : PR_MillisecondsToInterval(25); |
559 | 0 | } |
560 | 7 | |
561 | 7 | if ((ts - mLastNetworkLinkChangeTime) < mNetworkLinkChangeBusyWaitPeriod) { |
562 | 0 | // Being here means we are few seconds after a network change has |
563 | 0 | // been detected. |
564 | 0 | PRIntervalTime to = mNetworkLinkChangeBusyWaitTimeout; |
565 | 0 | if (to) { |
566 | 0 | pollTimeout = std::min(to, pollTimeout); |
567 | 0 | SOCKET_LOG((" timeout shorthened after network change event")); |
568 | 0 | } |
569 | 0 | } |
570 | 7 | |
571 | 7 | TimeStamp pollStart; |
572 | 7 | if (mTelemetryEnabledPref) { |
573 | 7 | pollStart = TimeStamp::NowLoRes(); |
574 | 7 | } |
575 | 7 | |
576 | 7 | SOCKET_LOG((" timeout = %i milliseconds\n", |
577 | 7 | PR_IntervalToMilliseconds(pollTimeout))); |
578 | 7 | |
579 | 7 | int32_t rv = PR_Poll(pollList, pollCount, pollTimeout); |
580 | 7 | |
581 | 7 | if (mTelemetryEnabledPref && !pollStart.IsNull()) { |
582 | 4 | *pollDuration = TimeStamp::NowLoRes() - pollStart; |
583 | 4 | } |
584 | 7 | |
585 | 7 | SOCKET_LOG((" ...returned after %i milliseconds\n", |
586 | 7 | PR_IntervalToMilliseconds(PR_IntervalNow() - ts))); |
587 | 7 | |
588 | 7 | return rv; |
589 | 7 | } |
590 | | |
591 | | //----------------------------------------------------------------------------- |
592 | | // xpcom api |
593 | | |
594 | | NS_IMPL_ISUPPORTS(nsSocketTransportService, |
595 | | nsISocketTransportService, |
596 | | nsIRoutedSocketTransportService, |
597 | | nsIEventTarget, |
598 | | nsIThreadObserver, |
599 | | nsIRunnable, |
600 | | nsPISocketTransportService, |
601 | | nsIObserver) |
602 | | |
603 | | static const char* gCallbackPrefs[] = { |
604 | | SEND_BUFFER_PREF, |
605 | | KEEPALIVE_ENABLED_PREF, |
606 | | KEEPALIVE_IDLE_TIME_PREF, |
607 | | KEEPALIVE_RETRY_INTERVAL_PREF, |
608 | | KEEPALIVE_PROBE_COUNT_PREF, |
609 | | MAX_TIME_BETWEEN_TWO_POLLS, |
610 | | TELEMETRY_PREF, |
611 | | MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN, |
612 | | POLLABLE_EVENT_TIMEOUT, |
613 | | ESNI_ENABLED, |
614 | | ESNI_DISABLED_MITM, |
615 | | nullptr, |
616 | | }; |
617 | | |
618 | | /* static */ void |
619 | | nsSocketTransportService::PrefCallback(const char* aPref, nsSocketTransportService* aSelf) |
620 | 0 | { |
621 | 0 | aSelf->UpdatePrefs(); |
622 | 0 | } |
623 | | |
624 | | // called from main thread only |
625 | | NS_IMETHODIMP |
626 | | nsSocketTransportService::Init() |
627 | 6 | { |
628 | 6 | if (!NS_IsMainThread()) { |
629 | 0 | NS_ERROR("wrong thread"); |
630 | 0 | return NS_ERROR_UNEXPECTED; |
631 | 0 | } |
632 | 6 | |
633 | 6 | if (mInitialized) |
634 | 3 | return NS_OK; |
635 | 3 | |
636 | 3 | if (mShuttingDown) |
637 | 0 | return NS_ERROR_UNEXPECTED; |
638 | 3 | |
639 | 3 | nsCOMPtr<nsIThread> thread; |
640 | 3 | nsresult rv = NS_NewNamedThread("Socket Thread", getter_AddRefs(thread), this); |
641 | 3 | if (NS_FAILED(rv)) return rv; |
642 | 3 | |
643 | 3 | { |
644 | 3 | MutexAutoLock lock(mLock); |
645 | 3 | // Install our mThread, protecting against concurrent readers |
646 | 3 | thread.swap(mThread); |
647 | 3 | } |
648 | 3 | |
649 | 3 | Preferences::RegisterCallbacks(PrefCallback, gCallbackPrefs, this); |
650 | 3 | UpdatePrefs(); |
651 | 3 | |
652 | 3 | nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService(); |
653 | 3 | if (obsSvc) { |
654 | 3 | obsSvc->AddObserver(this, "profile-initial-state", false); |
655 | 3 | obsSvc->AddObserver(this, "last-pb-context-exited", false); |
656 | 3 | obsSvc->AddObserver(this, NS_WIDGET_SLEEP_OBSERVER_TOPIC, true); |
657 | 3 | obsSvc->AddObserver(this, NS_WIDGET_WAKE_OBSERVER_TOPIC, true); |
658 | 3 | obsSvc->AddObserver(this, "xpcom-shutdown-threads", false); |
659 | 3 | obsSvc->AddObserver(this, NS_NETWORK_LINK_TOPIC, false); |
660 | 3 | } |
661 | 3 | |
662 | 3 | mInitialized = true; |
663 | 3 | return NS_OK; |
664 | 3 | } |
665 | | |
666 | | // called from main thread only |
667 | | NS_IMETHODIMP |
668 | | nsSocketTransportService::Shutdown(bool aXpcomShutdown) |
669 | 0 | { |
670 | 0 | SOCKET_LOG(("nsSocketTransportService::Shutdown\n")); |
671 | 0 |
|
672 | 0 | NS_ENSURE_STATE(NS_IsMainThread()); |
673 | 0 |
|
674 | 0 | if (!mInitialized) |
675 | 0 | return NS_OK; |
676 | 0 | |
677 | 0 | if (mShuttingDown) |
678 | 0 | return NS_ERROR_UNEXPECTED; |
679 | 0 | |
680 | 0 | { |
681 | 0 | MutexAutoLock lock(mLock); |
682 | 0 |
|
683 | 0 | // signal the socket thread to shutdown |
684 | 0 | mShuttingDown = true; |
685 | 0 |
|
686 | 0 | if (mPollableEvent) { |
687 | 0 | mPollableEvent->Signal(); |
688 | 0 | } |
689 | 0 | } |
690 | 0 |
|
691 | 0 | if (!aXpcomShutdown) { |
692 | 0 | return ShutdownThread(); |
693 | 0 | } |
694 | 0 | |
695 | 0 | return NS_OK; |
696 | 0 | } |
697 | | |
698 | | nsresult |
699 | | nsSocketTransportService::ShutdownThread() |
700 | 0 | { |
701 | 0 | SOCKET_LOG(("nsSocketTransportService::ShutdownThread\n")); |
702 | 0 |
|
703 | 0 | NS_ENSURE_STATE(NS_IsMainThread()); |
704 | 0 |
|
705 | 0 | if (!mInitialized || !mShuttingDown) |
706 | 0 | return NS_OK; |
707 | 0 | |
708 | 0 | // join with thread |
709 | 0 | mThread->Shutdown(); |
710 | 0 | { |
711 | 0 | MutexAutoLock lock(mLock); |
712 | 0 | // Drop our reference to mThread and make sure that any concurrent |
713 | 0 | // readers are excluded |
714 | 0 | mThread = nullptr; |
715 | 0 | } |
716 | 0 |
|
717 | 0 | Preferences::UnregisterCallbacks(PrefCallback, gCallbackPrefs, this); |
718 | 0 |
|
719 | 0 | nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService(); |
720 | 0 | if (obsSvc) { |
721 | 0 | obsSvc->RemoveObserver(this, "profile-initial-state"); |
722 | 0 | obsSvc->RemoveObserver(this, "last-pb-context-exited"); |
723 | 0 | obsSvc->RemoveObserver(this, NS_WIDGET_SLEEP_OBSERVER_TOPIC); |
724 | 0 | obsSvc->RemoveObserver(this, NS_WIDGET_WAKE_OBSERVER_TOPIC); |
725 | 0 | obsSvc->RemoveObserver(this, "xpcom-shutdown-threads"); |
726 | 0 | obsSvc->RemoveObserver(this, NS_NETWORK_LINK_TOPIC); |
727 | 0 | } |
728 | 0 |
|
729 | 0 | if (mAfterWakeUpTimer) { |
730 | 0 | mAfterWakeUpTimer->Cancel(); |
731 | 0 | mAfterWakeUpTimer = nullptr; |
732 | 0 | } |
733 | 0 |
|
734 | 0 | IOActivityMonitor::Shutdown(); |
735 | 0 |
|
736 | 0 | mInitialized = false; |
737 | 0 | mShuttingDown = false; |
738 | 0 |
|
739 | 0 | return NS_OK; |
740 | 0 | } |
741 | | |
742 | | NS_IMETHODIMP |
743 | | nsSocketTransportService::GetOffline(bool *offline) |
744 | 0 | { |
745 | 0 | *offline = mOffline; |
746 | 0 | return NS_OK; |
747 | 0 | } |
748 | | |
749 | | NS_IMETHODIMP |
750 | | nsSocketTransportService::SetOffline(bool offline) |
751 | 3 | { |
752 | 3 | MutexAutoLock lock(mLock); |
753 | 3 | if (!mOffline && offline) { |
754 | 0 | // signal the socket thread to go offline, so it will detach sockets |
755 | 0 | mGoingOffline = true; |
756 | 0 | mOffline = true; |
757 | 0 | } |
758 | 3 | else if (mOffline && !offline) { |
759 | 0 | mOffline = false; |
760 | 0 | } |
761 | 3 | if (mPollableEvent) { |
762 | 0 | mPollableEvent->Signal(); |
763 | 0 | } |
764 | 3 | |
765 | 3 | return NS_OK; |
766 | 3 | } |
767 | | |
768 | | NS_IMETHODIMP |
769 | | nsSocketTransportService::GetKeepaliveIdleTime(int32_t *aKeepaliveIdleTimeS) |
770 | 0 | { |
771 | 0 | MOZ_ASSERT(aKeepaliveIdleTimeS); |
772 | 0 | if (NS_WARN_IF(!aKeepaliveIdleTimeS)) { |
773 | 0 | return NS_ERROR_NULL_POINTER; |
774 | 0 | } |
775 | 0 | *aKeepaliveIdleTimeS = mKeepaliveIdleTimeS; |
776 | 0 | return NS_OK; |
777 | 0 | } |
778 | | |
779 | | NS_IMETHODIMP |
780 | | nsSocketTransportService::GetKeepaliveRetryInterval(int32_t *aKeepaliveRetryIntervalS) |
781 | 0 | { |
782 | 0 | MOZ_ASSERT(aKeepaliveRetryIntervalS); |
783 | 0 | if (NS_WARN_IF(!aKeepaliveRetryIntervalS)) { |
784 | 0 | return NS_ERROR_NULL_POINTER; |
785 | 0 | } |
786 | 0 | *aKeepaliveRetryIntervalS = mKeepaliveRetryIntervalS; |
787 | 0 | return NS_OK; |
788 | 0 | } |
789 | | |
790 | | NS_IMETHODIMP |
791 | | nsSocketTransportService::GetKeepaliveProbeCount(int32_t *aKeepaliveProbeCount) |
792 | 0 | { |
793 | 0 | MOZ_ASSERT(aKeepaliveProbeCount); |
794 | 0 | if (NS_WARN_IF(!aKeepaliveProbeCount)) { |
795 | 0 | return NS_ERROR_NULL_POINTER; |
796 | 0 | } |
797 | 0 | *aKeepaliveProbeCount = mKeepaliveProbeCount; |
798 | 0 | return NS_OK; |
799 | 0 | } |
800 | | |
801 | | NS_IMETHODIMP |
802 | | nsSocketTransportService::CreateTransport(const char **types, |
803 | | uint32_t typeCount, |
804 | | const nsACString &host, |
805 | | int32_t port, |
806 | | nsIProxyInfo *proxyInfo, |
807 | | nsISocketTransport **result) |
808 | 0 | { |
809 | 0 | return CreateRoutedTransport(types, typeCount, host, port, NS_LITERAL_CSTRING(""), 0, |
810 | 0 | proxyInfo, result); |
811 | 0 | } |
812 | | |
813 | | NS_IMETHODIMP |
814 | | nsSocketTransportService::CreateRoutedTransport(const char **types, |
815 | | uint32_t typeCount, |
816 | | const nsACString &host, |
817 | | int32_t port, |
818 | | const nsACString &hostRoute, |
819 | | int32_t portRoute, |
820 | | nsIProxyInfo *proxyInfo, |
821 | | nsISocketTransport **result) |
822 | 0 | { |
823 | 0 | NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED); |
824 | 0 | NS_ENSURE_TRUE(port >= 0 && port <= 0xFFFF, NS_ERROR_ILLEGAL_VALUE); |
825 | 0 |
|
826 | 0 | RefPtr<nsSocketTransport> trans = new nsSocketTransport(); |
827 | 0 | nsresult rv = trans->Init(types, typeCount, host, port, hostRoute, portRoute, proxyInfo); |
828 | 0 | if (NS_FAILED(rv)) { |
829 | 0 | return rv; |
830 | 0 | } |
831 | 0 | |
832 | 0 | trans.forget(result); |
833 | 0 | return NS_OK; |
834 | 0 | } |
835 | | |
836 | | NS_IMETHODIMP |
837 | | nsSocketTransportService::CreateUnixDomainTransport(nsIFile *aPath, |
838 | | nsISocketTransport **result) |
839 | 0 | { |
840 | 0 | #ifdef XP_UNIX |
841 | 0 | nsresult rv; |
842 | 0 |
|
843 | 0 | NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED); |
844 | 0 |
|
845 | 0 | nsAutoCString path; |
846 | 0 | rv = aPath->GetNativePath(path); |
847 | 0 | if (NS_FAILED(rv)) |
848 | 0 | return rv; |
849 | 0 | |
850 | 0 | RefPtr<nsSocketTransport> trans = new nsSocketTransport(); |
851 | 0 |
|
852 | 0 | rv = trans->InitWithFilename(path.get()); |
853 | 0 | if (NS_FAILED(rv)) |
854 | 0 | return rv; |
855 | 0 | |
856 | 0 | trans.forget(result); |
857 | 0 | return NS_OK; |
858 | | #else |
859 | | return NS_ERROR_SOCKET_ADDRESS_NOT_SUPPORTED; |
860 | | #endif |
861 | | } |
862 | | |
863 | | NS_IMETHODIMP |
864 | | nsSocketTransportService::CreateUnixDomainAbstractAddressTransport( |
865 | | const nsACString& aName, |
866 | | nsISocketTransport **result) |
867 | 0 | { |
868 | 0 | // Abstract socket address is supported on Linux only |
869 | 0 | #ifdef XP_LINUX |
870 | 0 | RefPtr<nsSocketTransport> trans = new nsSocketTransport(); |
871 | 0 | // First character of Abstract socket address is null |
872 | 0 | UniquePtr<char[]> name(new char[aName.Length() + 1]); |
873 | 0 | *(name.get()) = 0; |
874 | 0 | memcpy(name.get() + 1, aName.BeginReading(), aName.Length()); |
875 | 0 | nsresult rv = trans->InitWithName(name.get(), aName.Length() + 1); |
876 | 0 | if (NS_FAILED(rv)) { |
877 | 0 | return rv; |
878 | 0 | } |
879 | 0 | |
880 | 0 | trans.forget(result); |
881 | 0 | return NS_OK; |
882 | | #else |
883 | | return NS_ERROR_SOCKET_ADDRESS_NOT_SUPPORTED; |
884 | | #endif |
885 | | } |
886 | | |
// nsIThreadObserver hook: called whenever an event is dispatched to the
// socket thread. Wakes the thread out of poll() by signalling the pollable
// event (unless we are already on the socket thread and no wakeup is needed).
NS_IMETHODIMP
nsSocketTransportService::OnDispatchedEvent()
{
#ifndef XP_WIN
    // On windows poll can hang and this became worse when we introduced the
    // patch for bug 698882 (see also bug 1292181), therefore we reverted the
    // behavior on windows to be as before bug 698882, e.g. write to the socket
    // also if an event dispatch is on the socket thread and writing to the
    // socket for each event.
    if (OnSocketThread()) {
        // this check is redundant to one done inside ::Signal(), but
        // we can do it here and skip obtaining the lock - given that
        // this is a relatively common occurrence it's worth the
        // redundant code
        SOCKET_LOG(("OnDispatchedEvent Same Thread Skip Signal\n"));
        return NS_OK;
    }
#else
    if (gIOService->IsNetTearingDown()) {
        // Poll can hang sometimes. If we are in shutdown, we are going to
        // start a watchdog. If we do not exit poll within
        // REPAIR_POLLABLE_EVENT_TIME signal a pollable event again.
        StartPollWatchdog();
    }
#endif

    // Signal the pollable event so the socket thread returns from poll()
    // and processes the newly dispatched event. mPollableEvent may be null
    // if creation failed at thread startup (busy-wait mode).
    MutexAutoLock lock(mLock);
    if (mPollableEvent) {
        mPollableEvent->Signal();
    }
    return NS_OK;
}
919 | | |
// nsIThreadObserver hook; no work is required before the socket thread
// processes an event.
NS_IMETHODIMP
nsSocketTransportService::OnProcessNextEvent(nsIThreadInternal *thread,
                                             bool mayWait)
{
    return NS_OK;
}
926 | | |
// nsIThreadObserver hook; no work is required after the socket thread
// processes an event.
NS_IMETHODIMP
nsSocketTransportService::AfterProcessNextEvent(nsIThreadInternal* thread,
                                                bool eventWasProcessed)
{
    return NS_OK;
}
933 | | |
// Runnable target dispatched from Run(): it is queued behind all events that
// were pending at dispatch time, so when it executes the queue drain has
// reached that marker and Run() may stop serving events for this iteration.
void
nsSocketTransportService::MarkTheLastElementOfPendingQueue()
{
    mServingPendingQueue = false;
}
939 | | |
// Socket thread main loop. Sets up the pollable wakeup event and thread
// observer, then alternates between DoPollIteration() and draining the
// thread's event queue until mShuttingDown is set. Telemetry sampling is
// interleaved throughout and is active only when mTelemetryEnabledPref.
NS_IMETHODIMP
nsSocketTransportService::Run()
{
    SOCKET_LOG(("STS thread init %d sockets\n", gMaxCount));

#if defined(XP_WIN)
    // see bug 1361495, gethostname() triggers winsock initialization.
    // so do it here (on parent and child) to protect against it being done first
    // accidentally on the main thread.. especially via PR_GetSystemInfo(). This
    // will also improve latency of first real winsock operation
    // ..
    // If STS-thread is no longer needed this should still be run before exiting

    char ignoredStackBuffer[255];
    Unused << gethostname(ignoredStackBuffer, 255);
#endif

    psm::InitializeSSLServerCertVerificationThreads();

    gSocketThread = PR_GetCurrentThread();

    {
        MutexAutoLock lock(mLock);
        mPollableEvent.reset(new PollableEvent());
        //
        // NOTE: per bug 190000, this failure could be caused by Zone-Alarm
        // or similar software.
        //
        // NOTE: per bug 191739, this failure could also be caused by lack
        // of a loopback device on Windows and OS/2 platforms (it creates
        // a loopback socket pair on these platforms to implement a pollable
        // event object). if we can't create a pollable event, then we'll
        // have to "busy wait" to implement the socket event queue :-(
        //
        if (!mPollableEvent->Valid()) {
            mPollableEvent = nullptr;
            NS_WARNING("running socket transport thread without a pollable event");
            SOCKET_LOG(("running socket transport thread without a pollable event"));
        }

        // Slot 0 of mPollList is reserved for the pollable event; real
        // sockets start at index 1 (see DoPollIteration's i+1 offset).
        mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr;
        mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT;
        mPollList[0].out_flags = 0;
    }

    mRawThread = NS_GetCurrentThread();

    // hook ourselves up to observe event processing for this thread
    nsCOMPtr<nsIThreadInternal> threadInt = do_QueryInterface(mRawThread);
    threadInt->SetObserver(this);

    // make sure the pseudo random number generator is seeded on this thread
    srand(static_cast<unsigned>(PR_Now()));

    // For the calculation of the duration of the last cycle (i.e. the last for-loop
    // iteration before shutdown).
    TimeStamp startOfCycleForLastCycleCalc;
    int numberOfPendingEventsLastCycle;

    // For measuring of the poll iteration duration without time spent blocked
    // in poll().
    TimeStamp pollCycleStart;
    // Time blocked in poll().
    TimeDuration singlePollDuration;

    // For calculating the time needed for a new element to run.
    TimeStamp startOfIteration;
    TimeStamp startOfNextIteration;
    int numberOfPendingEvents;

    // If there is too many pending events queued, we will run some poll()
    // between them and the following variable is cumulative time spent
    // blocking in poll().
    TimeDuration pollDuration;

    for (;;) {
        bool pendingEvents = false;

        numberOfPendingEvents = 0;
        numberOfPendingEventsLastCycle = 0;
        if (mTelemetryEnabledPref) {
            startOfCycleForLastCycleCalc = TimeStamp::NowLoRes();
            startOfNextIteration = TimeStamp::NowLoRes();
        }
        pollDuration = nullptr;

        do {
            if (mTelemetryEnabledPref) {
                pollCycleStart = TimeStamp::NowLoRes();
            }

            DoPollIteration(&singlePollDuration);

            if (mTelemetryEnabledPref && !pollCycleStart.IsNull()) {
                Telemetry::Accumulate(Telemetry::STS_POLL_BLOCK_TIME,
                                      singlePollDuration.ToMilliseconds());
                Telemetry::AccumulateTimeDelta(
                    Telemetry::STS_POLL_CYCLE,
                    pollCycleStart + singlePollDuration,
                    TimeStamp::NowLoRes());
                pollDuration += singlePollDuration;
            }

            mRawThread->HasPendingEvents(&pendingEvents);
            if (pendingEvents) {
                if (!mServingPendingQueue) {
                    // Queue a marker runnable behind the currently pending
                    // events; when it runs it clears mServingPendingQueue,
                    // bounding how much of the queue we drain per iteration.
                    nsresult rv = Dispatch(
                        NewRunnableMethod("net::nsSocketTransportService::"
                                          "MarkTheLastElementOfPendingQueue",
                                          this,
                                          &nsSocketTransportService::
                                            MarkTheLastElementOfPendingQueue),
                        nsIEventTarget::DISPATCH_NORMAL);
                    if (NS_FAILED(rv)) {
                        NS_WARNING("Could not dispatch a new event on the "
                                   "socket thread.");
                    } else {
                        mServingPendingQueue = true;
                    }

                    if (mTelemetryEnabledPref) {
                        startOfIteration = startOfNextIteration;
                        // Everything that comes after this point will
                        // be served in the next iteration. If no event
                        // arrives, startOfNextIteration will be reset at the
                        // beginning of each for-loop.
                        startOfNextIteration = TimeStamp::NowLoRes();
                    }
                }
                TimeStamp eventQueueStart = TimeStamp::NowLoRes();
                // Drain events until the marker fires, the queue empties, or
                // we exceed mMaxTimePerPollIter milliseconds.
                do {
                    NS_ProcessNextEvent(mRawThread);
                    numberOfPendingEvents++;
                    pendingEvents = false;
                    mRawThread->HasPendingEvents(&pendingEvents);
                } while (pendingEvents && mServingPendingQueue &&
                         ((TimeStamp::NowLoRes() -
                           eventQueueStart).ToMilliseconds() <
                          mMaxTimePerPollIter));

                if (mTelemetryEnabledPref && !mServingPendingQueue &&
                    !startOfIteration.IsNull()) {
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENTS_CYCLE,
                        startOfIteration + pollDuration,
                        TimeStamp::NowLoRes());

                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
                        numberOfPendingEvents);

                    numberOfPendingEventsLastCycle += numberOfPendingEvents;
                    numberOfPendingEvents = 0;
                    pollDuration = nullptr;
                }
            }
        } while (pendingEvents);

        bool goingOffline = false;
        // now that our event queue is empty, check to see if we should exit
        {
            MutexAutoLock lock(mLock);
            if (mShuttingDown) {
                if (mTelemetryEnabledPref &&
                    !startOfCycleForLastCycleCalc.IsNull()) {
                    Telemetry::Accumulate(
                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE,
                        numberOfPendingEventsLastCycle);
                    Telemetry::AccumulateTimeDelta(
                        Telemetry::STS_POLL_AND_EVENT_THE_LAST_CYCLE,
                        startOfCycleForLastCycleCalc,
                        TimeStamp::NowLoRes());
                }
                break;
            }
            if (mGoingOffline) {
                mGoingOffline = false;
                goingOffline = true;
            }
        }
        // Avoid potential deadlock: Reset() is called outside mLock.
        if (goingOffline)
            Reset(true);
    }

    SOCKET_LOG(("STS shutting down thread\n"));

    // detach all sockets, including locals
    Reset(false);

    // Final pass over the event queue. This makes sure that events posted by
    // socket detach handlers get processed.
    NS_ProcessPendingEvents(mRawThread);

    // Stopping the SSL threads can generate new events, so we need to
    // process them before nulling out gSocketThread, otherwise we can get
    // !onSocketThread assertions.
    psm::StopSSLServerCertVerificationThreads();
    NS_ProcessPendingEvents(mRawThread);

    gSocketThread = nullptr;

    SOCKET_LOG(("STS thread exit\n"));

    return NS_OK;
}
1146 | | |
1147 | | void |
1148 | | nsSocketTransportService::DetachSocketWithGuard(bool aGuardLocals, |
1149 | | SocketContext *socketList, |
1150 | | int32_t index) |
1151 | 0 | { |
1152 | 0 | bool isGuarded = false; |
1153 | 0 | if (aGuardLocals) { |
1154 | 0 | socketList[index].mHandler->IsLocal(&isGuarded); |
1155 | 0 | if (!isGuarded) |
1156 | 0 | socketList[index].mHandler->KeepWhenOffline(&isGuarded); |
1157 | 0 | } |
1158 | 0 | if (!isGuarded) |
1159 | 0 | DetachSocket(socketList, &socketList[index]); |
1160 | 0 | } |
1161 | | |
1162 | | void |
1163 | | nsSocketTransportService::Reset(bool aGuardLocals) |
1164 | 0 | { |
1165 | 0 | // detach any sockets |
1166 | 0 | int32_t i; |
1167 | 0 | for (i = mActiveCount - 1; i >= 0; --i) { |
1168 | 0 | DetachSocketWithGuard(aGuardLocals, mActiveList, i); |
1169 | 0 | } |
1170 | 0 | for (i = mIdleCount - 1; i >= 0; --i) { |
1171 | 0 | DetachSocketWithGuard(aGuardLocals, mIdleList, i); |
1172 | 0 | } |
1173 | 0 | } |
1174 | | |
// One iteration of the socket thread's poll cycle: rebalance the active and
// idle lists, call Poll() (unless tearing down), then service sockets whose
// poll flags fired or whose timeout expired. |*pollDuration| receives the
// time spent blocked inside poll. Note mPollList[0] is the pollable wakeup
// event, so active socket i maps to mPollList[i+1].
nsresult
nsSocketTransportService::DoPollIteration(TimeDuration *pollDuration)
{
    SOCKET_LOG(("STS poll iter\n"));

    PRIntervalTime now = PR_IntervalNow();

    int32_t i, count;
    //
    // poll loop
    //
    // walk active list backwards to see if any sockets should actually be
    // idle, then walk the idle list backwards to see if any idle sockets
    // should become active. take care to check only idle sockets that
    // were idle to begin with ;-)
    //
    count = mIdleCount;
    for (i=mActiveCount-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  active [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n", i,
                    mActiveList[i].mHandler,
                    static_cast<uint32_t>(mActiveList[i].mHandler->mCondition),
                    mActiveList[i].mHandler->mPollFlags));
        //---
        // A failed condition means the socket is dead: detach it. A zero
        // poll-flag mask means the handler wants no notifications: park it
        // on the idle list.
        if (NS_FAILED(mActiveList[i].mHandler->mCondition)) {
            DetachSocket(mActiveList, &mActiveList[i]);
        } else {
            uint16_t in_flags = mActiveList[i].mHandler->mPollFlags;
            if (in_flags == 0) {
                MoveToIdleList(&mActiveList[i]);
            } else {
                // update poll flags
                mPollList[i+1].in_flags = in_flags;
                mPollList[i+1].out_flags = 0;
                mActiveList[i].EnsureTimeout(now);
            }
        }
    }
    for (i=count-1; i>=0; --i) {
        //---
        SOCKET_LOG(("  idle [%u] { handler=%p condition=%" PRIx32 " pollflags=%hu }\n", i,
                    mIdleList[i].mHandler,
                    static_cast<uint32_t>(mIdleList[i].mHandler->mCondition),
                    mIdleList[i].mHandler->mPollFlags));
        //---
        if (NS_FAILED(mIdleList[i].mHandler->mCondition))
            DetachSocket(mIdleList, &mIdleList[i]);
        else if (mIdleList[i].mHandler->mPollFlags != 0)
            MoveToPollList(&mIdleList[i]);
    }

    {
        MutexAutoLock lock(mLock);
        if (mPollableEvent) {
            // we want to make sure the timeout is measured from the time
            // we enter poll(). This method resets the timestamp to 'now',
            // if we were first signalled between leaving poll() and here.
            // If we didn't do this and processing events took longer than
            // the allowed signal timeout, we would detect it as a
            // false-positive. AdjustFirstSignalTimestamp is then a no-op
            // until mPollableEvent->Clear() is called.
            mPollableEvent->AdjustFirstSignalTimestamp();
        }
    }

    SOCKET_LOG(("  calling PR_Poll [active=%u idle=%u]\n", mActiveCount, mIdleCount));

#if defined(XP_WIN)
    // 30 active connections is the historic limit before firefox 7's 256. A few
    // windows systems have troubles with the higher limit, so actively probe a
    // limit the first time we exceed 30.
    if ((mActiveCount > 30) && !mProbedMaxCount)
        ProbeMaxCount();
#endif

    // Measures seconds spent while blocked on PR_Poll
    int32_t n = 0;
    *pollDuration = nullptr;

    if (!gIOService->IsNetTearingDown()) {
        // Let's not do polling during shutdown.
#if defined(XP_WIN)
        StartPolling();
#endif
        n = Poll(pollDuration, now);
#if defined(XP_WIN)
        EndPolling();
#endif
    }

    now = PR_IntervalNow();

    if (n < 0) {
        SOCKET_LOG(("  PR_Poll error [%d] os error [%d]\n", PR_GetError(),
                    PR_GetOSError()));
    }
    else {
        //
        // service "active" sockets...
        //
        uint32_t numberOfOnSocketReadyCalls = 0;
        for (i=0; i<int32_t(mActiveCount); ++i) {
            PRPollDesc &desc = mPollList[i+1];
            SocketContext &s = mActiveList[i];
            if (n > 0 && desc.out_flags != 0) {
#ifdef MOZ_TASK_TRACER
                tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
                s.DisengageTimeout();
                s.mHandler->OnSocketReady(desc.fd, desc.out_flags);
                numberOfOnSocketReadyCalls++;
            } else if (s.IsTimedOut(now)) {
#ifdef MOZ_TASK_TRACER
                tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
                // Timeout is reported to the handler as out_flags == -1.
                SOCKET_LOG(("socket %p timed out", s.mHandler));
                s.DisengageTimeout();
                s.mHandler->OnSocketReady(desc.fd, -1);
                numberOfOnSocketReadyCalls++;
            } else {
                s.MaybeResetEpoch();
            }
        }
        if (mTelemetryEnabledPref) {
            Telemetry::Accumulate(
                Telemetry::STS_NUMBER_OF_ONSOCKETREADY_CALLS,
                numberOfOnSocketReadyCalls);
        }

        //
        // check for "dead" sockets and remove them (need to do this in
        // reverse order obviously).
        //
        for (i=mActiveCount-1; i>=0; --i) {
            if (NS_FAILED(mActiveList[i].mHandler->mCondition))
                DetachSocket(mActiveList, &mActiveList[i]);
        }

        {
            MutexAutoLock lock(mLock);
            // acknowledge pollable event (should not block)
            if (n != 0 &&
                (mPollList[0].out_flags & (PR_POLL_READ | PR_POLL_EXCEPT)) &&
                mPollableEvent &&
                ((mPollList[0].out_flags & PR_POLL_EXCEPT) || !mPollableEvent->Clear())) {
                // On Windows, the TCP loopback connection in the
                // pollable event may become broken when a laptop
                // switches between wired and wireless networks or
                // wakes up from hibernation. We try to create a
                // new pollable event. If that fails, we fall back
                // on "busy wait".
                TryRepairPollableEvent();
            }

            if (mPollableEvent &&
                !mPollableEvent->IsSignallingAlive(mPollableEventTimeout)) {
                SOCKET_LOG(("Pollable event signalling failed/timed out"));
                TryRepairPollableEvent();
            }
        }
    }

    return NS_OK;
}
1339 | | |
1340 | | void |
1341 | | nsSocketTransportService::UpdateSendBufferPref() |
1342 | 3 | { |
1343 | 3 | int32_t bufferSize; |
1344 | 3 | |
1345 | 3 | // If the pref is set, honor it. 0 means use OS defaults. |
1346 | 3 | nsresult rv = Preferences::GetInt(SEND_BUFFER_PREF, &bufferSize); |
1347 | 3 | if (NS_SUCCEEDED(rv)) { |
1348 | 0 | mSendBufferSize = bufferSize; |
1349 | 0 | return; |
1350 | 0 | } |
1351 | 3 | |
1352 | | #if defined(XP_WIN) |
1353 | | mSendBufferSize = 131072 * 4; |
1354 | | #endif |
1355 | | } |
1356 | | |
1357 | | nsresult |
1358 | | nsSocketTransportService::UpdatePrefs() |
1359 | 3 | { |
1360 | 3 | mSendBufferSize = 0; |
1361 | 3 | |
1362 | 3 | UpdateSendBufferPref(); |
1363 | 3 | |
1364 | 3 | // Default TCP Keepalive Values. |
1365 | 3 | int32_t keepaliveIdleTimeS; |
1366 | 3 | nsresult rv = Preferences::GetInt(KEEPALIVE_IDLE_TIME_PREF, |
1367 | 3 | &keepaliveIdleTimeS); |
1368 | 3 | if (NS_SUCCEEDED(rv)) |
1369 | 3 | mKeepaliveIdleTimeS = clamped(keepaliveIdleTimeS, |
1370 | 3 | 1, kMaxTCPKeepIdle); |
1371 | 3 | |
1372 | 3 | int32_t keepaliveRetryIntervalS; |
1373 | 3 | rv = Preferences::GetInt(KEEPALIVE_RETRY_INTERVAL_PREF, |
1374 | 3 | &keepaliveRetryIntervalS); |
1375 | 3 | if (NS_SUCCEEDED(rv)) |
1376 | 3 | mKeepaliveRetryIntervalS = clamped(keepaliveRetryIntervalS, |
1377 | 3 | 1, kMaxTCPKeepIntvl); |
1378 | 3 | |
1379 | 3 | int32_t keepaliveProbeCount; |
1380 | 3 | rv = Preferences::GetInt(KEEPALIVE_PROBE_COUNT_PREF, &keepaliveProbeCount); |
1381 | 3 | if (NS_SUCCEEDED(rv)) |
1382 | 3 | mKeepaliveProbeCount = clamped(keepaliveProbeCount, |
1383 | 3 | 1, kMaxTCPKeepCount); |
1384 | 3 | bool keepaliveEnabled = false; |
1385 | 3 | rv = Preferences::GetBool(KEEPALIVE_ENABLED_PREF, &keepaliveEnabled); |
1386 | 3 | if (NS_SUCCEEDED(rv) && keepaliveEnabled != mKeepaliveEnabledPref) { |
1387 | 3 | mKeepaliveEnabledPref = keepaliveEnabled; |
1388 | 3 | OnKeepaliveEnabledPrefChange(); |
1389 | 3 | } |
1390 | 3 | |
1391 | 3 | int32_t maxTimePref; |
1392 | 3 | rv = Preferences::GetInt(MAX_TIME_BETWEEN_TWO_POLLS, &maxTimePref); |
1393 | 3 | if (NS_SUCCEEDED(rv) && maxTimePref >= 0) { |
1394 | 3 | mMaxTimePerPollIter = maxTimePref; |
1395 | 3 | } |
1396 | 3 | |
1397 | 3 | int32_t pollBusyWaitPeriod; |
1398 | 3 | rv = Preferences::GetInt(POLL_BUSY_WAIT_PERIOD, &pollBusyWaitPeriod); |
1399 | 3 | if (NS_SUCCEEDED(rv) && pollBusyWaitPeriod > 0) { |
1400 | 3 | mNetworkLinkChangeBusyWaitPeriod = PR_SecondsToInterval(pollBusyWaitPeriod); |
1401 | 3 | } |
1402 | 3 | |
1403 | 3 | int32_t pollBusyWaitPeriodTimeout; |
1404 | 3 | rv = Preferences::GetInt(POLL_BUSY_WAIT_PERIOD_TIMEOUT, &pollBusyWaitPeriodTimeout); |
1405 | 3 | if (NS_SUCCEEDED(rv) && pollBusyWaitPeriodTimeout > 0) { |
1406 | 3 | mNetworkLinkChangeBusyWaitTimeout = PR_SecondsToInterval(pollBusyWaitPeriodTimeout); |
1407 | 3 | } |
1408 | 3 | |
1409 | 3 | bool telemetryPref = false; |
1410 | 3 | rv = Preferences::GetBool(TELEMETRY_PREF, &telemetryPref); |
1411 | 3 | if (NS_SUCCEEDED(rv)) { |
1412 | 3 | mTelemetryEnabledPref = telemetryPref; |
1413 | 3 | } |
1414 | 3 | |
1415 | 3 | int32_t maxTimeForPrClosePref; |
1416 | 3 | rv = Preferences::GetInt(MAX_TIME_FOR_PR_CLOSE_DURING_SHUTDOWN, |
1417 | 3 | &maxTimeForPrClosePref); |
1418 | 3 | if (NS_SUCCEEDED(rv) && maxTimeForPrClosePref >=0) { |
1419 | 3 | mMaxTimeForPrClosePref = PR_MillisecondsToInterval(maxTimeForPrClosePref); |
1420 | 3 | } |
1421 | 3 | |
1422 | 3 | int32_t pollableEventTimeout; |
1423 | 3 | rv = Preferences::GetInt(POLLABLE_EVENT_TIMEOUT, &pollableEventTimeout); |
1424 | 3 | if (NS_SUCCEEDED(rv) && pollableEventTimeout >= 0) { |
1425 | 3 | MutexAutoLock lock(mLock); |
1426 | 3 | mPollableEventTimeout = TimeDuration::FromSeconds(pollableEventTimeout); |
1427 | 3 | } |
1428 | 3 | |
1429 | 3 | bool esniPref = false; |
1430 | 3 | rv = Preferences::GetBool(ESNI_ENABLED, &esniPref); |
1431 | 3 | if (NS_SUCCEEDED(rv)) { |
1432 | 3 | mEsniEnabled = esniPref; |
1433 | 3 | } |
1434 | 3 | |
1435 | 3 | bool esniMitmPref = false; |
1436 | 3 | rv = Preferences::GetBool(ESNI_DISABLED_MITM, &esniMitmPref); |
1437 | 3 | if (NS_SUCCEEDED(rv)) { |
1438 | 3 | mTrustedMitmDetected = esniMitmPref; |
1439 | 3 | } |
1440 | 3 | |
1441 | 3 | return NS_OK; |
1442 | 3 | } |
1443 | | |
1444 | | void |
1445 | | nsSocketTransportService::OnKeepaliveEnabledPrefChange() |
1446 | 6 | { |
1447 | 6 | // Dispatch to socket thread if we're not executing there. |
1448 | 6 | if (!OnSocketThread()) { |
1449 | 3 | gSocketTransportService->Dispatch( |
1450 | 3 | NewRunnableMethod( |
1451 | 3 | "net::nsSocketTransportService::OnKeepaliveEnabledPrefChange", |
1452 | 3 | this, |
1453 | 3 | &nsSocketTransportService::OnKeepaliveEnabledPrefChange), |
1454 | 3 | NS_DISPATCH_NORMAL); |
1455 | 3 | return; |
1456 | 3 | } |
1457 | 3 | |
1458 | 3 | SOCKET_LOG(("nsSocketTransportService::OnKeepaliveEnabledPrefChange %s", |
1459 | 3 | mKeepaliveEnabledPref ? "enabled" : "disabled")); |
1460 | 3 | |
1461 | 3 | // Notify each socket that keepalive has been en/disabled globally. |
1462 | 3 | for (int32_t i = mActiveCount - 1; i >= 0; --i) { |
1463 | 0 | NotifyKeepaliveEnabledPrefChange(&mActiveList[i]); |
1464 | 0 | } |
1465 | 3 | for (int32_t i = mIdleCount - 1; i >= 0; --i) { |
1466 | 0 | NotifyKeepaliveEnabledPrefChange(&mIdleList[i]); |
1467 | 0 | } |
1468 | 3 | } |
1469 | | |
// Forward the current global keepalive-enabled setting to one socket's
// handler. Asserts on a null context/handler in debug builds and bails out
// gracefully in release builds.
void
nsSocketTransportService::NotifyKeepaliveEnabledPrefChange(SocketContext *sock)
{
    MOZ_ASSERT(sock, "SocketContext cannot be null!");
    MOZ_ASSERT(sock->mHandler, "SocketContext does not have a handler!");

    // Release-build guard mirroring the assertions above.
    if (!sock || !sock->mHandler) {
        return;
    }

#ifdef MOZ_TASK_TRACER
    tasktracer::AutoSourceEvent taskTracerEvent(tasktracer::SourceEventType::SocketIO);
#endif
    sock->mHandler->OnKeepaliveEnabledPrefChange(mKeepaliveEnabledPref);
}
1485 | | |
// nsIObserver entry point: reacts to profile startup, private-browsing exit,
// timer callbacks, sleep/wake notifications, thread shutdown, and network
// link changes.
NS_IMETHODIMP
nsSocketTransportService::Observe(nsISupports *subject,
                                  const char *topic,
                                  const char16_t *data)
{
    SOCKET_LOG(("nsSocketTransportService::Observe topic=%s", topic));

    // Profile ready: start IO activity monitoring if the pref enables it.
    if (!strcmp(topic, "profile-initial-state")) {
        if (!Preferences::GetBool(IO_ACTIVITY_ENABLED_PREF, false)) {
            return NS_OK;
        }
        return net::IOActivityMonitor::Init();
    }

    // Last private-browsing window closed: tear down private connections on
    // the socket thread.
    if (!strcmp(topic, "last-pb-context-exited")) {
        nsCOMPtr<nsIRunnable> ev = NewRunnableMethod(
            "net::nsSocketTransportService::ClosePrivateConnections",
            this,
            &nsSocketTransportService::ClosePrivateConnections);
        nsresult rv = Dispatch(ev, nsIEventTarget::DISPATCH_NORMAL);
        NS_ENSURE_SUCCESS(rv, rv);
    }

    if (!strcmp(topic, NS_TIMER_CALLBACK_TOPIC)) {
        // The post-wake grace timer fired: leave the sleep phase.
        nsCOMPtr<nsITimer> timer = do_QueryInterface(subject);
        if (timer == mAfterWakeUpTimer) {
            mAfterWakeUpTimer = nullptr;
            mSleepPhase = false;
        }

#if defined(XP_WIN)
        if (timer == mPollRepairTimer) {
            DoPollRepair();
        }
#endif

    } else if (!strcmp(topic, NS_WIDGET_SLEEP_OBSERVER_TOPIC)) {
        // Entering system sleep: cancel any pending wake-up grace timer.
        mSleepPhase = true;
        if (mAfterWakeUpTimer) {
            mAfterWakeUpTimer->Cancel();
            mAfterWakeUpTimer = nullptr;
        }
    } else if (!strcmp(topic, NS_WIDGET_WAKE_OBSERVER_TOPIC)) {
        // Waking up: stay in the sleep phase for a 2s grace period before
        // the timer callback above clears mSleepPhase.
        if (mSleepPhase && !mAfterWakeUpTimer) {
            NS_NewTimerWithObserver(getter_AddRefs(mAfterWakeUpTimer),
                                    this, 2000, nsITimer::TYPE_ONE_SHOT);
        }
    } else if (!strcmp(topic, "xpcom-shutdown-threads")) {
        ShutdownThread();
    } else if (!strcmp(topic, NS_NETWORK_LINK_TOPIC)) {
        // Record when the link last changed and reset the MITM flag.
        mLastNetworkLinkChangeTime = PR_IntervalNow();
        mNotTrustedMitmDetected = false;
    }

    return NS_OK;
}
1542 | | |
1543 | | void |
1544 | | nsSocketTransportService::ClosePrivateConnections() |
1545 | 0 | { |
1546 | 0 | // Must be called on the socket thread. |
1547 | | #ifdef DEBUG |
1548 | | bool onSTSThread; |
1549 | | IsOnCurrentThread(&onSTSThread); |
1550 | | MOZ_ASSERT(onSTSThread); |
1551 | | #endif |
1552 | |
|
1553 | 0 | for (int32_t i = mActiveCount - 1; i >= 0; --i) { |
1554 | 0 | if (mActiveList[i].mHandler->mIsPrivate) { |
1555 | 0 | DetachSocket(mActiveList, &mActiveList[i]); |
1556 | 0 | } |
1557 | 0 | } |
1558 | 0 | for (int32_t i = mIdleCount - 1; i >= 0; --i) { |
1559 | 0 | if (mIdleList[i].mHandler->mIsPrivate) { |
1560 | 0 | DetachSocket(mIdleList, &mIdleList[i]); |
1561 | 0 | } |
1562 | 0 | } |
1563 | 0 |
|
1564 | 0 | ClearPrivateSSLState(); |
1565 | 0 | } |
1566 | | |
// Return the configured TCP send buffer size (0 = OS default; see
// UpdateSendBufferPref for how the value is derived).
NS_IMETHODIMP
nsSocketTransportService::GetSendBufferSize(int32_t *value)
{
    *value = mSendBufferSize;
    return NS_OK;
}
1573 | | |
1574 | | |
1575 | | /// ugly OS specific includes are placed at the bottom of the src for clarity |
1576 | | |
1577 | | #if defined(XP_WIN) |
1578 | | #include <windows.h> |
1579 | | #elif defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX) |
1580 | | #include <sys/resource.h> |
1581 | | #endif |
1582 | | |
// Right now the only need to do this is on windows.
#if defined(XP_WIN)
// Empirically determine how many sockets this system's PR_Poll can actually
// handle, lowering gMaxCount if needed. Runs at most once, on the socket
// thread, the first time the active count exceeds 30 (see DoPollIteration).
void
nsSocketTransportService::ProbeMaxCount()
{
    MOZ_ASSERT(OnSocketThread(), "not on socket thread");

    if (mProbedMaxCount)
        return;
    mProbedMaxCount = true;

    // Allocate and test a PR_Poll up to the gMaxCount number of unconnected
    // sockets. See bug 692260 - windows should be able to handle 1000 sockets
    // in select() without a problem, but LSPs have been known to balk at lower
    // numbers. (64 in the bug).

    // Allocate
    struct PRPollDesc pfd[SOCKET_LIMIT_TARGET];
    uint32_t numAllocated = 0;

    for (uint32_t index = 0 ; index < gMaxCount; ++index) {
        pfd[index].in_flags = PR_POLL_READ | PR_POLL_WRITE | PR_POLL_EXCEPT;
        pfd[index].out_flags = 0;
        pfd[index].fd = PR_OpenTCPSocket(PR_AF_INET);
        if (!pfd[index].fd) {
            // Ran out of sockets before reaching the target: cap gMaxCount
            // at the number we managed to open (but never below the minimum).
            SOCKET_LOG(("Socket Limit Test index %d failed\n", index));
            if (index < SOCKET_LIMIT_MIN)
                gMaxCount = SOCKET_LIMIT_MIN;
            else
                gMaxCount = index;
            break;
        }
        ++numAllocated;
    }

    // Test: shrink gMaxCount in steps of 32 until a zero-timeout PR_Poll over
    // that many descriptors succeeds (or the minimum is reached).
    static_assert(SOCKET_LIMIT_MIN >= 32U, "Minimum Socket Limit is >= 32");
    while (gMaxCount <= numAllocated) {
        int32_t rv = PR_Poll(pfd, gMaxCount, PR_MillisecondsToInterval(0));

        SOCKET_LOG(("Socket Limit Test poll() size=%d rv=%d\n",
                    gMaxCount, rv));

        if (rv >= 0)
            break;

        SOCKET_LOG(("Socket Limit Test poll confirmationSize=%d rv=%d error=%d\n",
                    gMaxCount, rv, PR_GetError()));

        gMaxCount -= 32;
        if (gMaxCount <= SOCKET_LIMIT_MIN) {
            gMaxCount = SOCKET_LIMIT_MIN;
            break;
        }
    }

    // Free the probe sockets.
    for (uint32_t index = 0 ; index < numAllocated; ++index)
        if (pfd[index].fd)
            PR_Close(pfd[index].fd);

    Telemetry::Accumulate(Telemetry::NETWORK_PROBE_MAXCOUNT, gMaxCount);
    SOCKET_LOG(("Socket Limit Test max was confirmed at %d\n", gMaxCount));
}
#endif // windows
1648 | | |
// Determine how many sockets this process may poll at once and store the
// answer in gMaxCount. Starts from the safe minimum, then raises it via
// RLIMIT_NOFILE on unix-like systems or to the fixed target on Windows.
// Always returns PR_SUCCESS.
PRStatus
nsSocketTransportService::DiscoverMaxCount()
{
    gMaxCount = SOCKET_LIMIT_MIN;

#if defined(XP_UNIX) && !defined(AIX) && !defined(NEXTSTEP) && !defined(QNX)
    // On unix and os x network sockets and file
    // descriptors are the same. OS X comes defaulted at 256,
    // most linux at 1000. We can reliably use [sg]rlimit to
    // query that and raise it if needed.

    struct rlimit rlimitData;
    if (getrlimit(RLIMIT_NOFILE, &rlimitData) == -1) // rlimit broken - use min
        return PR_SUCCESS;

    if (rlimitData.rlim_cur >= SOCKET_LIMIT_TARGET) { // larger than target!
        gMaxCount = SOCKET_LIMIT_TARGET;
        return PR_SUCCESS;
    }

    // NOTE(review): rlim_max is typically a wider unsigned type; narrowing it
    // into int32_t presumably maps RLIM_INFINITY onto -1, which the check
    // below relies on — confirm against the platform's rlim_t definition.
    int32_t maxallowed = rlimitData.rlim_max;
    if ((uint32_t)maxallowed <= SOCKET_LIMIT_MIN) {
        return PR_SUCCESS; // so small treat as if rlimit is broken
    }

    if ((maxallowed == -1) || // no hard cap - ok to set target
        ((uint32_t)maxallowed >= SOCKET_LIMIT_TARGET)) {
        maxallowed = SOCKET_LIMIT_TARGET;
    }

    // Try to raise the soft limit, then re-read it to see what actually took
    // effect (setrlimit may fail silently here; the re-read is authoritative).
    rlimitData.rlim_cur = maxallowed;
    setrlimit(RLIMIT_NOFILE, &rlimitData);
    if ((getrlimit(RLIMIT_NOFILE, &rlimitData) != -1) &&
        (rlimitData.rlim_cur > SOCKET_LIMIT_MIN)) {
        gMaxCount = rlimitData.rlim_cur;
    }

#elif defined(XP_WIN) && !defined(WIN_CE)
    // >= XP is confirmed to have at least 1000
    static_assert(SOCKET_LIMIT_TARGET <= 1000, "SOCKET_LIMIT_TARGET max value is 1000");
    gMaxCount = SOCKET_LIMIT_TARGET;
#else
    // other platforms are harder to test - so leave at safe legacy value
#endif

    return PR_SUCCESS;
}
1696 | | |
1697 | | |
1698 | | // Used to return connection info to Dashboard.cpp |
1699 | | void |
1700 | | nsSocketTransportService::AnalyzeConnection(nsTArray<SocketInfo> *data, |
1701 | | struct SocketContext *context, bool aActive) |
1702 | 0 | { |
1703 | 0 | if (context->mHandler->mIsPrivate) |
1704 | 0 | return; |
1705 | 0 | PRFileDesc *aFD = context->mFD; |
1706 | 0 |
|
1707 | 0 | PRFileDesc *idLayer = PR_GetIdentitiesLayer(aFD, PR_NSPR_IO_LAYER); |
1708 | 0 |
|
1709 | 0 | NS_ENSURE_TRUE_VOID(idLayer); |
1710 | 0 |
|
1711 | 0 | bool tcp = PR_GetDescType(idLayer) == PR_DESC_SOCKET_TCP; |
1712 | 0 |
|
1713 | 0 | PRNetAddr peer_addr; |
1714 | 0 | PodZero(&peer_addr); |
1715 | 0 | PRStatus rv = PR_GetPeerName(aFD, &peer_addr); |
1716 | 0 | if (rv != PR_SUCCESS) |
1717 | 0 | return; |
1718 | 0 | |
1719 | 0 | char host[64] = {0}; |
1720 | 0 | rv = PR_NetAddrToString(&peer_addr, host, sizeof(host)); |
1721 | 0 | if (rv != PR_SUCCESS) |
1722 | 0 | return; |
1723 | 0 | |
1724 | 0 | uint16_t port; |
1725 | 0 | if (peer_addr.raw.family == PR_AF_INET) |
1726 | 0 | port = peer_addr.inet.port; |
1727 | 0 | else |
1728 | 0 | port = peer_addr.ipv6.port; |
1729 | 0 | port = PR_ntohs(port); |
1730 | 0 | uint64_t sent = context->mHandler->ByteCountSent(); |
1731 | 0 | uint64_t received = context->mHandler->ByteCountReceived(); |
1732 | 0 | SocketInfo info = { nsCString(host), sent, received, port, aActive, tcp }; |
1733 | 0 |
|
1734 | 0 | data->AppendElement(info); |
1735 | 0 | } |
1736 | | |
1737 | | void |
1738 | | nsSocketTransportService::GetSocketConnections(nsTArray<SocketInfo> *data) |
1739 | 0 | { |
1740 | 0 | MOZ_ASSERT(OnSocketThread(), "not on socket thread"); |
1741 | 0 | for (uint32_t i = 0; i < mActiveCount; i++) |
1742 | 0 | AnalyzeConnection(data, &mActiveList[i], true); |
1743 | 0 | for (uint32_t i = 0; i < mIdleCount; i++) |
1744 | 0 | AnalyzeConnection(data, &mIdleList[i], false); |
1745 | 0 | } |
1746 | | |
1747 | | #if defined(XP_WIN) |
1748 | | void |
1749 | | nsSocketTransportService::StartPollWatchdog() |
1750 | | { |
1751 | | // Start off the timer from a runnable off of the main thread in order to |
1752 | | // avoid a deadlock, see bug 1370448. |
1753 | | RefPtr<nsSocketTransportService> self(this); |
1754 | | NS_DispatchToMainThread(NS_NewRunnableFunction("nsSocketTransportService::StartPollWatchdog", |
1755 | | [self] { |
1756 | | MutexAutoLock lock(self->mLock); |
1757 | | |
1758 | | // Poll can hang sometimes. If we are in shutdown, we are going to start a |
1759 | | // watchdog. If we do not exit poll within REPAIR_POLLABLE_EVENT_TIME |
1760 | | // signal a pollable event again. |
1761 | | MOZ_ASSERT(gIOService->IsNetTearingDown()); |
1762 | | if (self->mPolling && !self->mPollRepairTimer) { |
1763 | | NS_NewTimerWithObserver(getter_AddRefs(self->mPollRepairTimer), |
1764 | | self, REPAIR_POLLABLE_EVENT_TIME, |
1765 | | nsITimer::TYPE_REPEATING_SLACK); |
1766 | | } |
1767 | | })); |
1768 | | } |
1769 | | |
1770 | | void |
1771 | | nsSocketTransportService::DoPollRepair() |
1772 | | { |
1773 | | MutexAutoLock lock(mLock); |
1774 | | if (mPolling && mPollableEvent) { |
1775 | | mPollableEvent->Signal(); |
1776 | | } else if (mPollRepairTimer) { |
1777 | | mPollRepairTimer->Cancel(); |
1778 | | } |
1779 | | } |
1780 | | |
1781 | | void |
1782 | | nsSocketTransportService::StartPolling() |
1783 | | { |
1784 | | MutexAutoLock lock(mLock); |
1785 | | mPolling = true; |
1786 | | } |
1787 | | |
1788 | | void |
1789 | | nsSocketTransportService::EndPolling() |
1790 | | { |
1791 | | MutexAutoLock lock(mLock); |
1792 | | mPolling = false; |
1793 | | if (mPollRepairTimer) { |
1794 | | mPollRepairTimer->Cancel(); |
1795 | | } |
1796 | | } |
1797 | | |
1798 | | #endif |
1799 | | |
1800 | | void nsSocketTransportService::TryRepairPollableEvent() |
1801 | 0 | { |
1802 | 0 | mLock.AssertCurrentThreadOwns(); |
1803 | 0 |
|
1804 | 0 | NS_WARNING("Trying to repair mPollableEvent"); |
1805 | 0 | mPollableEvent.reset(new PollableEvent()); |
1806 | 0 | if (!mPollableEvent->Valid()) { |
1807 | 0 | mPollableEvent = nullptr; |
1808 | 0 | } |
1809 | 0 | SOCKET_LOG(("running socket transport thread without " |
1810 | 0 | "a pollable event now valid=%d", !!mPollableEvent)); |
1811 | 0 | mPollList[0].fd = mPollableEvent ? mPollableEvent->PollableFD() : nullptr; |
1812 | 0 | mPollList[0].in_flags = PR_POLL_READ | PR_POLL_EXCEPT; |
1813 | 0 | mPollList[0].out_flags = 0; |
1814 | 0 | } |
1815 | | |
1816 | | } // namespace net |
1817 | | } // namespace mozilla |