/src/mozilla-central/gfx/layers/client/TextureClient.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "mozilla/layers/TextureClient.h" |
8 | | #include <stdint.h> // for uint8_t, uint32_t, etc |
9 | | #include "Layers.h" // for Layer, etc |
10 | | #include "gfx2DGlue.h" |
11 | | #include "gfxPlatform.h" // for gfxPlatform |
12 | | #include "mozilla/Atomics.h" |
13 | | #include "mozilla/SystemGroup.h" |
14 | | #include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc |
15 | | #include "mozilla/layers/CompositableForwarder.h" |
16 | | #include "mozilla/layers/ISurfaceAllocator.h" |
17 | | #include "mozilla/layers/ImageBridgeChild.h" |
18 | | #include "mozilla/layers/ImageDataSerializer.h" |
19 | | #include "mozilla/layers/PaintThread.h" |
20 | | #include "mozilla/layers/TextureClientRecycleAllocator.h" |
21 | | #include "mozilla/Mutex.h" |
22 | | #include "nsDebug.h" // for NS_ASSERTION, NS_WARNING, etc |
23 | | #include "nsISupportsImpl.h" // for MOZ_COUNT_CTOR, etc |
24 | | #include "ImageContainer.h" // for PlanarYCbCrData, etc |
25 | | #include "mozilla/gfx/2D.h" |
26 | | #include "mozilla/gfx/Logging.h" // for gfxDebug |
27 | | #include "mozilla/layers/TextureClientOGL.h" |
28 | | #include "mozilla/layers/PTextureChild.h" |
29 | | #include "mozilla/gfx/DataSurfaceHelpers.h" // for CreateDataSourceSurfaceByCloning |
30 | | #include "nsPrintfCString.h" // for nsPrintfCString |
31 | | #include "LayersLogging.h" // for AppendToString |
32 | | #include "gfxUtils.h" // for gfxUtils::GetAsLZ4Base64Str |
33 | | #include "IPDLActor.h" |
34 | | #include "BufferTexture.h" |
35 | | #include "gfxPrefs.h" |
36 | | #include "mozilla/layers/ShadowLayers.h" |
37 | | #include "mozilla/ipc/CrossProcessSemaphore.h" |
38 | | |
39 | | #ifdef XP_WIN |
40 | | #include "mozilla/gfx/DeviceManagerDx.h" |
41 | | #include "mozilla/layers/TextureD3D11.h" |
42 | | #include "mozilla/layers/TextureDIB.h" |
43 | | #include "gfxWindowsPlatform.h" |
44 | | #include "gfx2DGlue.h" |
45 | | #endif |
46 | | #ifdef MOZ_X11 |
47 | | #include "mozilla/layers/TextureClientX11.h" |
48 | | #include "GLXLibrary.h" |
49 | | #endif |
50 | | |
51 | | #ifdef XP_MACOSX |
52 | | #include "mozilla/layers/MacIOSurfaceTextureClientOGL.h" |
53 | | #endif |
54 | | |
55 | | #if 0 |
56 | | #define RECYCLE_LOG(...) printf_stderr(__VA_ARGS__) |
57 | | #else |
58 | | #define RECYCLE_LOG(...) do { } while (0) |
59 | | #endif |
60 | | |
61 | | namespace mozilla { |
62 | | namespace layers { |
63 | | |
64 | | using namespace mozilla::ipc; |
65 | | using namespace mozilla::gl; |
66 | | using namespace mozilla::gfx; |
67 | | |
68 | | struct TextureDeallocParams |
69 | | { |
70 | | TextureData* data; |
71 | | RefPtr<TextureChild> actor; |
72 | | RefPtr<LayersIPCChannel> allocator; |
73 | | bool clientDeallocation; |
74 | | bool syncDeallocation; |
75 | | bool workAroundSharedSurfaceOwnershipIssue; |
76 | | }; |
77 | | |
78 | | void DeallocateTextureClient(TextureDeallocParams params); |
79 | | |
80 | | /** |
81 | | * TextureChild is the content-side incarnation of the PTexture IPDL actor. |
82 | | * |
83 | | * TextureChild is used to synchronize a texture client and its corresponding |
84 | | * TextureHost if needed (a TextureClient that is not shared with the compositor |
85 | | * does not have a TextureChild) |
86 | | * |
87 | | * During the deallocation phase, a TextureChild may hold its recently destroyed |
88 | | * TextureClient's data until the compositor side has confirmed that it is safe to |
89 | | * deallocate or recycle it. |
90 | | */ |
91 | | class TextureChild final : PTextureChild |
92 | | { |
93 | | ~TextureChild() |
94 | 0 | { |
95 | 0 | // We should have deallocated mTextureData in ActorDestroy |
96 | 0 | MOZ_ASSERT(!mTextureData); |
97 | 0 | MOZ_ASSERT_IF(!mOwnerCalledDestroy, !mTextureClient); |
98 | 0 | } |
99 | | public: |
100 | | NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureChild) |
101 | | |
102 | | TextureChild() |
103 | | : mCompositableForwarder(nullptr) |
104 | | , mTextureForwarder(nullptr) |
105 | | , mTextureClient(nullptr) |
106 | | , mTextureData(nullptr) |
107 | | , mDestroyed(false) |
108 | | , mMainThreadOnly(false) |
109 | | , mIPCOpen(false) |
110 | | , mOwnsTextureData(false) |
111 | | , mOwnerCalledDestroy(false) |
112 | 0 | {} |
113 | | |
114 | 0 | mozilla::ipc::IPCResult Recv__delete__() override { return IPC_OK(); } |
115 | | |
116 | 0 | LayersIPCChannel* GetAllocator() { return mTextureForwarder; } |
117 | | |
118 | | void ActorDestroy(ActorDestroyReason why) override; |
119 | | |
120 | 0 | bool IPCOpen() const { return mIPCOpen; } |
121 | | |
122 | 0 | void Lock() const { if (mCompositableForwarder && mCompositableForwarder->GetTextureForwarder()->UsesImageBridge()) { mLock.Enter(); } } |
123 | | |
124 | 0 | void Unlock() const { if (mCompositableForwarder && mCompositableForwarder->GetTextureForwarder()->UsesImageBridge()) { mLock.Leave(); } } |
125 | | |
126 | | private: |
127 | | |
128 | | // AddIPDLReference and ReleaseIPDLReference are only to be called by CreateIPDLActor |
129 | | // and DestroyIPDLActor, respectively. We intentionally make them private to prevent misuse. |
130 | | // The purpose of these methods is to be aware of when the IPC system around this |
131 | | // actor goes down: mIPCOpen is then set to false. |
132 | 0 | void AddIPDLReference() { |
133 | 0 | MOZ_ASSERT(mIPCOpen == false); |
134 | 0 | mIPCOpen = true; |
135 | 0 | AddRef(); |
136 | 0 | } |
137 | 0 | void ReleaseIPDLReference() { |
138 | 0 | MOZ_ASSERT(mIPCOpen == false); |
139 | 0 | Release(); |
140 | 0 | } |
141 | | |
142 | | /// The normal way to destroy the actor. |
143 | | /// |
144 | | /// This will asynchronously send a Destroy message to the parent actor, which |
145 | | /// will then send the delete message. |
146 | | void Destroy(const TextureDeallocParams& aParams); |
147 | | |
148 | | // This lock is used to prevent several threads from accessing the |
149 | | // TextureClient's data concurrently. In particular, it prevents shutdown |
150 | | // code from destroying a texture while another thread is reading from or |
151 | | // writing into it. |
152 | | // In most places, the lock is held in short and bounded scopes in which we |
153 | | // don't block on any other resource. There are few exceptions to this, which |
154 | | // are discussed below. |
155 | | // |
156 | | // The locking pattern of TextureClient may in some cases upset deadlock detection |
157 | | // tools such as TSan. |
158 | | // Typically our tile rendering code will lock all of its tiles, render into them |
159 | | // and unlock them all right after that, which looks something like: |
160 | | // |
161 | | // Lock tile A |
162 | | // Lock tile B |
163 | | // Lock tile C |
164 | | // Apply drawing commands to tiles A, B and C |
165 | | // Unlock tile A |
166 | | // Unlock tile B |
167 | | // Unlock tile C |
168 | | // |
169 | | // And later, we may end up rendering a tile buffer that has the same tiles, |
170 | | // in a different order, for example: |
171 | | // |
172 | | // Lock tile B |
173 | | // Lock tile A |
174 | | // Lock tile D |
175 | | // Apply drawing commands to tiles A, B and D |
176 | | // Unlock tile B |
177 | | // Unlock tile A |
178 | | // Unlock tile D |
179 | | // |
180 | | // This happens because textures are expensive to create: we recycle them as |
181 | | // much as possible, and they may reappear in the tile buffer in a different order. |
182 | | // |
183 | | // Unfortunately this is not very friendly to TSan's analysis, which will see |
184 | | // that B was once locked while A was locked, and then A locked while B was |
185 | | // locked. TSan identifies this as a potential deadlock, which would indeed be |
186 | | // the case if this kind of inconsistent, dependent locking order happened |
187 | | // concurrently. |
188 | | // In the case of TextureClient, dependent locking only ever happens on the |
189 | | // thread that draws into the texture (let's call it the producer thread). Other |
190 | | // threads may call into a method that can lock the texture in a short and |
191 | | // bounded scope inside of which it is not allowed to do anything that could |
192 | | // cause the thread to block. A given texture can only have one producer thread. |
193 | | // |
194 | | // Another example of a TSan-unfriendly locking pattern is copying one texture |
195 | | // into another, which also never happens outside of the producer thread. |
196 | | // Copying A into B looks like this: |
197 | | // |
198 | | // Lock texture B |
199 | | // Lock texture A |
200 | | // Copy A into B |
201 | | // Unlock A |
202 | | // Unlock B |
203 | | // |
204 | | // In a given frame we may need to copy A into B and in another frame copy |
205 | | // B into A. For example A and B can be the Front and Back buffers, alternating |
206 | | // roles, and the copy is needed to avoid the cost of re-drawing the valid |
207 | | // region. |
208 | | // |
209 | | // The important rule is that all of the dependent locking must occur only |
210 | | // in the texture's producer thread to avoid deadlocks. |
211 | | mutable gfx::CriticalSection mLock; |
212 | | |
213 | | RefPtr<CompositableForwarder> mCompositableForwarder; |
214 | | RefPtr<TextureForwarder> mTextureForwarder; |
215 | | |
216 | | TextureClient* mTextureClient; |
217 | | TextureData* mTextureData; |
218 | | Atomic<bool> mDestroyed; |
219 | | bool mMainThreadOnly; |
220 | | bool mIPCOpen; |
221 | | bool mOwnsTextureData; |
222 | | bool mOwnerCalledDestroy; |
223 | | |
224 | | friend class TextureClient; |
225 | | friend void DeallocateTextureClient(TextureDeallocParams params); |
226 | | }; |
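
The dependent-locking rule described in the comment above can be illustrated with a minimal sketch; the sTileA/sTileB locks and the PaintFrame helpers below are illustrative only (not part of this file) and use the same Enter()/Leave() API as mLock:

// Two CriticalSections standing in for the locks of two recycled tiles.
static gfx::CriticalSection sTileA;
static gfx::CriticalSection sTileB;

void PaintFrame1()          // runs on the producer thread
{
  sTileA.Enter();
  sTileB.Enter();
  // draw into both tiles
  sTileA.Leave();
  sTileB.Leave();
}

void PaintFrame2()          // same producer thread, opposite acquisition order
{
  sTileB.Enter();
  sTileA.Enter();
  // draw into both tiles
  sTileB.Leave();
  sTileA.Leave();
}

// TSan flags the inverted order as a potential deadlock, but none is possible
// as long as this nested locking happens only on the single producer thread;
// every other thread holds at most one of these locks at a time, in a short
// and bounded scope.
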
227 | | |
228 | | |
229 | | static void DestroyTextureData(TextureData* aTextureData, LayersIPCChannel* aAllocator, |
230 | | bool aDeallocate, bool aMainThreadOnly) |
231 | 0 | { |
232 | 0 | if (!aTextureData) { |
233 | 0 | return; |
234 | 0 | } |
235 | 0 | |
236 | 0 | if (aMainThreadOnly && !NS_IsMainThread()) { |
237 | 0 | RefPtr<LayersIPCChannel> allocatorRef = aAllocator; |
238 | 0 | SystemGroup::Dispatch(TaskCategory::Other, NS_NewRunnableFunction( |
239 | 0 | "layers::DestroyTextureData", |
240 | 0 | [aTextureData, allocatorRef, aDeallocate]() -> void { |
241 | 0 | DestroyTextureData(aTextureData, allocatorRef, aDeallocate, true); |
242 | 0 | })); |
243 | 0 | return; |
244 | 0 | } |
245 | 0 |
|
246 | 0 | if (aDeallocate) { |
247 | 0 | aTextureData->Deallocate(aAllocator); |
248 | 0 | } else { |
249 | 0 | aTextureData->Forget(aAllocator); |
250 | 0 | } |
251 | 0 | delete aTextureData; |
252 | 0 | } |
253 | | |
254 | | void |
255 | | TextureChild::ActorDestroy(ActorDestroyReason why) |
256 | 0 | { |
257 | 0 | AUTO_PROFILER_LABEL("TextureChild::ActorDestroy", GRAPHICS); |
258 | 0 | MOZ_ASSERT(mIPCOpen); |
259 | 0 | mIPCOpen = false; |
260 | 0 |
|
261 | 0 | if (mTextureData) { |
262 | 0 | DestroyTextureData(mTextureData, GetAllocator(), mOwnsTextureData, mMainThreadOnly); |
263 | 0 | mTextureData = nullptr; |
264 | 0 | } |
265 | 0 | } |
266 | | |
267 | | void |
268 | | TextureChild::Destroy(const TextureDeallocParams& aParams) |
269 | 0 | { |
270 | 0 | MOZ_ASSERT(!mOwnerCalledDestroy); |
271 | 0 | if (mOwnerCalledDestroy) { |
272 | 0 | return; |
273 | 0 | } |
274 | 0 | |
275 | 0 | mOwnerCalledDestroy = true; |
276 | 0 |
|
277 | 0 | if (!IPCOpen()) { |
278 | 0 | DestroyTextureData( |
279 | 0 | aParams.data, |
280 | 0 | aParams.allocator, |
281 | 0 | aParams.clientDeallocation, |
282 | 0 | mMainThreadOnly); |
283 | 0 | return; |
284 | 0 | } |
285 | 0 | |
286 | 0 | // DestroyTextureData will be called by TextureChild::ActorDestroy |
287 | 0 | mTextureData = aParams.data; |
288 | 0 | mOwnsTextureData = aParams.clientDeallocation; |
289 | 0 |
|
290 | 0 | if (!mCompositableForwarder || |
291 | 0 | !mCompositableForwarder->DestroyInTransaction(this)) |
292 | 0 | { |
293 | 0 | this->SendDestroy(); |
294 | 0 | } |
295 | 0 | } |
296 | | |
297 | | /* static */ Atomic<uint64_t> TextureClient::sSerialCounter(0); |
298 | | |
299 | | void DeallocateTextureClientSyncProxy(TextureDeallocParams params, |
300 | | ReentrantMonitor* aBarrier, bool* aDone) |
301 | 0 | { |
302 | 0 | DeallocateTextureClient(params); |
303 | 0 | ReentrantMonitorAutoEnter autoMon(*aBarrier); |
304 | 0 | *aDone = true; |
305 | 0 | aBarrier->NotifyAll(); |
306 | 0 | } |
307 | | |
308 | | /// The logic for synchronizing a TextureClient's deallocation goes here. |
309 | | /// |
310 | | /// This function takes care of dispatching work to the right thread using |
311 | | /// a synchronous proxy if needed, and handles client/host deallocation. |
312 | | void |
313 | | DeallocateTextureClient(TextureDeallocParams params) |
314 | 0 | { |
315 | 0 | if (!params.actor && !params.data) { |
316 | 0 | // Nothing to do |
317 | 0 | return; |
318 | 0 | } |
319 | 0 | |
320 | 0 | TextureChild* actor = params.actor; |
321 | 0 | MessageLoop* ipdlMsgLoop = nullptr; |
322 | 0 |
|
323 | 0 | if (params.allocator) { |
324 | 0 | ipdlMsgLoop = params.allocator->GetMessageLoop(); |
325 | 0 | if (!ipdlMsgLoop) { |
326 | 0 | // An allocator with no message loop means we are too late in the shutdown |
327 | 0 | // sequence. |
328 | 0 | gfxCriticalError() << "Texture deallocated too late during shutdown"; |
329 | 0 | return; |
330 | 0 | } |
331 | 0 | } |
332 | 0 |
|
333 | 0 | // First make sure that the work is happening on the IPDL thread. |
334 | 0 | if (ipdlMsgLoop && MessageLoop::current() != ipdlMsgLoop) { |
335 | 0 | if (params.syncDeallocation) { |
336 | 0 | bool done = false; |
337 | 0 | ReentrantMonitor barrier("DeallocateTextureClient"); |
338 | 0 | ReentrantMonitorAutoEnter autoMon(barrier); |
339 | 0 | ipdlMsgLoop->PostTask(NewRunnableFunction("DeallocateTextureClientSyncProxyRunnable", |
340 | 0 | DeallocateTextureClientSyncProxy, |
341 | 0 | params, &barrier, &done)); |
342 | 0 | while (!done) { |
343 | 0 | barrier.Wait(); |
344 | 0 | } |
345 | 0 | } else { |
346 | 0 | ipdlMsgLoop->PostTask(NewRunnableFunction("DeallocateTextureClientRunnable", |
347 | 0 | DeallocateTextureClient, |
348 | 0 | params)); |
349 | 0 | } |
350 | 0 | // The work has been forwarded to the IPDL thread, we are done. |
351 | 0 | return; |
352 | 0 | } |
353 | 0 |
|
354 | 0 | // Below this line, we are either in the IPDL thread or there is no IPDL |
355 | 0 | // thread anymore. |
356 | 0 |
|
357 | 0 | if (!ipdlMsgLoop) { |
358 | 0 | // If we don't have a message loop we can't know for sure that we are in |
359 | 0 | // the IPDL thread and use the LayersIPCChannel. |
360 | 0 | // This should ideally not happen outside of gtest, but some shutdown raciness |
361 | 0 | // could put us in this situation. |
362 | 0 | params.allocator = nullptr; |
363 | 0 | } |
364 | 0 |
|
365 | 0 | if (!actor) { |
366 | 0 | // We don't have an IPDL actor, probably because we destroyed the TextureClient |
367 | 0 | // before sharing it with the compositor. It means the data cannot be owned by |
368 | 0 | // the TextureHost since we never created the TextureHost... |
369 | 0 | // ...except if the lovely mWorkaroundAnnoyingSharedSurfaceOwnershipIssues member |
370 | 0 | // is set to true. In this case we are in a special situation where this |
371 | 0 | // TextureClient is wrapped into another TextureClient which assumes it owns |
372 | 0 | // our data. |
373 | 0 | bool shouldDeallocate = !params.workAroundSharedSurfaceOwnershipIssue; |
374 | 0 | DestroyTextureData(params.data, params.allocator, |
375 | 0 | shouldDeallocate, |
376 | 0 | false); // main-thread deallocation |
377 | 0 | return; |
378 | 0 | } |
379 | 0 | |
380 | 0 | actor->Destroy(params); |
381 | 0 | } |
382 | | |
383 | | void TextureClient::Destroy() |
384 | 0 | { |
385 | 0 | // Async paints should have been flushed by now. |
386 | 0 | MOZ_RELEASE_ASSERT(mPaintThreadRefs == 0); |
387 | 0 |
|
388 | 0 | if (mActor && !mIsLocked) { |
389 | 0 | mActor->Lock(); |
390 | 0 | } |
391 | 0 |
|
392 | 0 | mBorrowedDrawTarget = nullptr; |
393 | 0 | mReadLock = nullptr; |
394 | 0 |
|
395 | 0 | RefPtr<TextureChild> actor = mActor; |
396 | 0 | mActor = nullptr; |
397 | 0 |
|
398 | 0 | if (actor && !actor->mDestroyed.compareExchange(false, true)) { |
399 | 0 | actor->Unlock(); |
400 | 0 | actor = nullptr; |
401 | 0 | } |
402 | 0 |
|
403 | 0 | TextureData* data = mData; |
404 | 0 | if (!mWorkaroundAnnoyingSharedSurfaceLifetimeIssues) { |
405 | 0 | mData = nullptr; |
406 | 0 | } |
407 | 0 |
|
408 | 0 | if (data || actor) { |
409 | 0 | TextureDeallocParams params; |
410 | 0 | params.actor = actor; |
411 | 0 | params.allocator = mAllocator; |
412 | 0 | params.clientDeallocation = !!(mFlags & TextureFlags::DEALLOCATE_CLIENT); |
413 | 0 | params.workAroundSharedSurfaceOwnershipIssue = mWorkaroundAnnoyingSharedSurfaceOwnershipIssues; |
414 | 0 | if (mWorkaroundAnnoyingSharedSurfaceLifetimeIssues) { |
415 | 0 | params.data = nullptr; |
416 | 0 | } else { |
417 | 0 | params.data = data; |
418 | 0 | } |
419 | 0 | // At the moment we always deallocate synchronously when deallocating on the |
420 | 0 | // client side, but asynchronous deallocation in some of these cases would |
421 | 0 | // be a worthwhile optimization. |
422 | 0 | params.syncDeallocation = !!(mFlags & TextureFlags::DEALLOCATE_CLIENT); |
423 | 0 |
|
424 | 0 | // Release the lock before calling DeallocateTextureClient because the latter |
425 | 0 | // may wait for the main thread, which could create a deadlock. |
426 | 0 |
|
427 | 0 | if (actor) { |
428 | 0 | actor->Unlock(); |
429 | 0 | } |
430 | 0 |
|
431 | 0 | DeallocateTextureClient(params); |
432 | 0 | } |
433 | 0 | } |
434 | | |
435 | | void |
436 | | TextureClient::LockActor() const |
437 | 0 | { |
438 | 0 | if (mActor) { |
439 | 0 | mActor->Lock(); |
440 | 0 | } |
441 | 0 | } |
442 | | |
443 | | void |
444 | | TextureClient::UnlockActor() const |
445 | 0 | { |
446 | 0 | if (mActor) { |
447 | 0 | mActor->Unlock(); |
448 | 0 | } |
449 | 0 | } |
450 | | |
451 | | bool |
452 | | TextureClient::IsReadLocked() const |
453 | 0 | { |
454 | 0 | if (!mReadLock) { |
455 | 0 | return false; |
456 | 0 | } |
457 | 0 | MOZ_ASSERT(mReadLock->AsNonBlockingLock(), "Can only check locked for non-blocking locks!"); |
458 | 0 | return mReadLock->AsNonBlockingLock()->GetReadCount() > 1; |
459 | 0 | } |
460 | | |
461 | | bool |
462 | | TextureClient::TryReadLock() |
463 | 0 | { |
464 | 0 | if (!mReadLock || mIsReadLocked) { |
465 | 0 | return true; |
466 | 0 | } |
467 | 0 | |
468 | 0 | if (mReadLock->AsNonBlockingLock()) { |
469 | 0 | if (IsReadLocked()) { |
470 | 0 | return false; |
471 | 0 | } |
472 | 0 | } |
473 | 0 | |
474 | 0 | if (!mReadLock->TryReadLock(TimeDuration::FromMilliseconds(500))) { |
475 | 0 | return false; |
476 | 0 | } |
477 | 0 | |
478 | 0 | mIsReadLocked = true; |
479 | 0 | return true; |
480 | 0 | } |
481 | | |
482 | | void |
483 | | TextureClient::ReadUnlock() |
484 | 0 | { |
485 | 0 | if (!mIsReadLocked) { |
486 | 0 | return; |
487 | 0 | } |
488 | 0 | MOZ_ASSERT(mReadLock); |
489 | 0 | mReadLock->ReadUnlock(); |
490 | 0 | mIsReadLocked = false; |
491 | 0 | } |
492 | | |
493 | | bool |
494 | | TextureClient::Lock(OpenMode aMode) |
495 | 0 | { |
496 | 0 | MOZ_ASSERT(IsValid()); |
497 | 0 | MOZ_ASSERT(!mIsLocked); |
498 | 0 | if (!IsValid()) { |
499 | 0 | return false; |
500 | 0 | } |
501 | 0 | if (mIsLocked) { |
502 | 0 | return mOpenMode == aMode; |
503 | 0 | } |
504 | 0 | |
505 | 0 | if ((aMode & OpenMode::OPEN_WRITE || !mInfo.canConcurrentlyReadLock) && !TryReadLock()) { |
506 | 0 | // Only warn if attempting to write. Attempting to read is acceptable usage. |
507 | 0 | if (aMode & OpenMode::OPEN_WRITE) { |
508 | 0 | NS_WARNING("Attempt to Lock a texture that is being read by the compositor!"); |
509 | 0 | } |
510 | 0 | return false; |
511 | 0 | } |
512 | 0 |
|
513 | 0 | LockActor(); |
514 | 0 |
|
515 | 0 | mIsLocked = mData->Lock(aMode); |
516 | 0 | mOpenMode = aMode; |
517 | 0 |
|
518 | 0 | auto format = GetFormat(); |
519 | 0 | if (mIsLocked && CanExposeDrawTarget() && |
520 | 0 | (aMode & OpenMode::OPEN_READ_WRITE) == OpenMode::OPEN_READ_WRITE && |
521 | 0 | NS_IsMainThread() && |
522 | 0 | // the formats that we apparently expect, in the cairo backend. Any other |
523 | 0 | // format will trigger an assertion in GfxFormatToCairoFormat. |
524 | 0 | (format == SurfaceFormat::A8R8G8B8_UINT32 || |
525 | 0 | format == SurfaceFormat::X8R8G8B8_UINT32 || |
526 | 0 | format == SurfaceFormat::A8 || |
527 | 0 | format == SurfaceFormat::R5G6B5_UINT16)) { |
528 | 0 | if (!BorrowDrawTarget()) { |
529 | 0 | // Failing to get a DrawTarget means we won't be able to write into the |
530 | 0 | // texture, so we might as well fail now. |
531 | 0 | Unlock(); |
532 | 0 | return false; |
533 | 0 | } |
534 | 0 | } |
535 | 0 | |
536 | 0 | if (!mIsLocked) { |
537 | 0 | UnlockActor(); |
538 | 0 | ReadUnlock(); |
539 | 0 | } |
540 | 0 |
|
541 | 0 | return mIsLocked; |
542 | 0 | } |
543 | | |
544 | | void |
545 | | TextureClient::Unlock() |
546 | 0 | { |
547 | 0 | MOZ_ASSERT(IsValid()); |
548 | 0 | MOZ_ASSERT(mIsLocked); |
549 | 0 | if (!IsValid() || !mIsLocked) { |
550 | 0 | return; |
551 | 0 | } |
552 | 0 | |
553 | 0 | if (mBorrowedDrawTarget) { |
554 | 0 | if (!(mOpenMode & OpenMode::OPEN_ASYNC)) { |
555 | 0 | if (mOpenMode & OpenMode::OPEN_WRITE) { |
556 | 0 | mBorrowedDrawTarget->Flush(); |
557 | 0 | if (mReadbackSink && !mData->ReadBack(mReadbackSink)) { |
558 | 0 | // Fallback implementation for reading back, because mData does not |
559 | 0 | // have a backend-specific implementation and returned false. |
560 | 0 | RefPtr<SourceSurface> snapshot = mBorrowedDrawTarget->Snapshot(); |
561 | 0 | RefPtr<DataSourceSurface> dataSurf = snapshot->GetDataSurface(); |
562 | 0 | mReadbackSink->ProcessReadback(dataSurf); |
563 | 0 | } |
564 | 0 | } |
565 | 0 |
|
566 | 0 | mBorrowedDrawTarget->DetachAllSnapshots(); |
567 | 0 | // If this assertion is hit, it means something is holding a strong reference |
568 | 0 | // to our DrawTarget externally, which is not allowed. |
569 | 0 | MOZ_ASSERT(mBorrowedDrawTarget->refCount() <= mExpectedDtRefs); |
570 | 0 | } |
571 | 0 |
|
572 | 0 | mBorrowedDrawTarget = nullptr; |
573 | 0 | } |
574 | 0 |
|
575 | 0 | if (mOpenMode & OpenMode::OPEN_WRITE) { |
576 | 0 | mUpdated = true; |
577 | 0 | } |
578 | 0 |
|
579 | 0 | if (mData) { |
580 | 0 | mData->Unlock(); |
581 | 0 | } |
582 | 0 | mIsLocked = false; |
583 | 0 | mOpenMode = OpenMode::OPEN_NONE; |
584 | 0 |
|
585 | 0 | UnlockActor(); |
586 | 0 | ReadUnlock(); |
587 | 0 | } |
588 | | |
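
A minimal caller-side sketch of the Lock()/Unlock() contract implemented above; PaintInto and its argument are illustrative, not part of this file:

void PaintInto(TextureClient* aTexture)
{
  // For write access, Lock() fails if the compositor still holds the read lock.
  if (!aTexture->Lock(OpenMode::OPEN_READ_WRITE)) {
    return;
  }
  // On the main thread and for the supported formats, Lock() has already
  // borrowed a DrawTarget; otherwise this borrows one now.
  if (gfx::DrawTarget* dt = aTexture->BorrowDrawTarget()) {
    dt->ClearRect(gfx::Rect(0, 0, 10, 10));   // any drawing goes here
  }
  // Unlock() flushes and detaches the borrowed DrawTarget, marks the texture
  // as updated, and releases both the actor lock and the read lock.
  aTexture->Unlock();
}
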
589 | | void |
590 | | TextureClient::EnableReadLock() |
591 | 0 | { |
592 | 0 | if (!mReadLock) { |
593 | 0 | if (mAllocator->GetTileLockAllocator()) { |
594 | 0 | mReadLock = NonBlockingTextureReadLock::Create(mAllocator); |
595 | 0 | } else { |
596 | 0 | // IPC is down |
597 | 0 | gfxCriticalError() << "TextureClient::EnableReadLock IPC is down"; |
598 | 0 | } |
599 | 0 | } |
600 | 0 | } |
601 | | |
602 | | bool |
603 | | TextureClient::OnForwardedToHost() |
604 | 0 | { |
605 | 0 | if (mData) { |
606 | 0 | mData->OnForwardedToHost(); |
607 | 0 | } |
608 | 0 |
|
609 | 0 | if (mReadLock && mUpdated) { |
610 | 0 | // Take a read lock on behalf of the TextureHost. The latter will unlock |
611 | 0 | // after the shared data is available again for drawing. |
612 | 0 | mReadLock->ReadLock(); |
613 | 0 | mUpdated = false; |
614 | 0 | return true; |
615 | 0 | } |
616 | 0 | |
617 | 0 | return false; |
618 | 0 | } |
619 | | |
620 | | TextureClient::~TextureClient() |
621 | 0 | { |
622 | 0 | // TextureClients should be kept alive while there are references on the |
623 | 0 | // paint thread. |
624 | 0 | MOZ_ASSERT(mPaintThreadRefs == 0); |
625 | 0 | mReadLock = nullptr; |
626 | 0 | Destroy(); |
627 | 0 | } |
628 | | |
629 | | void |
630 | | TextureClient::UpdateFromSurface(gfx::SourceSurface* aSurface) |
631 | 0 | { |
632 | 0 | MOZ_ASSERT(IsValid()); |
633 | 0 | MOZ_ASSERT(mIsLocked); |
634 | 0 | MOZ_ASSERT(aSurface); |
635 | 0 | // If you run into this assertion, make sure the texture was locked write-only |
636 | 0 | // rather than read-write. |
637 | 0 | MOZ_ASSERT(!mBorrowedDrawTarget); |
638 | 0 |
|
639 | 0 | // XXX - It would be better to first try the DrawTarget approach and fall back |
640 | 0 | // to the backend-specific implementation, because the latter will usually do |
641 | 0 | // an expensive read-back + cpu-side copy if the texture is on the gpu. |
642 | 0 | // There is a bug with the DrawTarget approach, though, specific to reading back |
643 | 0 | // from WebGL (where the R and B channels end up inverted), to figure out first. |
644 | 0 | if (mData->UpdateFromSurface(aSurface)) { |
645 | 0 | return; |
646 | 0 | } |
647 | 0 | if (CanExposeDrawTarget() && NS_IsMainThread()) { |
648 | 0 | RefPtr<DrawTarget> dt = BorrowDrawTarget(); |
649 | 0 |
|
650 | 0 | MOZ_ASSERT(dt); |
651 | 0 | if (dt) { |
652 | 0 | dt->CopySurface(aSurface, |
653 | 0 | gfx::IntRect(gfx::IntPoint(0, 0), aSurface->GetSize()), |
654 | 0 | gfx::IntPoint(0, 0)); |
655 | 0 | return; |
656 | 0 | } |
657 | 0 | } |
658 | 0 | NS_WARNING("TextureClient::UpdateFromSurface failed"); |
659 | 0 | } |
660 | | |
661 | | |
662 | | already_AddRefed<TextureClient> |
663 | | TextureClient::CreateSimilar(LayersBackend aLayersBackend, |
664 | | TextureFlags aFlags, |
665 | | TextureAllocationFlags aAllocFlags) const |
666 | 0 | { |
667 | 0 | MOZ_ASSERT(IsValid()); |
668 | 0 |
|
669 | 0 | MOZ_ASSERT(!mIsLocked); |
670 | 0 | if (mIsLocked) { |
671 | 0 | return nullptr; |
672 | 0 | } |
673 | 0 | |
674 | 0 | LockActor(); |
675 | 0 | TextureData* data = mData->CreateSimilar(mAllocator, |
676 | 0 | aLayersBackend, |
677 | 0 | aFlags, |
678 | 0 | aAllocFlags); |
679 | 0 | UnlockActor(); |
680 | 0 |
|
681 | 0 | if (!data) { |
682 | 0 | return nullptr; |
683 | 0 | } |
684 | 0 | |
685 | 0 | return MakeAndAddRef<TextureClient>(data, aFlags, mAllocator); |
686 | 0 | } |
687 | | |
688 | | gfx::DrawTarget* |
689 | | TextureClient::BorrowDrawTarget() |
690 | 0 | { |
691 | 0 | MOZ_ASSERT(IsValid()); |
692 | 0 | MOZ_ASSERT(mIsLocked); |
693 | 0 | // TODO - We can't really assert that at the moment because there is code that Borrows |
694 | 0 | // the DrawTarget just to get a snapshot, which is legit in terms of OpenMode, |
695 | 0 | // but we should have a way to get a SourceSurface directly instead. |
696 | 0 | //MOZ_ASSERT(mOpenMode & OpenMode::OPEN_WRITE); |
697 | 0 |
|
698 | 0 | if (!IsValid() || !mIsLocked) { |
699 | 0 | return nullptr; |
700 | 0 | } |
701 | 0 | |
702 | 0 | if (!mBorrowedDrawTarget) { |
703 | 0 | mBorrowedDrawTarget = mData->BorrowDrawTarget(); |
704 | | #ifdef DEBUG |
705 | | mExpectedDtRefs = mBorrowedDrawTarget ? mBorrowedDrawTarget->refCount() : 0; |
706 | | #endif |
707 | | } |
708 | 0 |
|
709 | 0 | return mBorrowedDrawTarget; |
710 | 0 | } |
711 | | |
712 | | bool |
713 | | TextureClient::BorrowMappedData(MappedTextureData& aMap) |
714 | 0 | { |
715 | 0 | MOZ_ASSERT(IsValid()); |
716 | 0 |
|
717 | 0 | // TODO - SharedRGBImage just accesses the buffer without properly locking |
718 | 0 | // the texture. It's bad. |
719 | 0 | //MOZ_ASSERT(mIsLocked); |
720 | 0 | //if (!mIsLocked) { |
721 | 0 | // return nullptr; |
722 | 0 | //} |
723 | 0 |
|
724 | 0 | return mData ? mData->BorrowMappedData(aMap) : false; |
725 | 0 | } |
726 | | |
727 | | bool |
728 | | TextureClient::BorrowMappedYCbCrData(MappedYCbCrTextureData& aMap) |
729 | 0 | { |
730 | 0 | MOZ_ASSERT(IsValid()); |
731 | 0 |
|
732 | 0 | return mData ? mData->BorrowMappedYCbCrData(aMap) : false; |
733 | 0 | } |
734 | | |
735 | | bool |
736 | | TextureClient::ToSurfaceDescriptor(SurfaceDescriptor& aOutDescriptor) |
737 | 0 | { |
738 | 0 | MOZ_ASSERT(IsValid()); |
739 | 0 |
|
740 | 0 | return mData ? mData->Serialize(aOutDescriptor) : false; |
741 | 0 | } |
742 | | |
743 | | // static |
744 | | PTextureChild* |
745 | | TextureClient::CreateIPDLActor() |
746 | 0 | { |
747 | 0 | TextureChild* c = new TextureChild(); |
748 | 0 | c->AddIPDLReference(); |
749 | 0 | return c; |
750 | 0 | } |
751 | | |
752 | | // static |
753 | | bool |
754 | | TextureClient::DestroyIPDLActor(PTextureChild* actor) |
755 | 0 | { |
756 | 0 | static_cast<TextureChild*>(actor)->ReleaseIPDLReference(); |
757 | 0 | return true; |
758 | 0 | } |
759 | | |
760 | | // static |
761 | | already_AddRefed<TextureClient> |
762 | | TextureClient::AsTextureClient(PTextureChild* actor) |
763 | 0 | { |
764 | 0 | if (!actor) { |
765 | 0 | return nullptr; |
766 | 0 | } |
767 | 0 | |
768 | 0 | TextureChild* tc = static_cast<TextureChild*>(actor); |
769 | 0 |
|
770 | 0 | tc->Lock(); |
771 | 0 |
|
772 | 0 | // Since TextureClient may be destroyed asynchronously with respect to its |
773 | 0 | // IPDL actor, we must acquire a reference within a lock. The mDestroyed bit |
774 | 0 | // tells us whether or not the main thread has disconnected the TextureClient |
775 | 0 | // from its actor. |
776 | 0 | if (tc->mDestroyed) { |
777 | 0 | tc->Unlock(); |
778 | 0 | return nullptr; |
779 | 0 | } |
780 | 0 | |
781 | 0 | RefPtr<TextureClient> texture = tc->mTextureClient; |
782 | 0 | tc->Unlock(); |
783 | 0 |
|
784 | 0 | return texture.forget(); |
785 | 0 | } |
786 | | |
787 | | bool |
788 | 0 | TextureClient::IsSharedWithCompositor() const { |
789 | 0 | return mActor && mActor->IPCOpen(); |
790 | 0 | } |
791 | | |
792 | | void |
793 | | TextureClient::AddFlags(TextureFlags aFlags) |
794 | 0 | { |
795 | 0 | MOZ_ASSERT(!IsSharedWithCompositor() || |
796 | 0 | ((GetFlags() & TextureFlags::RECYCLE) && !IsAddedToCompositableClient())); |
797 | 0 | mFlags |= aFlags; |
798 | 0 | } |
799 | | |
800 | | void |
801 | | TextureClient::RemoveFlags(TextureFlags aFlags) |
802 | 0 | { |
803 | 0 | MOZ_ASSERT(!IsSharedWithCompositor() || |
804 | 0 | ((GetFlags() & TextureFlags::RECYCLE) && !IsAddedToCompositableClient())); |
805 | 0 | mFlags &= ~aFlags; |
806 | 0 | } |
807 | | |
808 | | void |
809 | | TextureClient::RecycleTexture(TextureFlags aFlags) |
810 | 0 | { |
811 | 0 | MOZ_ASSERT(GetFlags() & TextureFlags::RECYCLE); |
812 | 0 | MOZ_ASSERT(!mIsLocked); |
813 | 0 |
|
814 | 0 | mAddedToCompositableClient = false; |
815 | 0 | if (mFlags != aFlags) { |
816 | 0 | mFlags = aFlags; |
817 | 0 | } |
818 | 0 | } |
819 | | |
820 | | void |
821 | | TextureClient::SetAddedToCompositableClient() |
822 | 0 | { |
823 | 0 | if (!mAddedToCompositableClient) { |
824 | 0 | mAddedToCompositableClient = true; |
825 | 0 | if(!(GetFlags() & TextureFlags::RECYCLE)) { |
826 | 0 | return; |
827 | 0 | } |
828 | 0 | MOZ_ASSERT(!mIsLocked); |
829 | 0 | LockActor(); |
830 | 0 | if (IsValid() && mActor && !mActor->mDestroyed && mActor->IPCOpen()) { |
831 | 0 | mActor->SendRecycleTexture(mFlags); |
832 | 0 | } |
833 | 0 | UnlockActor(); |
834 | 0 | } |
835 | 0 | } |
836 | | |
837 | | void CancelTextureClientRecycle(uint64_t aTextureId, LayersIPCChannel* aAllocator) |
838 | 0 | { |
839 | 0 | if (!aAllocator) { |
840 | 0 | return; |
841 | 0 | } |
842 | 0 | MessageLoop* msgLoop = nullptr; |
843 | 0 | msgLoop = aAllocator->GetMessageLoop(); |
844 | 0 | if (!msgLoop) { |
845 | 0 | return; |
846 | 0 | } |
847 | 0 | if (MessageLoop::current() == msgLoop) { |
848 | 0 | aAllocator->CancelWaitForRecycle(aTextureId); |
849 | 0 | } else { |
850 | 0 | msgLoop->PostTask(NewRunnableFunction("CancelTextureClientRecycleRunnable", |
851 | 0 | CancelTextureClientRecycle, |
852 | 0 | aTextureId, aAllocator)); |
853 | 0 | } |
854 | 0 | } |
855 | | |
856 | | void |
857 | | TextureClient::CancelWaitForRecycle() |
858 | 0 | { |
859 | 0 | if (GetFlags() & TextureFlags::RECYCLE) { |
860 | 0 | CancelTextureClientRecycle(mSerial, GetAllocator()); |
861 | 0 | return; |
862 | 0 | } |
863 | 0 | } |
864 | | |
865 | | /* static */ void |
866 | | TextureClient::TextureClientRecycleCallback(TextureClient* aClient, void* aClosure) |
867 | 0 | { |
868 | 0 | MOZ_ASSERT(aClient->GetRecycleAllocator()); |
869 | 0 | aClient->GetRecycleAllocator()->RecycleTextureClient(aClient); |
870 | 0 | } |
871 | | |
872 | | void |
873 | | TextureClient::SetRecycleAllocator(ITextureClientRecycleAllocator* aAllocator) |
874 | 0 | { |
875 | 0 | mRecycleAllocator = aAllocator; |
876 | 0 | if (aAllocator) { |
877 | 0 | SetRecycleCallback(TextureClientRecycleCallback, nullptr); |
878 | 0 | } else { |
879 | 0 | ClearRecycleCallback(); |
880 | 0 | } |
881 | 0 | } |
882 | | |
883 | | bool |
884 | | TextureClient::InitIPDLActor(CompositableForwarder* aForwarder) |
885 | 0 | { |
886 | 0 | MOZ_ASSERT(aForwarder && aForwarder->GetTextureForwarder()->GetMessageLoop() == mAllocator->GetMessageLoop()); |
887 | 0 |
|
888 | 0 | if (mActor && !mActor->IPCOpen()) { |
889 | 0 | return false; |
890 | 0 | } |
891 | 0 | |
892 | 0 | if (mActor && !mActor->mDestroyed) { |
893 | 0 | CompositableForwarder* currentFwd = mActor->mCompositableForwarder; |
894 | 0 | TextureForwarder* currentTexFwd = mActor->mTextureForwarder; |
895 | 0 | if (currentFwd != aForwarder) { |
896 | 0 | // It's a bit iffy but right now ShadowLayerForwarder inherits TextureForwarder |
897 | 0 | // even though it should not. ShadowLayerForwarder::GetTextureForwarder actually |
898 | 0 | // returns a pointer to the CompositorBridgeChild. |
899 | 0 | // It's OK for a texture to move from one ShadowLayerForwarder to another, but |
900 | 0 | // not from one CompositorBridgeChild to another (they use different channels). |
901 | 0 | if (currentTexFwd && currentTexFwd != aForwarder->GetTextureForwarder()) { |
902 | 0 | gfxCriticalError() << "Attempt to move a texture to a different channel CF."; |
903 | 0 | return false; |
904 | 0 | } |
905 | 0 | if (currentFwd && currentFwd->GetCompositorBackendType() != aForwarder->GetCompositorBackendType()) { |
906 | 0 | gfxCriticalError() << "Attempt to move a texture to different compositor backend."; |
907 | 0 | return false; |
908 | 0 | } |
909 | 0 | if (ShadowLayerForwarder* forwarder = aForwarder->AsLayerForwarder()) { |
910 | 0 | // Do the DOM labeling. |
911 | 0 | if (nsIEventTarget* target = forwarder->GetEventTarget()) { |
912 | 0 | forwarder->GetCompositorBridgeChild()->ReplaceEventTargetForActor( |
913 | 0 | mActor, target); |
914 | 0 | } |
915 | 0 | } |
916 | 0 | mActor->mCompositableForwarder = aForwarder; |
917 | 0 | } |
918 | 0 | return true; |
919 | 0 | } |
920 | 0 | MOZ_ASSERT(!mActor || mActor->mDestroyed, "Cannot use a texture on several IPC channels."); |
921 | 0 |
|
922 | 0 | SurfaceDescriptor desc; |
923 | 0 | if (!ToSurfaceDescriptor(desc)) { |
924 | 0 | return false; |
925 | 0 | } |
926 | 0 | |
927 | 0 | // Try external image id allocation. |
928 | 0 | mExternalImageId = aForwarder->GetTextureForwarder()->GetNextExternalImageId(); |
929 | 0 |
|
930 | 0 | nsIEventTarget* target = nullptr; |
931 | 0 | // Get the layers id if the forwarder is a ShadowLayerForwarder. |
932 | 0 | if (ShadowLayerForwarder* forwarder = aForwarder->AsLayerForwarder()) { |
933 | 0 | target = forwarder->GetEventTarget(); |
934 | 0 | } |
935 | 0 |
|
936 | 0 | ReadLockDescriptor readLockDescriptor = null_t(); |
937 | 0 | if (mReadLock) { |
938 | 0 | mReadLock->Serialize(readLockDescriptor, GetAllocator()->GetParentPid()); |
939 | 0 | } |
940 | 0 |
|
941 | 0 | PTextureChild* actor = aForwarder->GetTextureForwarder()->CreateTexture( |
942 | 0 | desc, |
943 | 0 | readLockDescriptor, |
944 | 0 | aForwarder->GetCompositorBackendType(), |
945 | 0 | GetFlags(), |
946 | 0 | mSerial, |
947 | 0 | mExternalImageId, |
948 | 0 | target); |
949 | 0 |
|
950 | 0 | if (!actor) { |
951 | 0 | gfxCriticalNote << static_cast<int32_t>(desc.type()) << ", " |
952 | 0 | << static_cast<int32_t>(aForwarder->GetCompositorBackendType()) << ", " |
953 | 0 | << static_cast<uint32_t>(GetFlags()) |
954 | 0 | << ", " << mSerial; |
955 | 0 | return false; |
956 | 0 | } |
957 | 0 |
|
958 | 0 | mActor = static_cast<TextureChild*>(actor); |
959 | 0 | mActor->mCompositableForwarder = aForwarder; |
960 | 0 | mActor->mTextureForwarder = aForwarder->GetTextureForwarder(); |
961 | 0 | mActor->mTextureClient = this; |
962 | 0 | mActor->mMainThreadOnly = !!(mFlags & TextureFlags::DEALLOCATE_MAIN_THREAD); |
963 | 0 |
|
964 | 0 | // If the TextureClient is already locked, we have to lock TextureChild's mutex |
965 | 0 | // since it will be unlocked in TextureClient::Unlock. |
966 | 0 | if (mIsLocked) { |
967 | 0 | LockActor(); |
968 | 0 | } |
969 | 0 |
|
970 | 0 | return mActor->IPCOpen(); |
971 | 0 | } |
972 | | |
973 | | bool |
974 | | TextureClient::InitIPDLActor(KnowsCompositor* aForwarder) |
975 | 0 | { |
976 | 0 | MOZ_ASSERT(aForwarder && aForwarder->GetTextureForwarder()->GetMessageLoop() == mAllocator->GetMessageLoop()); |
977 | 0 | TextureForwarder* fwd = aForwarder->GetTextureForwarder(); |
978 | 0 | if (mActor && !mActor->mDestroyed) { |
979 | 0 | CompositableForwarder* currentFwd = mActor->mCompositableForwarder; |
980 | 0 | TextureForwarder* currentTexFwd = mActor->mTextureForwarder; |
981 | 0 |
|
982 | 0 | if (currentFwd) { |
983 | 0 | gfxCriticalError() << "Attempt to remove a texture from a CompositableForwarder."; |
984 | 0 | return false; |
985 | 0 | } |
986 | 0 |
|
987 | 0 | if (currentTexFwd && currentTexFwd != fwd) { |
988 | 0 | gfxCriticalError() << "Attempt to move a texture to a different channel TF."; |
989 | 0 | return false; |
990 | 0 | } |
991 | 0 | mActor->mTextureForwarder = fwd; |
992 | 0 | return true; |
993 | 0 | } |
994 | 0 | MOZ_ASSERT(!mActor || mActor->mDestroyed, "Cannot use a texture on several IPC channels."); |
995 | 0 |
|
996 | 0 | SurfaceDescriptor desc; |
997 | 0 | if (!ToSurfaceDescriptor(desc)) { |
998 | 0 | return false; |
999 | 0 | } |
1000 | 0 | |
1001 | 0 | // Try external image id allocation. |
1002 | 0 | mExternalImageId = aForwarder->GetTextureForwarder()->GetNextExternalImageId(); |
1003 | 0 |
|
1004 | 0 | ReadLockDescriptor readLockDescriptor = null_t(); |
1005 | 0 | if (mReadLock) { |
1006 | 0 | mReadLock->Serialize(readLockDescriptor, GetAllocator()->GetParentPid()); |
1007 | 0 | } |
1008 | 0 |
|
1009 | 0 | PTextureChild* actor = fwd->CreateTexture( |
1010 | 0 | desc, |
1011 | 0 | readLockDescriptor, |
1012 | 0 | aForwarder->GetCompositorBackendType(), |
1013 | 0 | GetFlags(), |
1014 | 0 | mSerial, |
1015 | 0 | mExternalImageId); |
1016 | 0 | if (!actor) { |
1017 | 0 | gfxCriticalNote << static_cast<int32_t>(desc.type()) << ", " |
1018 | 0 | << static_cast<int32_t>(aForwarder->GetCompositorBackendType()) << ", " |
1019 | 0 | << static_cast<uint32_t>(GetFlags()) |
1020 | 0 | << ", " << mSerial; |
1021 | 0 | return false; |
1022 | 0 | } |
1023 | 0 |
|
1024 | 0 | mActor = static_cast<TextureChild*>(actor); |
1025 | 0 | mActor->mTextureForwarder = fwd; |
1026 | 0 | mActor->mTextureClient = this; |
1027 | 0 | mActor->mMainThreadOnly = !!(mFlags & TextureFlags::DEALLOCATE_MAIN_THREAD); |
1028 | 0 |
|
1029 | 0 | // If the TextureClient is already locked, we have to lock TextureChild's mutex |
1030 | 0 | // since it will be unlocked in TextureClient::Unlock. |
1031 | 0 | if (mIsLocked) { |
1032 | 0 | LockActor(); |
1033 | 0 | } |
1034 | 0 |
|
1035 | 0 | return mActor->IPCOpen(); |
1036 | 0 | } |
1037 | | |
1038 | | PTextureChild* |
1039 | | TextureClient::GetIPDLActor() |
1040 | 0 | { |
1041 | 0 | return mActor; |
1042 | 0 | } |
1043 | | |
1044 | | static inline gfx::BackendType |
1045 | | BackendTypeForBackendSelector(LayersBackend aLayersBackend, BackendSelector aSelector) |
1046 | 0 | { |
1047 | 0 | switch (aSelector) { |
1048 | 0 | case BackendSelector::Canvas: |
1049 | 0 | return gfxPlatform::GetPlatform()->GetPreferredCanvasBackend(); |
1050 | 0 | case BackendSelector::Content: |
1051 | 0 | return gfxPlatform::GetPlatform()->GetContentBackendFor(aLayersBackend); |
1052 | 0 | default: |
1053 | 0 | MOZ_ASSERT_UNREACHABLE("Unknown backend selector"); |
1054 | 0 | return gfx::BackendType::NONE; |
1055 | 0 | } |
1056 | 0 | }; |
1057 | | |
1058 | | // static |
1059 | | already_AddRefed<TextureClient> |
1060 | | TextureClient::CreateForDrawing(KnowsCompositor* aAllocator, |
1061 | | gfx::SurfaceFormat aFormat, |
1062 | | gfx::IntSize aSize, |
1063 | | BackendSelector aSelector, |
1064 | | TextureFlags aTextureFlags, |
1065 | | TextureAllocationFlags aAllocFlags) |
1066 | 0 | { |
1067 | 0 | LayersBackend layersBackend = aAllocator->GetCompositorBackendType(); |
1068 | 0 | if (aAllocator->SupportsTextureDirectMapping() && |
1069 | 0 | std::max(aSize.width, aSize.height) <= aAllocator->GetMaxTextureSize()) { |
1070 | 0 | aAllocFlags = TextureAllocationFlags(aAllocFlags | ALLOC_ALLOW_DIRECT_MAPPING); |
1071 | 0 | } |
1072 | 0 | return TextureClient::CreateForDrawing(aAllocator->GetTextureForwarder(), |
1073 | 0 | aFormat, aSize, |
1074 | 0 | layersBackend, |
1075 | 0 | aAllocator->GetMaxTextureSize(), |
1076 | 0 | aSelector, |
1077 | 0 | aTextureFlags, |
1078 | 0 | aAllocFlags); |
1079 | 0 | } |
1080 | | |
1081 | | // static |
1082 | | already_AddRefed<TextureClient> |
1083 | | TextureClient::CreateForDrawing(TextureForwarder* aAllocator, |
1084 | | gfx::SurfaceFormat aFormat, |
1085 | | gfx::IntSize aSize, |
1086 | | LayersBackend aLayersBackend, |
1087 | | int32_t aMaxTextureSize, |
1088 | | BackendSelector aSelector, |
1089 | | TextureFlags aTextureFlags, |
1090 | | TextureAllocationFlags aAllocFlags) |
1091 | 0 | { |
1092 | 0 | gfx::BackendType moz2DBackend = BackendTypeForBackendSelector(aLayersBackend, aSelector); |
1093 | 0 |
|
1094 | 0 | // also test the validity of aAllocator |
1095 | 0 | if (!aAllocator || !aAllocator->IPCOpen()) { |
1096 | 0 | return nullptr; |
1097 | 0 | } |
1098 | 0 | |
1099 | 0 | if (!gfx::Factory::AllowedSurfaceSize(aSize)) { |
1100 | 0 | return nullptr; |
1101 | 0 | } |
1102 | 0 | |
1103 | 0 | TextureData* data = nullptr; |
1104 | 0 |
|
1105 | | #ifdef XP_WIN |
1106 | | if ((aLayersBackend == LayersBackend::LAYERS_D3D11 || |
1107 | | aLayersBackend == LayersBackend::LAYERS_WR) && |
1108 | | (moz2DBackend == gfx::BackendType::DIRECT2D || |
1109 | | moz2DBackend == gfx::BackendType::DIRECT2D1_1 || |
1110 | | (!!(aAllocFlags & ALLOC_FOR_OUT_OF_BAND_CONTENT) && |
1111 | | DeviceManagerDx::Get()->GetContentDevice())) && |
1112 | | aSize.width <= aMaxTextureSize && |
1113 | | aSize.height <= aMaxTextureSize && |
1114 | | !(aAllocFlags & ALLOC_UPDATE_FROM_SURFACE)) |
1115 | | { |
1116 | | data = DXGITextureData::Create(aSize, aFormat, aAllocFlags); |
1117 | | } |
1118 | | |
1119 | | if (aLayersBackend != LayersBackend::LAYERS_WR && |
1120 | | !data && aFormat == SurfaceFormat::B8G8R8X8 && |
1121 | | moz2DBackend == gfx::BackendType::CAIRO && |
1122 | | NS_IsMainThread()) { |
1123 | | data = DIBTextureData::Create(aSize, aFormat, aAllocator); |
1124 | | } |
1125 | | #endif |
1126 | |
|
1127 | 0 | #ifdef MOZ_X11 |
1128 | 0 | gfxSurfaceType type = |
1129 | 0 | gfxPlatform::GetPlatform()->ScreenReferenceSurface()->GetType(); |
1130 | 0 |
|
1131 | 0 | if (!data && aLayersBackend == LayersBackend::LAYERS_BASIC && |
1132 | 0 | moz2DBackend == gfx::BackendType::CAIRO && |
1133 | 0 | type == gfxSurfaceType::Xlib) |
1134 | 0 | { |
1135 | 0 | data = X11TextureData::Create(aSize, aFormat, aTextureFlags, aAllocator); |
1136 | 0 | } |
1137 | 0 | if (!data && aLayersBackend == LayersBackend::LAYERS_OPENGL && |
1138 | 0 | type == gfxSurfaceType::Xlib && |
1139 | 0 | aFormat != SurfaceFormat::A8 && |
1140 | 0 | gl::sGLXLibrary.UseTextureFromPixmap()) |
1141 | 0 | { |
1142 | 0 | data = X11TextureData::Create(aSize, aFormat, aTextureFlags, aAllocator); |
1143 | 0 | } |
1144 | 0 | #endif |
1145 | 0 |
|
1146 | | #ifdef XP_MACOSX |
1147 | | if (!data && gfxPrefs::UseIOSurfaceTextures()) { |
1148 | | data = MacIOSurfaceTextureData::Create(aSize, aFormat, moz2DBackend); |
1149 | | } |
1150 | | #endif |
1151 | |
|
1152 | | #ifdef MOZ_WIDGET_ANDROID |
1153 | | if (!data && gfxPrefs::UseSurfaceTextureTextures()) { |
1154 | | data = AndroidNativeWindowTextureData::Create(aSize, aFormat); |
1155 | | } |
1156 | | #endif |
1157 | |
|
1158 | 0 | if (data) { |
1159 | 0 | return MakeAndAddRef<TextureClient>(data, aTextureFlags, aAllocator); |
1160 | 0 | } |
1161 | 0 | |
1162 | 0 | // Can't do any better than a buffer texture client. |
1163 | 0 | return TextureClient::CreateForRawBufferAccess(aAllocator, aFormat, aSize, |
1164 | 0 | moz2DBackend, aLayersBackend, |
1165 | 0 | aTextureFlags, aAllocFlags); |
1166 | 0 | } |
1167 | | |
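
A sketch of how a caller might use the factory above together with TextureClientAutoLock (the same RAII helper used in CreateFromSurface below); aKnowsCompositor, the size and the format are placeholders:

RefPtr<TextureClient> texture =
  TextureClient::CreateForDrawing(aKnowsCompositor,              // KnowsCompositor*
                                  gfx::SurfaceFormat::B8G8R8A8,
                                  gfx::IntSize(256, 256),
                                  BackendSelector::Content,
                                  TextureFlags::DEFAULT,
                                  ALLOC_DEFAULT);
if (texture) {
  TextureClientAutoLock autoLock(texture, OpenMode::OPEN_READ_WRITE);
  if (autoLock.Succeeded()) {
    if (gfx::DrawTarget* dt = texture->BorrowDrawTarget()) {
      // fill the texture through dt
    }
  }
}
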
1168 | | // static |
1169 | | already_AddRefed<TextureClient> |
1170 | | TextureClient::CreateFromSurface(KnowsCompositor* aAllocator, |
1171 | | gfx::SourceSurface* aSurface, |
1172 | | BackendSelector aSelector, |
1173 | | TextureFlags aTextureFlags, |
1174 | | TextureAllocationFlags aAllocFlags) |
1175 | 0 | { |
1176 | 0 | // also test the validity of aAllocator |
1177 | 0 | if (!aAllocator || !aAllocator->GetTextureForwarder()->IPCOpen()) { |
1178 | 0 | return nullptr; |
1179 | 0 | } |
1180 | 0 | |
1181 | 0 | gfx::IntSize size = aSurface->GetSize(); |
1182 | 0 |
|
1183 | 0 | if (!gfx::Factory::AllowedSurfaceSize(size)) { |
1184 | 0 | return nullptr; |
1185 | 0 | } |
1186 | 0 | |
1187 | 0 | TextureData* data = nullptr; |
1188 | | #if defined(XP_WIN) |
1189 | | LayersBackend layersBackend = aAllocator->GetCompositorBackendType(); |
1190 | | gfx::BackendType moz2DBackend = BackendTypeForBackendSelector(layersBackend, aSelector); |
1191 | | |
1192 | | int32_t maxTextureSize = aAllocator->GetMaxTextureSize(); |
1193 | | |
1194 | | if ((layersBackend == LayersBackend::LAYERS_D3D11 || |
1195 | | layersBackend == LayersBackend::LAYERS_WR) && |
1196 | | (moz2DBackend == gfx::BackendType::DIRECT2D || |
1197 | | moz2DBackend == gfx::BackendType::DIRECT2D1_1 || |
1198 | | (!!(aAllocFlags & ALLOC_FOR_OUT_OF_BAND_CONTENT) && |
1199 | | DeviceManagerDx::Get()->GetContentDevice())) && |
1200 | | size.width <= maxTextureSize && |
1201 | | size.height <= maxTextureSize) |
1202 | | { |
1203 | | data = D3D11TextureData::Create(aSurface, aAllocFlags); |
1204 | | } |
1205 | | #endif |
1206 | |
|
1207 | 0 | if (data) { |
1208 | 0 | return MakeAndAddRef<TextureClient>(data, aTextureFlags, aAllocator->GetTextureForwarder()); |
1209 | 0 | } |
1210 | 0 | |
1211 | 0 | // Fall back to using UpdateFromSurface |
1212 | 0 | |
1213 | 0 | TextureAllocationFlags allocFlags = TextureAllocationFlags(aAllocFlags | ALLOC_UPDATE_FROM_SURFACE); |
1214 | 0 | RefPtr<TextureClient> client = CreateForDrawing(aAllocator, aSurface->GetFormat(), size, |
1215 | 0 | aSelector, aTextureFlags, allocFlags); |
1216 | 0 | if (!client) { |
1217 | 0 | return nullptr; |
1218 | 0 | } |
1219 | 0 | |
1220 | 0 | TextureClientAutoLock autoLock(client, OpenMode::OPEN_WRITE_ONLY); |
1221 | 0 | if (!autoLock.Succeeded()) { |
1222 | 0 | return nullptr; |
1223 | 0 | } |
1224 | 0 | |
1225 | 0 | client->UpdateFromSurface(aSurface); |
1226 | 0 | return client.forget(); |
1227 | 0 | } |
1228 | | |
1229 | | // static |
1230 | | already_AddRefed<TextureClient> |
1231 | | TextureClient::CreateForRawBufferAccess(KnowsCompositor* aAllocator, |
1232 | | gfx::SurfaceFormat aFormat, |
1233 | | gfx::IntSize aSize, |
1234 | | gfx::BackendType aMoz2DBackend, |
1235 | | TextureFlags aTextureFlags, |
1236 | | TextureAllocationFlags aAllocFlags) |
1237 | 0 | { |
1238 | 0 | // If we exceed the max texture size for the GPU, then just fall back to no |
1239 | 0 | // texture direct mapping. If it becomes a problem we can implement tiling |
1240 | 0 | // logic inside DirectMapTextureSource to allow this. |
1241 | 0 | bool supportsTextureDirectMapping = aAllocator->SupportsTextureDirectMapping() && |
1242 | 0 | std::max(aSize.width, aSize.height) <= aAllocator->GetMaxTextureSize(); |
1243 | 0 | if (supportsTextureDirectMapping) { |
1244 | 0 | aAllocFlags = TextureAllocationFlags(aAllocFlags | ALLOC_ALLOW_DIRECT_MAPPING); |
1245 | 0 | } else { |
1246 | 0 | aAllocFlags = TextureAllocationFlags(aAllocFlags & ~ALLOC_ALLOW_DIRECT_MAPPING); |
1247 | 0 | } |
1248 | 0 | return CreateForRawBufferAccess(aAllocator->GetTextureForwarder(), |
1249 | 0 | aFormat, aSize, aMoz2DBackend, |
1250 | 0 | aAllocator->GetCompositorBackendType(), |
1251 | 0 | aTextureFlags, aAllocFlags); |
1252 | 0 | } |
1253 | | |
1254 | | // static |
1255 | | already_AddRefed<TextureClient> |
1256 | | TextureClient::CreateForRawBufferAccess(LayersIPCChannel* aAllocator, |
1257 | | gfx::SurfaceFormat aFormat, |
1258 | | gfx::IntSize aSize, |
1259 | | gfx::BackendType aMoz2DBackend, |
1260 | | LayersBackend aLayersBackend, |
1261 | | TextureFlags aTextureFlags, |
1262 | | TextureAllocationFlags aAllocFlags) |
1263 | 0 | { |
1264 | 0 | // also test the validity of aAllocator |
1265 | 0 | if (!aAllocator || !aAllocator->IPCOpen()) { |
1266 | 0 | return nullptr; |
1267 | 0 | } |
1268 | 0 | |
1269 | 0 | if (aAllocFlags & ALLOC_DISALLOW_BUFFERTEXTURECLIENT) { |
1270 | 0 | return nullptr; |
1271 | 0 | } |
1272 | 0 | |
1273 | 0 | if (!gfx::Factory::AllowedSurfaceSize(aSize)) { |
1274 | 0 | return nullptr; |
1275 | 0 | } |
1276 | 0 | |
1277 | 0 | if (aFormat == SurfaceFormat::B8G8R8X8) { |
1278 | 0 | // Skia doesn't support RGBX, so ensure we clear the buffer for the proper alpha values. |
1279 | 0 | aAllocFlags = TextureAllocationFlags(aAllocFlags | ALLOC_CLEAR_BUFFER); |
1280 | 0 | } |
1281 | 0 |
|
1282 | 0 | // Note that we ignore the backend type if we get here. It should only be D2D |
1283 | 0 | // or Skia, and D2D does not support data surfaces. Therefore it is safe to |
1284 | 0 | // force the buffer to be Skia. |
1285 | 0 | NS_WARNING_ASSERTION(aMoz2DBackend == gfx::BackendType::SKIA || |
1286 | 0 | aMoz2DBackend == gfx::BackendType::DIRECT2D || |
1287 | 0 | aMoz2DBackend == gfx::BackendType::DIRECT2D1_1, |
1288 | 0 | "Unsupported TextureClient backend type"); |
1289 | 0 |
|
1290 | 0 | TextureData* texData = BufferTextureData::Create(aSize, aFormat, gfx::BackendType::SKIA, |
1291 | 0 | aLayersBackend, aTextureFlags, |
1292 | 0 | aAllocFlags, aAllocator); |
1293 | 0 | if (!texData) { |
1294 | 0 | return nullptr; |
1295 | 0 | } |
1296 | 0 | |
1297 | 0 | return MakeAndAddRef<TextureClient>(texData, aTextureFlags, aAllocator); |
1298 | 0 | } |
1299 | | |
1300 | | // static |
1301 | | already_AddRefed<TextureClient> |
1302 | | TextureClient::CreateForYCbCr(KnowsCompositor* aAllocator, |
1303 | | gfx::IntSize aYSize, |
1304 | | uint32_t aYStride, |
1305 | | gfx::IntSize aCbCrSize, |
1306 | | uint32_t aCbCrStride, |
1307 | | StereoMode aStereoMode, |
1308 | | YUVColorSpace aYUVColorSpace, |
1309 | | uint32_t aBitDepth, |
1310 | | TextureFlags aTextureFlags) |
1311 | 0 | { |
1312 | 0 | if (!aAllocator || !aAllocator->GetLayersIPCActor()->IPCOpen()) { |
1313 | 0 | return nullptr; |
1314 | 0 | } |
1315 | 0 | |
1316 | 0 | if (!gfx::Factory::AllowedSurfaceSize(aYSize)) { |
1317 | 0 | return nullptr; |
1318 | 0 | } |
1319 | 0 | |
1320 | 0 | TextureData* data = |
1321 | 0 | BufferTextureData::CreateForYCbCr(aAllocator, |
1322 | 0 | aYSize, aYStride, |
1323 | 0 | aCbCrSize, aCbCrStride, |
1324 | 0 | aStereoMode, aYUVColorSpace, |
1325 | 0 | aBitDepth, aTextureFlags); |
1326 | 0 | if (!data) { |
1327 | 0 | return nullptr; |
1328 | 0 | } |
1329 | 0 | |
1330 | 0 | return MakeAndAddRef<TextureClient>(data, aTextureFlags, |
1331 | 0 | aAllocator->GetTextureForwarder()); |
1332 | 0 | } |
1333 | | |
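
A sketch of allocating a planar YCbCr texture with the factory above and mapping it for writing; the 4:2:0 plane sizes/strides and the StereoMode/YUVColorSpace values are illustrative:

RefPtr<TextureClient> yuv =
  TextureClient::CreateForYCbCr(aKnowsCompositor,                // KnowsCompositor*
                                gfx::IntSize(640, 480), 640,     // Y plane size / stride
                                gfx::IntSize(320, 240), 320,     // CbCr plane size / stride
                                StereoMode::MONO,
                                YUVColorSpace::BT601,
                                8,                               // bit depth
                                TextureFlags::DEFAULT);
if (yuv && yuv->Lock(OpenMode::OPEN_WRITE_ONLY)) {
  MappedYCbCrTextureData mapped;
  if (yuv->BorrowMappedYCbCrData(mapped)) {
    // write the Y, Cb and Cr planes through `mapped`
  }
  yuv->Unlock();
}
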
1334 | | TextureClient::TextureClient(TextureData* aData, |
1335 | | TextureFlags aFlags, |
1336 | | LayersIPCChannel* aAllocator) |
1337 | | : AtomicRefCountedWithFinalize("TextureClient") |
1338 | | , mAllocator(aAllocator) |
1339 | | , mActor(nullptr) |
1340 | | , mData(aData) |
1341 | | , mFlags(aFlags) |
1342 | | , mOpenMode(OpenMode::OPEN_NONE) |
1343 | | #ifdef DEBUG |
1344 | | , mExpectedDtRefs(0) |
1345 | | #endif |
1346 | | , mIsLocked(false) |
1347 | | , mIsReadLocked(false) |
1348 | | , mUpdated(false) |
1349 | | , mAddedToCompositableClient(false) |
1350 | | , mWorkaroundAnnoyingSharedSurfaceLifetimeIssues(false) |
1351 | | , mWorkaroundAnnoyingSharedSurfaceOwnershipIssues(false) |
1352 | | , mFwdTransactionId(0) |
1353 | | , mSerial(++sSerialCounter) |
1354 | | #ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL |
1355 | | , mPoolTracker(nullptr) |
1356 | | #endif |
1357 | 0 | { |
1358 | 0 | mData->FillInfo(mInfo); |
1359 | 0 | mFlags |= mData->GetTextureFlags(); |
1360 | 0 |
|
1361 | 0 | if (mFlags & TextureFlags::NON_BLOCKING_READ_LOCK) { |
1362 | 0 | MOZ_ASSERT(!(mFlags & TextureFlags::BLOCKING_READ_LOCK)); |
1363 | 0 | EnableReadLock(); |
1364 | 0 | } else if (mFlags & TextureFlags::BLOCKING_READ_LOCK) { |
1365 | 0 | MOZ_ASSERT(!(mFlags & TextureFlags::NON_BLOCKING_READ_LOCK)); |
1366 | 0 | EnableBlockingReadLock(); |
1367 | 0 | } |
1368 | 0 | } |
1369 | | |
1370 | | bool TextureClient::CopyToTextureClient(TextureClient* aTarget, |
1371 | | const gfx::IntRect* aRect, |
1372 | | const gfx::IntPoint* aPoint) |
1373 | 0 | { |
1374 | 0 | MOZ_ASSERT(IsLocked()); |
1375 | 0 | MOZ_ASSERT(aTarget->IsLocked()); |
1376 | 0 |
|
1377 | 0 | if (!aTarget->CanExposeDrawTarget() || !CanExposeDrawTarget()) { |
1378 | 0 | return false; |
1379 | 0 | } |
1380 | 0 | |
1381 | 0 | RefPtr<DrawTarget> destinationTarget = aTarget->BorrowDrawTarget(); |
1382 | 0 | if (!destinationTarget) { |
1383 | 0 | gfxWarning() << "TextureClient::CopyToTextureClient (dest) failed in BorrowDrawTarget"; |
1384 | 0 | return false; |
1385 | 0 | } |
1386 | 0 |
|
1387 | 0 | RefPtr<DrawTarget> sourceTarget = BorrowDrawTarget(); |
1388 | 0 | if (!sourceTarget) { |
1389 | 0 | gfxWarning() << "TextureClient::CopyToTextureClient (src) failed in BorrowDrawTarget"; |
1390 | 0 | return false; |
1391 | 0 | } |
1392 | 0 |
|
1393 | 0 | RefPtr<gfx::SourceSurface> source = sourceTarget->Snapshot(); |
1394 | 0 | destinationTarget->CopySurface(source, |
1395 | 0 | aRect ? *aRect : gfx::IntRect(gfx::IntPoint(0, 0), GetSize()), |
1396 | 0 | aPoint ? *aPoint : gfx::IntPoint(0, 0)); |
1397 | 0 | return true; |
1398 | 0 | } |
1399 | | |
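
A short usage sketch for CopyToTextureClient(): both textures must be locked, and null rect/point arguments copy the whole source to the destination's origin (aFront/aBack are illustrative):

if (aFront->Lock(OpenMode::OPEN_READ)) {
  if (aBack->Lock(OpenMode::OPEN_READ_WRITE)) {
    aFront->CopyToTextureClient(aBack, nullptr, nullptr);   // full-surface copy
    aBack->Unlock();
  }
  aFront->Unlock();
}
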
1400 | | already_AddRefed<gfx::DataSourceSurface> |
1401 | | TextureClient::GetAsSurface() |
1402 | 0 | { |
1403 | 0 | if (!Lock(OpenMode::OPEN_READ)) { |
1404 | 0 | return nullptr; |
1405 | 0 | } |
1406 | 0 | RefPtr<gfx::DataSourceSurface> data; |
1407 | 0 | { // scope so that the DrawTarget is destroyed before Unlock() |
1408 | 0 | RefPtr<gfx::DrawTarget> dt = BorrowDrawTarget(); |
1409 | 0 | if (dt) { |
1410 | 0 | RefPtr<gfx::SourceSurface> surf = dt->Snapshot(); |
1411 | 0 | if (surf) { |
1412 | 0 | data = surf->GetDataSurface(); |
1413 | 0 | } |
1414 | 0 | } |
1415 | 0 | } |
1416 | 0 | Unlock(); |
1417 | 0 | return data.forget(); |
1418 | 0 | } |
1419 | | |
1420 | | void |
1421 | | TextureClient::PrintInfo(std::stringstream& aStream, const char* aPrefix) |
1422 | 0 | { |
1423 | 0 | aStream << aPrefix; |
1424 | 0 | aStream << nsPrintfCString("TextureClient (0x%p)", this).get(); |
1425 | 0 | AppendToString(aStream, GetSize(), " [size=", "]"); |
1426 | 0 | AppendToString(aStream, GetFormat(), " [format=", "]"); |
1427 | 0 | AppendToString(aStream, mFlags, " [flags=", "]"); |
1428 | 0 |
|
1429 | | #ifdef MOZ_DUMP_PAINTING |
1430 | | if (gfxPrefs::LayersDumpTexture()) { |
1431 | | nsAutoCString pfx(aPrefix); |
1432 | | pfx += " "; |
1433 | | |
1434 | | aStream << "\n" << pfx.get() << "Surface: "; |
1435 | | RefPtr<gfx::DataSourceSurface> dSurf = GetAsSurface(); |
1436 | | if (dSurf) { |
1437 | | aStream << gfxUtils::GetAsLZ4Base64Str(dSurf).get(); |
1438 | | } |
1439 | | } |
1440 | | #endif |
1441 | | } |
1442 | | |
1443 | | void |
1444 | | TextureClient::GPUVideoDesc(SurfaceDescriptorGPUVideo* const aOutDesc) |
1445 | 0 | { |
1446 | 0 | const auto handle = GetSerial(); |
1447 | 0 |
|
1448 | 0 | GPUVideoSubDescriptor subDesc = null_t(); |
1449 | 0 | MOZ_RELEASE_ASSERT(mData); |
1450 | 0 | mData->GetSubDescriptor(&subDesc); |
1451 | 0 |
|
1452 | 0 | *aOutDesc = SurfaceDescriptorGPUVideo(handle, std::move(subDesc)); |
1453 | 0 | } |
1454 | | |
1455 | | class MemoryTextureReadLock : public NonBlockingTextureReadLock { |
1456 | | public: |
1457 | | MemoryTextureReadLock(); |
1458 | | |
1459 | | ~MemoryTextureReadLock(); |
1460 | | |
1461 | | virtual bool ReadLock() override; |
1462 | | |
1463 | | virtual int32_t ReadUnlock() override; |
1464 | | |
1465 | | virtual int32_t GetReadCount() override; |
1466 | | |
1467 | 0 | virtual LockType GetType() override { return TYPE_NONBLOCKING_MEMORY; } |
1468 | | |
1469 | 0 | virtual bool IsValid() const override { return true; }; |
1470 | | |
1471 | | virtual bool Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) override; |
1472 | | |
1473 | | Atomic<int32_t> mReadCount; |
1474 | | }; |
1475 | | |
1476 | | // The cross-process implementation of TextureReadLock. |
1477 | | // |
1478 | | // Since we don't use cross-process reference counting for the ReadLock objects, |
1479 | | // we use the lock's internal counter as a way to know when to deallocate the |
1480 | | // underlying shmem section: when the counter is equal to 1, it means that the |
1481 | | // lock is not "held" (the texture is writable), when the counter is equal to 0 |
1482 | | // it means that we can safely deallocate the shmem section without causing a race |
1483 | | // condition with the other process. |
1484 | | class ShmemTextureReadLock : public NonBlockingTextureReadLock { |
1485 | | public: |
1486 | | struct ShmReadLockInfo { |
1487 | | int32_t readCount; |
1488 | | }; |
1489 | | |
1490 | | explicit ShmemTextureReadLock(LayersIPCChannel* aAllocator); |
1491 | | |
1492 | | ~ShmemTextureReadLock(); |
1493 | | |
1494 | | virtual bool ReadLock() override; |
1495 | | |
1496 | | virtual int32_t ReadUnlock() override; |
1497 | | |
1498 | | virtual int32_t GetReadCount() override; |
1499 | | |
1500 | 0 | virtual bool IsValid() const override { return mAllocSuccess; }; |
1501 | | |
1502 | 0 | virtual LockType GetType() override { return TYPE_NONBLOCKING_SHMEM; } |
1503 | | |
1504 | | virtual bool Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) override; |
1505 | | |
1506 | 0 | mozilla::layers::ShmemSection& GetShmemSection() { return mShmemSection; } |
1507 | | |
1508 | | explicit ShmemTextureReadLock(const mozilla::layers::ShmemSection& aShmemSection) |
1509 | | : mShmemSection(aShmemSection) |
1510 | | , mAllocSuccess(true) |
1511 | 0 | { |
1512 | 0 | MOZ_COUNT_CTOR(ShmemTextureReadLock); |
1513 | 0 | } |
1514 | | |
1515 | | ShmReadLockInfo* GetShmReadLockInfoPtr() |
1516 | 0 | { |
1517 | 0 | return reinterpret_cast<ShmReadLockInfo*> |
1518 | 0 | (mShmemSection.shmem().get<char>() + mShmemSection.offset()); |
1519 | 0 | } |
1520 | | |
1521 | | RefPtr<LayersIPCChannel> mClientAllocator; |
1522 | | mozilla::layers::ShmemSection mShmemSection; |
1523 | | bool mAllocSuccess; |
1524 | | }; |
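The comment above describes a counter protocol: the count starts at 1 in the constructor, ReadLock() increments it, ReadUnlock() decrements it, and the shmem section may only be freed once the count reaches 0. The sketch below shows just that state machine with a plain atomic; it is an illustration only, since the real class keeps the counter inside the shared-memory section so both processes observe it.

#include <stdint.h>   // int32_t
#include <atomic>
#include <cassert>
#include <cstdio>

// Sketch of the read-count protocol: 1 == lock not held (texture writable),
// 0 == the backing storage (the shmem section in the real code) can be freed.
struct ReadCountSketch {
  std::atomic<int32_t> readCount{1};   // the reference the constructor holds

  void ReadLock() { readCount.fetch_add(1, std::memory_order_acq_rel); }

  // Returns the count after the decrement; 0 tells the caller it is now safe
  // to deallocate the shared section without racing the other process.
  int32_t ReadUnlock() {
    int32_t after = readCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
    assert(after >= 0);
    return after;
  }
};

int main() {
  ReadCountSketch lock;
  lock.ReadLock();                 // a reader (the compositor) holds the lock
  assert(lock.ReadUnlock() == 1);  // reader done: back to "writable"
  assert(lock.ReadUnlock() == 0);  // owner's final unlock: storage reclaimable
  std::printf("read-count protocol ok\n");
  return 0;
}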
1525 | | |
1526 | | class CrossProcessSemaphoreReadLock : public TextureReadLock |
1527 | | { |
1528 | | public: |
1529 | | CrossProcessSemaphoreReadLock() |
1530 | | : mSemaphore(CrossProcessSemaphore::Create("TextureReadLock", 1)) |
1531 | | , mShared(false) |
1532 | 0 | {} |
1533 | | explicit CrossProcessSemaphoreReadLock(CrossProcessSemaphoreHandle aHandle) |
1534 | | : mSemaphore(CrossProcessSemaphore::Create(aHandle)) |
1535 | | , mShared(false) |
1536 | 0 | {} |
1537 | | |
1538 | | virtual bool ReadLock() override |
1539 | 0 | { |
1540 | 0 | if (!IsValid()) { |
1541 | 0 | return false; |
1542 | 0 | } |
1543 | 0 | return mSemaphore->Wait(); |
1544 | 0 | } |
1545 | | virtual bool TryReadLock(TimeDuration aTimeout) override |
1546 | 0 | { |
1547 | 0 | if (!IsValid()) { |
1548 | 0 | return false; |
1549 | 0 | } |
1550 | 0 | return mSemaphore->Wait(Some(aTimeout)); |
1551 | 0 | } |
1552 | | virtual int32_t ReadUnlock() override |
1553 | 0 | { |
1554 | 0 | if (!IsValid()) { |
1555 | 0 | return 1; |
1556 | 0 | } |
1557 | 0 | mSemaphore->Signal(); |
1558 | 0 | return 1; |
1559 | 0 | } |
1560 | 0 | virtual bool IsValid() const override { return !!mSemaphore; } |
1561 | | |
1562 | | virtual bool Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) override; |
1563 | | |
1564 | 0 | virtual LockType GetType() override { return TYPE_CROSS_PROCESS_SEMAPHORE; } |
1565 | | |
1566 | | UniquePtr<CrossProcessSemaphore> mSemaphore; |
1567 | | bool mShared; |
1568 | | }; |
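CrossProcessSemaphoreReadLock maps ReadLock(), TryReadLock() and ReadUnlock() onto Wait(), Wait(Some(aTimeout)) and Signal() on a semaphore created with an initial count of 1. The sketch below shows the same pairing as a single-process analogy only: the real CrossProcessSemaphore can be shared with the compositor process, whereas std::binary_semaphore (C++20) cannot and is used here purely to illustrate the call mapping.

#include <cassert>
#include <chrono>
#include <cstdio>
#include <semaphore>   // std::binary_semaphore, C++20

// Single-process analogy for the semaphore-backed read lock above.
struct SemReadLockSketch {
  std::binary_semaphore sem{1};   // like Create("TextureReadLock", 1)

  bool ReadLock() { sem.acquire(); return true; }            // ~ Wait()
  bool TryReadLock(std::chrono::milliseconds aTimeout) {
    return sem.try_acquire_for(aTimeout);                    // ~ Wait(Some(aTimeout))
  }
  void ReadUnlock() { sem.release(); }                       // ~ Signal()
};

int main() {
  SemReadLockSketch lock;
  assert(lock.ReadLock());
  // While the lock is held, a second acquisition times out:
  assert(!lock.TryReadLock(std::chrono::milliseconds(1)));
  lock.ReadUnlock();
  assert(lock.TryReadLock(std::chrono::milliseconds(1)));
  lock.ReadUnlock();
  std::printf("semaphore read-lock analogy ok\n");
  return 0;
}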
1569 | | |
1570 | | // static |
1571 | | already_AddRefed<TextureReadLock> |
1572 | | TextureReadLock::Deserialize(const ReadLockDescriptor& aDescriptor, ISurfaceAllocator* aAllocator) |
1573 | 0 | { |
1574 | 0 | switch (aDescriptor.type()) { |
1575 | 0 | case ReadLockDescriptor::TShmemSection: { |
1576 | 0 | const ShmemSection& section = aDescriptor.get_ShmemSection(); |
1577 | 0 | MOZ_RELEASE_ASSERT(section.shmem().IsReadable()); |
1578 | 0 | return MakeAndAddRef<ShmemTextureReadLock>(section); |
1579 | 0 | } |
1580 | 0 | case ReadLockDescriptor::Tuintptr_t: { |
1581 | 0 | if (!aAllocator->IsSameProcess()) { |
1582 | 0 | // Using a memory-based lock instead of a shmem-based one in the |
1583 | 0 | // cross-process case would be a serious security problem. |
1584 | 0 | NS_ERROR("A client process may be trying to peek at the host's address space!"); |
1585 | 0 | return nullptr; |
1586 | 0 | } |
1587 | 0 | RefPtr<TextureReadLock> lock = reinterpret_cast<MemoryTextureReadLock*>( |
1588 | 0 | aDescriptor.get_uintptr_t() |
1589 | 0 | ); |
1590 | 0 |
1591 | 0 | MOZ_ASSERT(lock); |
1592 | 0 | if (lock) { |
1593 | 0 | // The corresponding AddRef is in MemoryTextureReadLock::Serialize |
1594 | 0 | lock.get()->Release(); |
1595 | 0 | } |
1596 | 0 |
1597 | 0 | return lock.forget(); |
1598 | 0 | } |
1599 | 0 | case ReadLockDescriptor::TCrossProcessSemaphoreDescriptor: { |
1600 | 0 | return MakeAndAddRef<CrossProcessSemaphoreReadLock>(aDescriptor.get_CrossProcessSemaphoreDescriptor().sem()); |
1601 | 0 | } |
1602 | 0 | case ReadLockDescriptor::Tnull_t: { |
1603 | 0 | return nullptr; |
1604 | 0 | } |
1605 | 0 | default: { |
1606 | 0 | // Invalid descriptor. |
1607 | 0 | MOZ_DIAGNOSTIC_ASSERT(false); |
1608 | 0 | } |
1609 | 0 | } |
1610 | 0 | return nullptr; |
1611 | 0 | } |
1612 | | // static |
1613 | | already_AddRefed<TextureReadLock> |
1614 | | NonBlockingTextureReadLock::Create(LayersIPCChannel* aAllocator) |
1615 | 0 | { |
1616 | 0 | if (aAllocator->IsSameProcess()) { |
1617 | 0 | // If our compositor is in the same process, we can save some cycles by not |
1618 | 0 | // using shared memory. |
1619 | 0 | return MakeAndAddRef<MemoryTextureReadLock>(); |
1620 | 0 | } |
1621 | 0 | |
1622 | 0 | return MakeAndAddRef<ShmemTextureReadLock>(aAllocator); |
1623 | 0 | } |
1624 | | |
1625 | | MemoryTextureReadLock::MemoryTextureReadLock() |
1626 | | : mReadCount(1) |
1627 | 0 | { |
1628 | 0 | MOZ_COUNT_CTOR(MemoryTextureReadLock); |
1629 | 0 | } |
1630 | | |
1631 | | MemoryTextureReadLock::~MemoryTextureReadLock() |
1632 | 0 | { |
1634 | 0 | // Only the read count added in the constructor should remain. |
1634 | 0 | MOZ_ASSERT(mReadCount == 1); |
1635 | 0 | MOZ_COUNT_DTOR(MemoryTextureReadLock); |
1636 | 0 | } |
1637 | | |
1638 | | bool |
1639 | | MemoryTextureReadLock::Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) |
1640 | 0 | { |
1641 | 0 | // AddRef here and Release when receiving on the host side to make sure the |
1642 | 0 | // reference count doesn't go to zero before the host receives the message. |
1643 | 0 | // see TextureReadLock::Deserialize |
1644 | 0 | this->AddRef(); |
1645 | 0 | aOutput = ReadLockDescriptor(uintptr_t(this)); |
1646 | 0 | return true; |
1647 | 0 | } |
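The AddRef() in Serialize() pins the lock while its raw address travels over the (same-process) channel; the matching Release() happens in TextureReadLock::Deserialize() once the pointer has been wrapped in a RefPtr again, so the reference count never reaches zero while the message is in flight. Below is a self-contained sketch of that handoff with a hand-rolled refcount; CountedLock, SerializeLock and DeserializeLock are hypothetical stand-ins, not the real RefPtr/ReadLockDescriptor machinery.

#include <stdint.h>   // uintptr_t
#include <atomic>
#include <cassert>
#include <cstdio>

// Minimal intrusive refcount standing in for the real AddRef()/Release() pair.
struct CountedLock {
  std::atomic<int> refCnt{0};
  void AddRef()  { refCnt.fetch_add(1); }
  void Release() { if (refCnt.fetch_sub(1) == 1) delete this; }
};

// "Serialize" side: pin the object with an extra reference, then ship its raw
// address as the descriptor. Only sound when both ends share an address space.
uintptr_t SerializeLock(CountedLock* lock) {
  lock->AddRef();                              // the in-flight reference
  return reinterpret_cast<uintptr_t>(lock);
}

// "Deserialize" side: take the receiver's own reference first, then drop the
// in-flight one. The count never touches zero while the message is pending.
CountedLock* DeserializeLock(uintptr_t descriptor) {
  CountedLock* lock = reinterpret_cast<CountedLock*>(descriptor);
  lock->AddRef();                              // receiver's reference
  lock->Release();                             // matches SerializeLock's AddRef
  return lock;
}

int main() {
  CountedLock* sender = new CountedLock();
  sender->AddRef();                            // sender's own reference
  CountedLock* receiver = DeserializeLock(SerializeLock(sender));
  assert(receiver == sender && sender->refCnt.load() == 2);
  receiver->Release();                         // receiver drops its reference
  sender->Release();                           // last reference; object deleted
  std::printf("refcount handoff ok\n");
  return 0;
}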
1648 | | |
1649 | | bool |
1650 | | MemoryTextureReadLock::ReadLock() |
1651 | 0 | { |
1652 | 0 | NS_ASSERT_OWNINGTHREAD(MemoryTextureReadLock); |
1653 | 0 |
1654 | 0 | ++mReadCount; |
1655 | 0 | return true; |
1656 | 0 | } |
1657 | | |
1658 | | int32_t |
1659 | | MemoryTextureReadLock::ReadUnlock() |
1660 | 0 | { |
1661 | 0 | int32_t readCount = --mReadCount; |
1662 | 0 | MOZ_ASSERT(readCount >= 0); |
1663 | 0 |
1664 | 0 | return readCount; |
1665 | 0 | } |
1666 | | |
1667 | | int32_t |
1668 | | MemoryTextureReadLock::GetReadCount() |
1669 | 0 | { |
1670 | 0 | NS_ASSERT_OWNINGTHREAD(MemoryTextureReadLock); |
1671 | 0 | return mReadCount; |
1672 | 0 | } |
1673 | | |
1674 | | ShmemTextureReadLock::ShmemTextureReadLock(LayersIPCChannel* aAllocator) |
1675 | | : mClientAllocator(aAllocator) |
1676 | | , mAllocSuccess(false) |
1677 | 0 | { |
1678 | 0 | MOZ_COUNT_CTOR(ShmemTextureReadLock); |
1679 | 0 | MOZ_ASSERT(mClientAllocator); |
1680 | 0 | MOZ_ASSERT(mClientAllocator->GetTileLockAllocator()); |
1681 | 0 | #define MOZ_ALIGN_WORD(x) (((x) + 3) & ~3) |
1682 | 0 | if (mClientAllocator->GetTileLockAllocator()->AllocShmemSection( |
1683 | 0 | MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) { |
1684 | 0 | ShmReadLockInfo* info = GetShmReadLockInfoPtr(); |
1685 | 0 | info->readCount = 1; |
1686 | 0 | mAllocSuccess = true; |
1687 | 0 | } |
1688 | 0 | } |
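MOZ_ALIGN_WORD rounds the requested byte count up to the next multiple of 4 so the shmem section stays word-aligned: sizes 1 through 4 map to 4, sizes 5 through 8 map to 8, and so on. A few compile-time checks of that arithmetic; AlignWord is just a constexpr restatement of the macro for illustration.

#include <stddef.h>   // size_t
#include <stdint.h>   // int32_t

// constexpr restatement of the MOZ_ALIGN_WORD macro above:
// round a byte count up to the next multiple of 4.
constexpr size_t AlignWord(size_t x) { return (x + 3) & ~size_t(3); }

static_assert(AlignWord(1) == 4, "1..4 round up to 4");
static_assert(AlignWord(4) == 4, "already-aligned sizes are unchanged");
static_assert(AlignWord(5) == 8, "5..8 round up to 8");
static_assert(AlignWord(sizeof(int32_t)) == 4, "ShmReadLockInfo is one int32_t");

int main() { return 0; }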
1689 | | |
1690 | | ShmemTextureReadLock::~ShmemTextureReadLock() |
1691 | 0 | { |
1692 | 0 | if (mClientAllocator) { |
1693 | 0 | // Release the read count that was added in the constructor. |
1694 | 0 | // The count is kept so that TextureClientPool can call GetReadCount(). |
1695 | 0 | ReadUnlock(); |
1696 | 0 | } |
1697 | 0 | MOZ_COUNT_DTOR(ShmemTextureReadLock); |
1698 | 0 | } |
1699 | | |
1700 | | bool |
1701 | | ShmemTextureReadLock::Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) |
1702 | 0 | { |
1703 | 0 | aOutput = ReadLockDescriptor(GetShmemSection()); |
1704 | 0 | return true; |
1705 | 0 | } |
1706 | | |
1707 | | bool |
1708 | 0 | ShmemTextureReadLock::ReadLock() { |
1709 | 0 | NS_ASSERT_OWNINGTHREAD(ShmemTextureReadLock); |
1710 | 0 | if (!mAllocSuccess) { |
1711 | 0 | return false; |
1712 | 0 | } |
1713 | 0 | ShmReadLockInfo* info = GetShmReadLockInfoPtr(); |
1714 | 0 | PR_ATOMIC_INCREMENT(&info->readCount); |
1715 | 0 | return true; |
1716 | 0 | } |
1717 | | |
1718 | | int32_t |
1719 | 0 | ShmemTextureReadLock::ReadUnlock() { |
1720 | 0 | if (!mAllocSuccess) { |
1721 | 0 | return 0; |
1722 | 0 | } |
1723 | 0 | ShmReadLockInfo* info = GetShmReadLockInfoPtr(); |
1724 | 0 | int32_t readCount = PR_ATOMIC_DECREMENT(&info->readCount); |
1725 | 0 | MOZ_ASSERT(readCount >= 0); |
1726 | 0 | if (readCount <= 0) { |
1727 | 0 | if (mClientAllocator && mClientAllocator->GetTileLockAllocator()) { |
1728 | 0 | mClientAllocator->GetTileLockAllocator()->DeallocShmemSection(mShmemSection); |
1729 | 0 | } else { |
1730 | 0 | // We are in the compositor process, or IPC is down. |
1731 | 0 | FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mShmemSection); |
1732 | 0 | } |
1733 | 0 | } |
1734 | 0 | return readCount; |
1735 | 0 | } |
1736 | | |
1737 | | int32_t |
1738 | 0 | ShmemTextureReadLock::GetReadCount() { |
1739 | 0 | NS_ASSERT_OWNINGTHREAD(ShmemTextureReadLock); |
1740 | 0 | if (!mAllocSuccess) { |
1741 | 0 | return 0; |
1742 | 0 | } |
1743 | 0 | ShmReadLockInfo* info = GetShmReadLockInfoPtr(); |
1744 | 0 | return info->readCount; |
1745 | 0 | } |
1746 | | |
1747 | | bool |
1748 | | CrossProcessSemaphoreReadLock::Serialize(ReadLockDescriptor& aOutput, base::ProcessId aOther) |
1749 | 0 | { |
1750 | 0 | if (!mShared && IsValid()) { |
1751 | 0 | aOutput = ReadLockDescriptor(CrossProcessSemaphoreDescriptor(mSemaphore->ShareToProcess(aOther))); |
1752 | 0 | mSemaphore->CloseHandle(); |
1753 | 0 | mShared = true; |
1754 | 0 | return true; |
1755 | 0 | } else { |
1756 | 0 | return mShared; |
1757 | 0 | } |
1758 | 0 | } |
1759 | | |
1760 | | void |
1761 | | TextureClient::EnableBlockingReadLock() |
1762 | 0 | { |
1763 | 0 | if (!mReadLock) { |
1764 | 0 | mReadLock = new CrossProcessSemaphoreReadLock(); |
1765 | 0 | } |
1766 | 0 | } |
1767 | | |
1768 | | void |
1769 | | TextureClient::AddPaintThreadRef() |
1770 | 0 | { |
1771 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
1772 | 0 | mPaintThreadRefs += 1; |
1773 | 0 | } |
1774 | | |
1775 | | void |
1776 | | TextureClient::DropPaintThreadRef() |
1777 | 0 | { |
1778 | 0 | MOZ_RELEASE_ASSERT(PaintThread::Get()->IsOnPaintWorkerThread()); |
1779 | 0 | MOZ_RELEASE_ASSERT(mPaintThreadRefs >= 1); |
1780 | 0 | mPaintThreadRefs -= 1; |
1781 | 0 | } |
1782 | | |
1783 | | bool |
1784 | | UpdateYCbCrTextureClient(TextureClient* aTexture, const PlanarYCbCrData& aData) |
1785 | 0 | { |
1786 | 0 | MOZ_ASSERT(aTexture); |
1787 | 0 | MOZ_ASSERT(aTexture->IsLocked()); |
1788 | 0 | MOZ_ASSERT(aTexture->GetFormat() == gfx::SurfaceFormat::YUV, "This textureClient can only use YCbCr data"); |
1789 | 0 | MOZ_ASSERT(!aTexture->IsImmutable()); |
1790 | 0 | MOZ_ASSERT(aTexture->IsValid()); |
1791 | 0 | MOZ_ASSERT(aData.mCbSkip == aData.mCrSkip); |
1792 | 0 |
1793 | 0 | MappedYCbCrTextureData mapped; |
1794 | 0 | if (!aTexture->BorrowMappedYCbCrData(mapped)) { |
1795 | 0 | NS_WARNING("Failed to extract YCbCr info!"); |
1796 | 0 | return false; |
1797 | 0 | } |
1798 | 0 |
1799 | 0 | MappedYCbCrTextureData srcData; |
1800 | 0 | srcData.y.data = aData.mYChannel; |
1801 | 0 | srcData.y.size = aData.mYSize; |
1802 | 0 | srcData.y.stride = aData.mYStride; |
1803 | 0 | srcData.y.skip = aData.mYSkip; |
1804 | 0 | MOZ_ASSERT(aData.mBitDepth == 8 || (aData.mBitDepth > 8 && aData.mBitDepth <= 16)); |
1805 | 0 | srcData.y.bytesPerPixel = (aData.mBitDepth > 8) ? 2 : 1; |
1806 | 0 | srcData.cb.data = aData.mCbChannel; |
1807 | 0 | srcData.cb.size = aData.mCbCrSize; |
1808 | 0 | srcData.cb.stride = aData.mCbCrStride; |
1809 | 0 | srcData.cb.skip = aData.mCbSkip; |
1810 | 0 | srcData.cb.bytesPerPixel = (aData.mBitDepth > 8) ? 2 : 1; |
1811 | 0 | srcData.cr.data = aData.mCrChannel; |
1812 | 0 | srcData.cr.size = aData.mCbCrSize; |
1813 | 0 | srcData.cr.stride = aData.mCbCrStride; |
1814 | 0 | srcData.cr.skip = aData.mCrSkip; |
1815 | 0 | srcData.cr.bytesPerPixel = (aData.mBitDepth > 8) ? 2 : 1; |
1816 | 0 | srcData.metadata = nullptr; |
1817 | 0 |
1818 | 0 | if (!srcData.CopyInto(mapped)) { |
1819 | 0 | NS_WARNING("Failed to copy image data!"); |
1820 | 0 | return false; |
1821 | 0 | } |
1822 | 0 |
1823 | 0 | if (TextureRequiresLocking(aTexture->GetFlags())) { |
1824 | 0 | // We don't have support for proper locking yet, so we'll |
1825 | 0 | // have to be immutable instead. |
1826 | 0 | aTexture->MarkImmutable(); |
1827 | 0 | } |
1828 | 0 | return true; |
1829 | 0 | } |
1830 | | |
1831 | | already_AddRefed<TextureClient> |
1832 | | TextureClient::CreateWithData(TextureData* aData, TextureFlags aFlags, LayersIPCChannel* aAllocator) |
1833 | 0 | { |
1834 | 0 | if (!aData) { |
1835 | 0 | return nullptr; |
1836 | 0 | } |
1837 | 0 | return MakeAndAddRef<TextureClient>(aData, aFlags, aAllocator); |
1838 | 0 | } |
1839 | | |
1840 | | template<class PixelDataType> |
1841 | | static void |
1842 | | copyData(PixelDataType* aDst, |
1843 | | const MappedYCbCrChannelData& aChannelDst, |
1844 | | PixelDataType* aSrc, |
1845 | | const MappedYCbCrChannelData& aChannelSrc) |
1846 | 0 | { |
1847 | 0 | uint8_t* srcByte = reinterpret_cast<uint8_t*>(aSrc); |
1848 | 0 | const int32_t srcSkip = aChannelSrc.skip + 1; |
1849 | 0 | uint8_t* dstByte = reinterpret_cast<uint8_t*>(aDst); |
1850 | 0 | const int32_t dstSkip = aChannelDst.skip + 1; |
1851 | 0 | for (int32_t i = 0; i < aChannelSrc.size.height; ++i) { |
1852 | 0 | for (int32_t j = 0; j < aChannelSrc.size.width; ++j) { |
1853 | 0 | *aDst = *aSrc; |
1854 | 0 | aSrc += srcSkip; |
1855 | 0 | aDst += dstSkip; |
1856 | 0 | } |
1857 | 0 | srcByte += aChannelSrc.stride; |
1858 | 0 | aSrc = reinterpret_cast<PixelDataType*>(srcByte); |
1859 | 0 | dstByte += aChannelDst.stride; |
1860 | 0 | aDst = reinterpret_cast<PixelDataType*>(dstByte); |
1861 | 0 | } |
1862 | 0 | } |
Unexecuted instantiation: Unified_cpp_gfx_layers5.cpp:void mozilla::layers::copyData<unsigned char>(unsigned char*, mozilla::layers::MappedYCbCrChannelData const&, unsigned char*, mozilla::layers::MappedYCbCrChannelData const&)
Unexecuted instantiation: Unified_cpp_gfx_layers5.cpp:void mozilla::layers::copyData<unsigned short>(unsigned short*, mozilla::layers::MappedYCbCrChannelData const&, unsigned short*, mozilla::layers::MappedYCbCrChannelData const&)
1863 | | |
1864 | | bool |
1865 | | MappedYCbCrChannelData::CopyInto(MappedYCbCrChannelData& aDst) |
1866 | 0 | { |
1867 | 0 | if (!data || !aDst.data || size != aDst.size) { |
1868 | 0 | return false; |
1869 | 0 | } |
1870 | 0 | |
1871 | 0 | if (stride == aDst.stride && skip == aDst.skip) { |
1872 | 0 | // fast path! |
1873 | 0 | // We assume that the padding in the destination is there for alignment |
1874 | 0 | // purposes and doesn't contain useful data. |
1875 | 0 | memcpy(aDst.data, data, stride * size.height); |
1876 | 0 | return true; |
1877 | 0 | } |
1878 | 0 | |
1879 | 0 | if (aDst.skip == 0 && skip == 0) { |
1880 | 0 | // fast-ish path |
1881 | 0 | for (int32_t i = 0; i < size.height; ++i) { |
1882 | 0 | memcpy(aDst.data + i * aDst.stride, |
1883 | 0 | data + i * stride, |
1884 | 0 | size.width * bytesPerPixel); |
1885 | 0 | } |
1886 | 0 | return true; |
1887 | 0 | } |
1888 | 0 |
1889 | 0 | MOZ_ASSERT(bytesPerPixel == 1 || bytesPerPixel == 2); |
1890 | 0 | // slow path |
1891 | 0 | if (bytesPerPixel == 1) { |
1892 | 0 | copyData(aDst.data, aDst, data, *this); |
1893 | 0 | } else if (bytesPerPixel == 2) { |
1894 | 0 | if (skip != 0) { |
1895 | 0 | // The skip value definition doesn't specify whether it's in bytes or in |
1896 | 0 | // "pixels". We will assume the latter. There are currently no decoders |
1897 | 0 | // returning HDR content with a non-zero skip value anyway. |
1898 | 0 | NS_WARNING("skip value non zero for HDR content, please verify code " |
1899 | 0 | "(see bug 1421187)"); |
1900 | 0 | } |
1901 | 0 | copyData(reinterpret_cast<uint16_t*>(aDst.data), |
1902 | 0 | aDst, |
1903 | 0 | reinterpret_cast<uint16_t*>(data), |
1904 | 0 | *this); |
1905 | 0 | } |
1906 | 0 | return true; |
1907 | 0 | } |
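CopyInto() picks between three strategies: a single memcpy when stride and skip match, a per-row memcpy when both skips are zero, and the per-pixel copyData() loop otherwise. The addressing the slow path steps through is byte offset = y * stride + x * (skip + 1) * bytesPerPixel for pixel (x, y). The sketch below checks that arithmetic with hypothetical toy dimensions; ToyChannel and CopyChannel are illustrations only, not real Gecko types.

#include <stdint.h>   // uint8_t, int32_t
#include <stddef.h>   // size_t
#include <cassert>
#include <cstdio>
#include <vector>

// Toy channel layout mirroring MappedYCbCrChannelData's fields. `skip` is
// counted in pixels and each row is padded out to `stride` bytes.
struct ToyChannel {
  std::vector<uint8_t> bytes;
  int32_t width, height, stride, skip, bytesPerPixel;

  // Byte offset of pixel (x, y) -- the addressing the slow path steps through.
  size_t Offset(int32_t x, int32_t y) const {
    return size_t(y) * stride + size_t(x) * (skip + 1) * bytesPerPixel;
  }
};

// Per-pixel copy for mismatched layouts, in the spirit of copyData() above
// (single-byte samples only, for brevity).
void CopyChannel(const ToyChannel& src, ToyChannel& dst) {
  assert(src.width == dst.width && src.height == dst.height);
  for (int32_t y = 0; y < src.height; ++y) {
    for (int32_t x = 0; x < src.width; ++x) {
      dst.bytes[dst.Offset(x, y)] = src.bytes[src.Offset(x, y)];
    }
  }
}

int main() {
  // Source: 4x2 pixels, skip of 1 (every other byte is a sample), stride 16.
  ToyChannel src{std::vector<uint8_t>(32, 0), 4, 2, 16, 1, 1};
  // Destination: tightly packed, stride 4, no skip.
  ToyChannel dst{std::vector<uint8_t>(8, 0), 4, 2, 4, 0, 1};

  for (int32_t y = 0; y < src.height; ++y) {
    for (int32_t x = 0; x < src.width; ++x) {
      src.bytes[src.Offset(x, y)] = uint8_t(10 * y + x);
    }
  }

  CopyChannel(src, dst);
  assert(dst.bytes[dst.Offset(3, 1)] == 13);   // pixel (3, 1) survived the copy
  std::printf("stride/skip copy ok\n");
  return 0;
}

The tightly packed destination here corresponds to the skip == 0 case; if the strides matched as well, the whole copy would collapse into the single-memcpy fast path.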
1908 | | |
1909 | | } // namespace layers |
1910 | | } // namespace mozilla |