/src/mozilla-central/gfx/layers/wr/IpcResourceUpdateQueue.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "IpcResourceUpdateQueue.h"
#include <string.h>
#include <algorithm>
#include "mozilla/Maybe.h"
#include "mozilla/ipc/SharedMemory.h"
#include "mozilla/layers/PTextureChild.h"
#include "mozilla/layers/WebRenderBridgeChild.h"

namespace mozilla {
namespace wr {

using namespace mozilla::layers;

ShmSegmentsWriter::ShmSegmentsWriter(layers::WebRenderBridgeChild* aAllocator, size_t aChunkSize)
  : mShmAllocator(aAllocator)
  , mCursor(0)
  , mChunkSize(aChunkSize)
{
  MOZ_ASSERT(mShmAllocator);
}

ShmSegmentsWriter::~ShmSegmentsWriter()
{
  Clear();
}

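// Serializes aBytes into shared memory and returns an OffsetRange describing
// where the data was written. Payloads of at least four chunks' worth of data
// get a dedicated shmem via AllocLargeChunk(); smaller payloads are appended
// to the running stream of fixed-size chunks, allocating new chunks on
// demand. If a chunk allocation fails part-way through, the chunks added by
// this call are deallocated again and an empty range is returned.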
layers::OffsetRange
ShmSegmentsWriter::Write(Range<uint8_t> aBytes)
{
  const size_t start = mCursor;
  const size_t length = aBytes.length();

  if (length >= mChunkSize * 4) {
    auto range = AllocLargeChunk(length);
    if (range.length()) {
      // Allocation was successful
      uint8_t* dstPtr = mLargeAllocs.LastElement().get<uint8_t>();
      memcpy(dstPtr, aBytes.begin().get(), length);
    }
    return range;
  }

  int remainingBytesToCopy = length;

  size_t srcCursor = 0;
  size_t dstCursor = mCursor;
  size_t currAllocLen = mSmallAllocs.Length();

  while (remainingBytesToCopy > 0) {
    if (dstCursor >= mSmallAllocs.Length() * mChunkSize) {
      if (!AllocChunk()) {
        // Allocation failed, so roll back to the state at the start of this
        // Write() call and abort.
        for (size_t i = mSmallAllocs.Length() ; currAllocLen < i ; i--) {
          MOZ_ASSERT(i > 0);
          RefCountedShmem& shm = mSmallAllocs.ElementAt(i - 1);
          RefCountedShm::Dealloc(mShmAllocator, shm);
          mSmallAllocs.RemoveElementAt(i - 1);
        }
        MOZ_ASSERT(mSmallAllocs.Length() == currAllocLen);
        return layers::OffsetRange(0, start, 0);
      }
      // Allocation succeeded, so dstCursor should now be pointing to
      // something inside the allocation buffer
      MOZ_ASSERT(dstCursor < (mSmallAllocs.Length() * mChunkSize));
    }

    const size_t dstMaxOffset = mChunkSize * mSmallAllocs.Length();
    const size_t dstBaseOffset = mChunkSize * (mSmallAllocs.Length() - 1);

    MOZ_ASSERT(dstCursor >= dstBaseOffset);
    MOZ_ASSERT(dstCursor <= dstMaxOffset);

    size_t availableRange = dstMaxOffset - dstCursor;
    size_t copyRange = std::min<int>(availableRange, remainingBytesToCopy);

    uint8_t* srcPtr = &aBytes[srcCursor];
    uint8_t* dstPtr = RefCountedShm::GetBytes(mSmallAllocs.LastElement()) + (dstCursor - dstBaseOffset);

    memcpy(dstPtr, srcPtr, copyRange);

    srcCursor += copyRange;
    dstCursor += copyRange;
    remainingBytesToCopy -= copyRange;

    // sanity check
    MOZ_ASSERT(remainingBytesToCopy >= 0);
  }

  mCursor += length;

  return layers::OffsetRange(0, start, length);
}

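// Allocates one refcounted shmem of mChunkSize bytes from the
// WebRenderBridgeChild and appends it to mSmallAllocs.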
bool
ShmSegmentsWriter::AllocChunk()
{
  RefCountedShmem shm;
  if (!mShmAllocator->AllocResourceShmem(mChunkSize, shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate chunk #" << mSmallAllocs.Length();
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate chunk");
    return false;
  }
  RefCountedShm::AddRef(shm);
  mSmallAllocs.AppendElement(shm);
  return true;
}

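// Allocates a dedicated (non-refcounted) Shmem for an oversized payload. The
// returned OffsetRange uses the 1-based index into mLargeAllocs as its
// source; source 0 is reserved for the small-chunk stream.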
layers::OffsetRange
ShmSegmentsWriter::AllocLargeChunk(size_t aSize)
{
  ipc::Shmem shm;
  auto shmType = ipc::SharedMemory::SharedMemoryType::TYPE_BASIC;
  if (!mShmAllocator->AllocShmem(aSize, shmType, &shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate large chunk of size " << aSize;
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate large chunk");
    return layers::OffsetRange(0, 0, 0);
  }
  mLargeAllocs.AppendElement(shm);

  return layers::OffsetRange(mLargeAllocs.Length(), 0, aSize);
}

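// Transfers ownership of the accumulated shmems to the caller's (empty)
// arrays and resets the write cursor.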
void
ShmSegmentsWriter::Flush(nsTArray<RefCountedShmem>& aSmallAllocs, nsTArray<ipc::Shmem>& aLargeAllocs)
{
  MOZ_ASSERT(aSmallAllocs.IsEmpty());
  MOZ_ASSERT(aLargeAllocs.IsEmpty());
  mSmallAllocs.SwapElements(aSmallAllocs);
  mLargeAllocs.SwapElements(aLargeAllocs);
  mCursor = 0;
}

bool
ShmSegmentsWriter::IsEmpty() const
{
  return mCursor == 0;
}

void
ShmSegmentsWriter::Clear()
{
  if (mShmAllocator) {
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mSmallAllocs);
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mLargeAllocs);
  }
  mCursor = 0;
}

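// Validates the small and large shmem arrays received from the writer side;
// mChunkSize doubles as the validity flag (zero means the reader is unusable).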
ShmSegmentsReader::ShmSegmentsReader(const nsTArray<RefCountedShmem>& aSmallShmems,
                                     const nsTArray<ipc::Shmem>& aLargeShmems)
  : mSmallAllocs(aSmallShmems)
  , mLargeAllocs(aLargeShmems)
  , mChunkSize(0)
{
  if (mSmallAllocs.IsEmpty()) {
    return;
  }

  mChunkSize = RefCountedShm::GetSize(mSmallAllocs[0]);

  // Check that all shmems are readable and have the same size. If anything
  // isn't right, set mChunkSize to zero, which signifies that the reader is
  // in an invalid state and Read calls will return false.
  for (const auto& shm : mSmallAllocs) {
    if (!RefCountedShm::IsValid(shm)
        || RefCountedShm::GetSize(shm) != mChunkSize
        || RefCountedShm::GetBytes(shm) == nullptr) {
      mChunkSize = 0;
      return;
    }
  }

  for (const auto& shm : mLargeAllocs) {
    if (!shm.IsReadable()
        || shm.get<uint8_t>() == nullptr) {
      mChunkSize = 0;
      return;
    }
  }
}

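// Copies a range that lives in one of the dedicated large shmems (source() is
// its 1-based index) into aInto. Returns false if the index or length is out
// of bounds.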
bool
ShmSegmentsReader::ReadLarge(const layers::OffsetRange& aRange, wr::Vec<uint8_t>& aInto)
{
  // source = zero is for small allocs.
  MOZ_RELEASE_ASSERT(aRange.source() != 0);
  if (aRange.source() > mLargeAllocs.Length()) {
    return false;
  }
  size_t id = aRange.source() - 1;
  const ipc::Shmem& shm = mLargeAllocs[id];
  if (shm.Size<uint8_t>() < aRange.length()) {
    return false;
  }

  uint8_t* srcPtr = shm.get<uint8_t>();
  aInto.PushBytes(Range<uint8_t>(srcPtr, aRange.length()));

  return true;
}

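// Copies aRange into aInto. Empty ranges succeed trivially, ranges with a
// non-zero source are delegated to ReadLarge(), and everything else is
// gathered chunk by chunk from the small allocations, since a range may
// straddle chunk boundaries.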
bool
ShmSegmentsReader::Read(const layers::OffsetRange& aRange, wr::Vec<uint8_t>& aInto)
{
  if (aRange.length() == 0) {
    return true;
  }

  if (aRange.source() != 0) {
    return ReadLarge(aRange, aInto);
  }

  if (mChunkSize == 0) {
    return false;
  }

  if (aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
    return false;
  }

  size_t initialLength = aInto.Length();

  size_t srcCursor = aRange.start();
  int remainingBytesToCopy = aRange.length();
  while (remainingBytesToCopy > 0) {
    const size_t shm_idx = srcCursor / mChunkSize;
    const size_t ptrOffset = srcCursor % mChunkSize;
    const size_t copyRange = std::min<int>(remainingBytesToCopy, mChunkSize - ptrOffset);
    uint8_t* srcPtr = RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;

    aInto.PushBytes(Range<uint8_t>(srcPtr, copyRange));

    srcCursor += copyRange;
    remainingBytesToCopy -= copyRange;
  }

  return aInto.Length() - initialLength == aRange.length();
}

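// IpcResourceUpdateQueue batches resource updates (images, blobs, fonts) as
// OpUpdateResource entries, with the raw bytes staged in shared memory by the
// ShmSegmentsWriter.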
IpcResourceUpdateQueue::IpcResourceUpdateQueue(layers::WebRenderBridgeChild* aAllocator,
                                               size_t aChunkSize)
  : mWriter(std::move(aAllocator), aChunkSize)
{}

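// The Add*/Update* methods that take raw bytes stage them through the writer
// and record the corresponding op; they return false when the shared-memory
// write fails (signalled by an empty OffsetRange).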
bool
IpcResourceUpdateQueue::AddImage(ImageKey key, const ImageDescriptor& aDescriptor,
                                 Range<uint8_t> aBytes)
{
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddImage(aDescriptor, bytes, 0, key));
  return true;
}

bool
IpcResourceUpdateQueue::AddBlobImage(ImageKey key, const ImageDescriptor& aDescriptor,
                                     Range<uint8_t> aBytes)
{
  MOZ_RELEASE_ASSERT(aDescriptor.width > 0 && aDescriptor.height > 0);
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddBlobImage(aDescriptor, bytes, 0, key));
  return true;
}

void
IpcResourceUpdateQueue::AddExternalImage(wr::ExternalImageId aExtId, wr::ImageKey aKey)
{
  mUpdates.AppendElement(layers::OpAddExternalImage(aExtId, aKey));
}

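// Associates an external image id and image key with an existing
// TextureClient. The texture's IPDL actor must live on the same IPC channel
// as the WebRenderBridgeChild backing this queue.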
void
IpcResourceUpdateQueue::PushExternalImageForTexture(wr::ExternalImageId aExtId,
                                                    wr::ImageKey aKey,
                                                    layers::TextureClient* aTexture,
                                                    bool aIsUpdate)
{
  MOZ_ASSERT(aTexture);
  MOZ_ASSERT(aTexture->GetIPDLActor());
  MOZ_RELEASE_ASSERT(aTexture->GetIPDLActor()->GetIPCChannel() == mWriter.WrBridge()->GetIPCChannel());
  mUpdates.AppendElement(layers::OpPushExternalImageForTexture(aExtId, aKey, nullptr, aTexture->GetIPDLActor(), aIsUpdate));
}

bool
IpcResourceUpdateQueue::UpdateImageBuffer(ImageKey aKey,
                                          const ImageDescriptor& aDescriptor,
                                          Range<uint8_t> aBytes)
{
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateImage(aDescriptor, bytes, aKey));
  return true;
}

bool
IpcResourceUpdateQueue::UpdateBlobImage(ImageKey aKey,
                                        const ImageDescriptor& aDescriptor,
                                        Range<uint8_t> aBytes,
                                        ImageIntRect aDirtyRect)
{
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateBlobImage(aDescriptor, bytes, aKey, aDirtyRect));
  return true;
}

void
IpcResourceUpdateQueue::UpdateExternalImage(wr::ExternalImageId aExtId,
                                            wr::ImageKey aKey,
                                            ImageIntRect aDirtyRect)
{
  mUpdates.AppendElement(layers::OpUpdateExternalImage(aExtId, aKey, aDirtyRect));
}

void
IpcResourceUpdateQueue::SetImageVisibleArea(ImageKey aKey, const gfx::Rect& aArea)
{
  mUpdates.AppendElement(layers::OpSetImageVisibleArea(aArea, aKey));
}

void
IpcResourceUpdateQueue::DeleteImage(ImageKey aKey)
{
  mUpdates.AppendElement(layers::OpDeleteImage(aKey));
}

bool
IpcResourceUpdateQueue::AddRawFont(wr::FontKey aKey, Range<uint8_t> aBytes, uint32_t aIndex)
{
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddRawFont(bytes, aIndex, aKey));
  return true;
}

bool
IpcResourceUpdateQueue::AddFontDescriptor(wr::FontKey aKey, Range<uint8_t> aBytes, uint32_t aIndex)
{
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddFontDescriptor(bytes, aIndex, aKey));
  return true;
}

void
IpcResourceUpdateQueue::DeleteFont(wr::FontKey aKey)
{
  mUpdates.AppendElement(layers::OpDeleteFont(aKey));
}

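// Font variations are serialized through the writer as raw bytes; the
// optional instance options are wrapped in Maybe<> so that their absence can
// be encoded in the op.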
void
IpcResourceUpdateQueue::AddFontInstance(wr::FontInstanceKey aKey,
                                        wr::FontKey aFontKey,
                                        float aGlyphSize,
                                        const wr::FontInstanceOptions* aOptions,
                                        const wr::FontInstancePlatformOptions* aPlatformOptions,
                                        Range<const gfx::FontVariation> aVariations)
{
  auto bytes = mWriter.WriteAsBytes(aVariations);
  mUpdates.AppendElement(layers::OpAddFontInstance(
    aOptions ? Some(*aOptions) : Nothing(),
    aPlatformOptions ? Some(*aPlatformOptions) : Nothing(),
    bytes,
    aKey, aFontKey,
    aGlyphSize
  ));
}

void
IpcResourceUpdateQueue::DeleteFontInstance(wr::FontInstanceKey aKey)
{
  mUpdates.AppendElement(layers::OpDeleteFontInstance(aKey));
}

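// Moves the queued ops and the backing shmems out to the caller's arrays,
// leaving the queue empty.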
void
IpcResourceUpdateQueue::Flush(nsTArray<layers::OpUpdateResource>& aUpdates,
                              nsTArray<layers::RefCountedShmem>& aSmallAllocs,
                              nsTArray<ipc::Shmem>& aLargeAllocs)
{
  aUpdates.Clear();
  mUpdates.SwapElements(aUpdates);
  mWriter.Flush(aSmallAllocs, aLargeAllocs);
}

bool
IpcResourceUpdateQueue::IsEmpty() const
{
  if (mUpdates.Length() == 0) {
    MOZ_ASSERT(mWriter.IsEmpty());
    return true;
  }
  return false;
}

void
IpcResourceUpdateQueue::Clear()
{
  mWriter.Clear();
  mUpdates.Clear();
}

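// Release a set of shmems once they are no longer needed: refcounted chunks
// are only deallocated when their refcount drops to zero, while plain Shmems
// are deallocated unconditionally.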
// static
void
IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator, nsTArray<layers::RefCountedShmem>& aShms)
{
  for (auto& shm : aShms) {
    if (RefCountedShm::IsValid(shm) && RefCountedShm::Release(shm) == 0) {
      RefCountedShm::Dealloc(aShmAllocator, shm);
    }
  }
  aShms.Clear();
}

// static
void
IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator, nsTArray<ipc::Shmem>& aShms)
{
  for (auto& shm : aShms) {
    aShmAllocator->DeallocShmem(shm);
  }
  aShms.Clear();
}

} // namespace wr
} // namespace mozilla