/src/mozilla-central/image/imgFrame.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "imgFrame.h" |
8 | | #include "ImageRegion.h" |
9 | | #include "ShutdownTracker.h" |
10 | | #include "SurfaceCache.h" |
11 | | |
12 | | #include "prenv.h" |
13 | | |
14 | | #include "gfx2DGlue.h" |
15 | | #include "gfxPlatform.h" |
16 | | #include "gfxPrefs.h" |
17 | | #include "gfxUtils.h" |
18 | | |
19 | | #include "GeckoProfiler.h" |
20 | | #include "MainThreadUtils.h" |
21 | | #include "mozilla/CheckedInt.h" |
22 | | #include "mozilla/gfx/gfxVars.h" |
23 | | #include "mozilla/gfx/Tools.h" |
24 | | #include "mozilla/gfx/SourceSurfaceRawData.h" |
25 | | #include "mozilla/layers/SourceSurfaceSharedData.h" |
26 | | #include "mozilla/layers/SourceSurfaceVolatileData.h" |
27 | | #include "mozilla/Likely.h" |
28 | | #include "mozilla/MemoryReporting.h" |
29 | | #include "nsMargin.h" |
30 | | #include "nsThreadUtils.h" |
31 | | |
32 | | namespace mozilla { |
33 | | |
34 | | using namespace gfx; |
35 | | |
36 | | namespace image { |
37 | | |
38 | | static void |
39 | | ScopedMapRelease(void* aMap) |
40 | 0 | { |
41 | 0 | delete static_cast<DataSourceSurface::ScopedMap*>(aMap); |
42 | 0 | } |
43 | | |
44 | | static int32_t |
45 | | VolatileSurfaceStride(const IntSize& size, SurfaceFormat format) |
46 | 0 | { |
47 | 0 | // Stride must be a multiple of four or cairo will complain. |
48 | 0 | return (size.width * BytesPerPixel(format) + 0x3) & ~0x3; |
49 | 0 | } |
50 | | |
// Returns a surface whose data remains mapped for the surface's lifetime.
// Shared-memory surfaces are returned as-is (their memory is never released
// until the surface itself dies); otherwise the data is mapped through a
// heap-allocated ScopedMap whose ownership transfers to the wrapping surface
// and is released via ScopedMapRelease. Returns nullptr if mapping or
// wrapping fails.
static already_AddRefed<DataSourceSurface>
CreateLockedSurface(DataSourceSurface *aSurface,
                    const IntSize& size,
                    SurfaceFormat format)
{
  // Shared memory is never released until the surface itself is released
  if (aSurface->GetType() == SurfaceType::DATA_SHARED) {
    RefPtr<DataSourceSurface> surf(aSurface);
    return surf.forget();
  }

  DataSourceSurface::ScopedMap* smap =
    new DataSourceSurface::ScopedMap(aSurface, DataSourceSurface::READ_WRITE);
  if (smap->IsMapped()) {
    // The ScopedMap is held by this DataSourceSurface.
    RefPtr<DataSourceSurface> surf =
      Factory::CreateWrappingDataSourceSurface(smap->GetData(),
                                               aSurface->Stride(),
                                               size,
                                               format,
                                               &ScopedMapRelease,
                                               static_cast<void*>(smap));
    if (surf) {
      return surf.forget();
    }
  }

  // Either the map failed or the wrapping surface could not be created;
  // nothing took ownership of |smap|, so free it here.
  delete smap;
  return nullptr;
}
81 | | |
82 | | static bool |
83 | | ShouldUseHeap(const IntSize& aSize, |
84 | | int32_t aStride, |
85 | | bool aIsAnimated) |
86 | 0 | { |
87 | 0 | // On some platforms (i.e. Android), a volatile buffer actually keeps a file |
88 | 0 | // handle active. We would like to avoid too many since we could easily |
89 | 0 | // exhaust the pool. However, other platforms we do not have the file handle |
90 | 0 | // problem, and additionally we may avoid a superfluous memset since the |
91 | 0 | // volatile memory starts out as zero-filled. Hence the knobs below. |
92 | 0 |
|
93 | 0 | // For as long as an animated image is retained, its frames will never be |
94 | 0 | // released to let the OS purge volatile buffers. |
95 | 0 | if (aIsAnimated && gfxPrefs::ImageMemAnimatedUseHeap()) { |
96 | 0 | return true; |
97 | 0 | } |
98 | 0 | |
99 | 0 | // Lets us avoid too many small images consuming all of the handles. The |
100 | 0 | // actual allocation checks for overflow. |
101 | 0 | int32_t bufferSize = (aStride * aSize.width) / 1024; |
102 | 0 | if (bufferSize < gfxPrefs::ImageMemVolatileMinThresholdKB()) { |
103 | 0 | return true; |
104 | 0 | } |
105 | 0 | |
106 | 0 | return false; |
107 | 0 | } |
108 | | |
// Allocates the pixel buffer backing a frame. Chooses, in priority order:
// shared memory (when WebRender/shared-surface prefs allow it and the frame
// covers the full image), the heap (per ShouldUseHeap), or a volatile
// buffer. Returns nullptr if the chosen allocation fails; no fallback to the
// other strategies is attempted.
static already_AddRefed<DataSourceSurface>
AllocateBufferForImage(const IntSize& size,
                       SurfaceFormat format,
                       bool aIsAnimated = false,
                       bool aIsFullFrame = true)
{
  int32_t stride = VolatileSurfaceStride(size, format);

  if (gfxVars::GetUseWebRenderOrDefault() &&
      gfxPrefs::ImageMemShared() && aIsFullFrame) {
    // Shared memory lets the compositor process map the surface directly.
    RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  } else if (ShouldUseHeap(size, stride, aIsAnimated)) {
    RefPtr<SourceSurfaceAlignedRawData> newSurf =
      new SourceSurfaceAlignedRawData();
    if (newSurf->Init(size, format, false, 0, stride)) {
      return newSurf.forget();
    }
  } else {
    // Volatile memory may be purged by the OS while unlocked.
    RefPtr<SourceSurfaceVolatileData> newSurf= new SourceSurfaceVolatileData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();
    }
  }
  return nullptr;
}
137 | | |
138 | | static bool |
139 | | ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize, SurfaceFormat aFormat) |
140 | 0 | { |
141 | 0 | int32_t stride = aSurface->Stride(); |
142 | 0 | uint8_t* data = aSurface->GetData(); |
143 | 0 | MOZ_ASSERT(data); |
144 | 0 |
|
145 | 0 | if (aFormat == SurfaceFormat::B8G8R8X8) { |
146 | 0 | // Skia doesn't support RGBX surfaces, so ensure the alpha value is set |
147 | 0 | // to opaque white. While it would be nice to only do this for Skia, |
148 | 0 | // imgFrame can run off main thread and past shutdown where |
149 | 0 | // we might not have gfxPlatform, so just memset everytime instead. |
150 | 0 | memset(data, 0xFF, stride * aSize.height); |
151 | 0 | } else if (aSurface->OnHeap()) { |
152 | 0 | // We only need to memset it if the buffer was allocated on the heap. |
153 | 0 | // Otherwise, it's allocated via mmap and refers to a zeroed page and will |
154 | 0 | // be COW once it's written to. |
155 | 0 | memset(data, 0, stride * aSize.height); |
156 | 0 | } |
157 | 0 |
|
158 | 0 | return true; |
159 | 0 | } |
160 | | |
161 | | static bool AllowedImageAndFrameDimensions(const nsIntSize& aImageSize, |
162 | | const nsIntRect& aFrameRect) |
163 | 0 | { |
164 | 0 | if (!SurfaceCache::IsLegalSize(aImageSize)) { |
165 | 0 | return false; |
166 | 0 | } |
167 | 0 | if (!SurfaceCache::IsLegalSize(aFrameRect.Size())) { |
168 | 0 | return false; |
169 | 0 | } |
170 | 0 | nsIntRect imageRect(0, 0, aImageSize.width, aImageSize.height); |
171 | 0 | if (!imageRect.Contains(aFrameRect)) { |
172 | 0 | NS_WARNING("Animated image frame does not fit inside bounds of image"); |
173 | 0 | } |
174 | 0 | return true; |
175 | 0 | } |
176 | | |
// Constructs an empty frame with safe defaults. No pixel storage is
// allocated until InitForDecoder() or InitWithDrawable() is called.
imgFrame::imgFrame()
  : mMonitor("imgFrame")
  , mDecoded(0, 0, 0, 0)  // nothing decoded yet
  , mLockCount(0)
  , mAborted(false)
  , mFinished(false)
  , mOptimizable(false)
  , mTimeout(FrameTimeout::FromRawMilliseconds(100))  // default animation delay
  , mDisposalMethod(DisposalMethod::NOT_SPECIFIED)
  , mBlendMethod(BlendMethod::OVER)
  , mFormat(SurfaceFormat::UNKNOWN)
  , mPalettedImageData(nullptr)
  , mPaletteDepth(0)
  , mNonPremult(false)
  , mIsFullFrame(false)
  , mCompositingFailed(false)
{
}
195 | | |
imgFrame::~imgFrame()
{
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif

  // The surfaces are refcounted and clean up after themselves; only the raw
  // paletted buffer (calloc'd in InitForDecoder) needs an explicit free.
  free(mPalettedImageData);
  mPalettedImageData = nullptr;
}
207 | | |
// Initializes this frame for writing by a decoder: validates the sizes,
// records animation parameters, and allocates either a paletted index buffer
// (aPaletteDepth > 0) or a BGRA/BGRX pixel surface. On failure returns
// NS_ERROR_FAILURE or NS_ERROR_OUT_OF_MEMORY and (in most paths) marks the
// frame aborted.
nsresult
imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                         const nsIntRect& aRect,
                         SurfaceFormat aFormat,
                         uint8_t aPaletteDepth /* = 0 */,
                         bool aNonPremult /* = false */,
                         const Maybe<AnimationParams>& aAnimParams /* = Nothing() */,
                         bool aIsFullFrame /* = false */)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageAndFrameDimensions(aImageSize, aRect)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;
  mFrameRect = aRect;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take into consideration the previous frames to
  // properly calculate. We start with the whole frame as dirty.
  mDirtyRect = aRect;

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
    // The first frame of an animation always covers the whole image.
    mIsFullFrame = aAnimParams->mFrameNum == 0 || aIsFullFrame;
  } else {
    mBlendRect = aRect;
    mIsFullFrame = true;
  }

  // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't
  // cover the entire image) for paletted animation frames. We never draw those
  // frames directly; we just use FrameAnimator to composite them and produce a
  // BGRA surface that we actually draw. We enforce this here to make sure that
  // imgFrame::Draw(), which is responsible for drawing all other kinds of
  // frames, never has to deal with a non-trivial frame rect.
  if (aPaletteDepth == 0 &&
      !mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize))) {
    MOZ_ASSERT_UNREACHABLE("Creating a non-paletted imgFrame with a "
                           "non-trivial frame rect");
    return NS_ERROR_FAILURE;
  }

  mFormat = aFormat;
  mPaletteDepth = aPaletteDepth;
  mNonPremult = aNonPremult;

  if (aPaletteDepth != 0) {
    // We're creating for a paletted image.
    if (aPaletteDepth > 8) {
      NS_WARNING("Should have legal palette depth");
      NS_ERROR("This Depth is not supported");
      mAborted = true;
      return NS_ERROR_FAILURE;
    }

    // Use the fallible allocator here. Paletted images always use 1 byte per
    // pixel, so calculating the amount of memory we need is straightforward.
    size_t dataSize = PaletteDataLength() + mFrameRect.Area();
    mPalettedImageData = static_cast<uint8_t*>(calloc(dataSize, sizeof(uint8_t)));
    if (!mPalettedImageData) {
      NS_WARNING("Call to calloc for paletted image data should succeed");
    }
    NS_ENSURE_TRUE(mPalettedImageData, NS_ERROR_OUT_OF_MEMORY);
  } else {
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

    // Frames after the first may be partial, which influences the
    // heap-vs-volatile allocation choice in AllocateBufferForImage.
    bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat,
                                         postFirstFrame, mIsFullFrame);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Keep the surface mapped for the duration of decoding.
    mLockedSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  return NS_OK;
}
305 | | |
// Initializes this frame by rasterizing |aDrawable| into it. If the backend
// supports data draw targets, renders into a locked data surface; otherwise
// renders into an offscreen surface whose snapshot becomes mOptSurface
// (callers were warned that RawAccessRef() may then require a readback).
// On success the frame is marked fully decoded and finished.
nsresult
imgFrame::InitWithDrawable(gfxDrawable* aDrawable,
                           const nsIntSize& aSize,
                           const SurfaceFormat aFormat,
                           SamplingFilter aSamplingFilter,
                           uint32_t aImageFlags,
                           gfx::BackendType aBackend)
{
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!SurfaceCache::IsLegalSize(aSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aSize;
  mFrameRect = IntRect(IntPoint(0, 0), aSize);

  mFormat = aFormat;
  mPaletteDepth = 0;

  RefPtr<DrawTarget> target;

  bool canUseDataSurface = Factory::DoesBackendSupportDataDrawtarget(aBackend);
  if (canUseDataSurface) {
    // It's safe to use data surfaces for content on this platform, so we can
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface = CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Draw directly into the locked surface's pixel data.
    target = gfxPlatform::CreateDrawTargetForData(
                                          mLockedSurface->GetData(),
                                          mFrameRect.Size(),
                                          mLockedSurface->Stride(),
                                          mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen
    // surface instead.  This means if someone later calls RawAccessRef(), we
    // may have to do an expensive readback, but we warned callers about that in
    // the documentation for this method.
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
      target = gfxPlatform::GetPlatform()->
        CreateDrawTargetForBackend(aBackend, mFrameRect.Size(), mFormat);
    } else {
      target = gfxPlatform::GetPlatform()->
        CreateOffscreenContentDrawTarget(mFrameRect.Size(), mFormat);
    }
  }

  if (!target || !target->IsValid()) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mFrameRect.Size()),
                             ImageRegion::Create(ThebesRect(mFrameRect)),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {
    NS_WARNING("Failed to create VolatileDataSourceSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (!canUseDataSurface) {
    // We used an offscreen surface, which is an "optimized" surface from
    // imgFrame's perspective.
    mOptSurface = target->Snapshot();
  } else {
    FinalizeSurface();
  }

  // If we reach this point, we should regard ourselves as complete.
  mDecoded = GetRect();
  mFinished = true;

#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(AreAllPixelsWritten());
#endif

  return NS_OK;
}
413 | | |
// Converts the frame's surface into a platform-optimized form (possibly a
// GPU texture) and drops the references that pin the raw pixel data, which
// lets the OS purge volatile buffers. Main-thread only; caller must hold
// mMonitor. A no-op unless the frame is unlocked and marked optimizable.
nsresult
imgFrame::Optimize(DrawTarget* aTarget)
{
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (mLockCount > 0 || !mOptimizable) {
    // Don't optimize right now.
    return NS_OK;
  }

  // Check whether image optimization is disabled -- not thread safe!
  // (Benign here because this path is asserted to run on the main thread
  // only, so the statics are never raced.)
  static bool gDisableOptimize = false;
  static bool hasCheckedOptimize = false;
  if (!hasCheckedOptimize) {
    if (PR_GetEnv("MOZ_DISABLE_IMAGE_OPTIMIZE")) {
      gDisableOptimize = true;
    }
    hasCheckedOptimize = true;
  }

  // Don't optimize during shutdown because gfxPlatform may not be available.
  if (ShutdownTracker::ShutdownHasStarted()) {
    return NS_OK;
  }

  if (gDisableOptimize) {
    return NS_OK;
  }

  // Paletted frames are never drawn directly; already-optimized frames are done.
  if (mPalettedImageData || mOptSurface) {
    return NS_OK;
  }

  // XXX(seth): It's currently unclear if there's any reason why we can't
  // optimize non-premult surfaces. We should look into removing this.
  if (mNonPremult) {
    return NS_OK;
  }

  mOptSurface = gfxPlatform::GetPlatform()
    ->ScreenReferenceDrawTarget()->OptimizeSourceSurface(mLockedSurface);
  if (mOptSurface == mLockedSurface) {
    // Optimization was a no-op; don't keep a redundant reference.
    mOptSurface = nullptr;
  }

  if (mOptSurface) {
    // There's no reason to keep our original surface around if we have an
    // optimized surface. Release our reference to it. This will leave
    // |mLockedSurface| as the only thing keeping it alive, so it'll get freed
    // below.
    mRawSurface = nullptr;
  }

  // Release all strong references to the surface's memory. If the underlying
  // surface is volatile, this will allow the operating system to free the
  // memory if it needs to.
  mLockedSurface = nullptr;
  mOptimizable = false;

  return NS_OK;
}
476 | | |
// Returns a ref that keeps this frame in a drawable state for its lifetime.
DrawableFrameRef
imgFrame::DrawableRef()
{
  return DrawableFrameRef(this);
}
482 | | |
// Returns a ref granting raw pixel access. If aOnlyFinished is true the ref
// is only granted once the frame has finished decoding.
RawAccessFrameRef
imgFrame::RawAccessRef(bool aOnlyFinished /*= false*/)
{
  return RawAccessFrameRef(this, aOnlyFinished);
}
488 | | |
// Permanently pins the frame's raw data in memory (prevents Optimize() from
// ever releasing it). Requires the image data to already be locked.
void
imgFrame::SetRawAccessOnly()
{
  AssertImageDataLocked();

  // Lock our data and throw away the key.
  LockImageData(false);
}
497 | | |
498 | | |
// Produces a drawable for Draw(), compensating for partial decodes. Fully
// decoded frames are wrapped as-is; a partially decoded frame being tiled is
// composited into a temporary BGRA surface restricted to the decoded area;
// otherwise the region and surface size are simply clipped to the decoded
// bounds. May mutate aRegion in the non-tiling partial case.
imgFrame::SurfaceWithFormat
imgFrame::SurfaceForDrawing(bool aDoPartialDecode,
                            bool aDoTile,
                            ImageRegion& aRegion,
                            SourceSurface* aSurface)
{
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  if (!aDoPartialDecode) {
    return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, mImageSize),
                             mFormat);
  }

  gfxRect available = gfxRect(mDecoded.X(), mDecoded.Y(), mDecoded.Width(),
                              mDecoded.Height());

  if (aDoTile) {
    // Create a temporary surface.
    // Give this surface an alpha channel because there are
    // transparent pixels in the padding or undecoded area
    RefPtr<DrawTarget> target =
      gfxPlatform::GetPlatform()->
        CreateOffscreenContentDrawTarget(mImageSize, SurfaceFormat::B8G8R8A8);
    if (!target) {
      return SurfaceWithFormat();
    }

    SurfacePattern pattern(aSurface,
                           aRegion.GetExtendMode(),
                           Matrix::Translation(mDecoded.X(), mDecoded.Y()));
    target->FillRect(ToRect(aRegion.Intersect(available).Rect()), pattern);

    RefPtr<SourceSurface> newsurf = target->Snapshot();
    return SurfaceWithFormat(new gfxSurfaceDrawable(newsurf, mImageSize),
                             target->GetFormat());
  }

  // Not tiling, and we have a surface, so we can account for
  // a partial decode just by twiddling parameters.
  aRegion = aRegion.Intersect(available);
  IntSize availableSize(mDecoded.Width(), mDecoded.Height());

  return SurfaceWithFormat(new gfxSurfaceDrawable(aSurface, availableSize),
                           mFormat);
}
545 | | |
// Draws this frame into aContext for the given region. Main-thread only.
// Returns false if the frame is paletted (never drawn directly) or the
// source surface is unavailable; returns true otherwise, even if the final
// blit was skipped because no valid drawable could be produced.
bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
                    SamplingFilter aSamplingFilter, uint32_t aImageFlags,
                    float aOpacity)
{
  AUTO_PROFILER_LABEL("imgFrame::Draw", GRAPHICS);

  MOZ_ASSERT(NS_IsMainThread());
  NS_ASSERTION(!aRegion.Rect().IsEmpty(), "Drawing empty region!");
  NS_ASSERTION(!aRegion.IsRestricted() ||
               !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");
  MOZ_ASSERT(mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize)),
             "Directly drawing an image with a non-trivial frame rect!");

  if (mPalettedImageData) {
    MOZ_ASSERT_UNREACHABLE("Directly drawing a paletted image!");
    return false;
  }

  MonitorAutoLock lock(mMonitor);

  // Possibly convert this image into a GPU texture, this may also cause our
  // mLockedSurface to be released and the OS to release the underlying memory.
  Optimize(aContext->GetDrawTarget());

  bool doPartialDecode = !AreAllPixelsWritten();

  RefPtr<SourceSurface> surf = GetSourceSurfaceInternal();
  if (!surf) {
    return false;
  }

  // Tile only when the requested region extends past the image and the
  // caller has not asked for clamping.
  gfxRect imageRect(0, 0, mImageSize.width, mImageSize.height);
  bool doTile = !imageRect.Contains(aRegion.Rect()) &&
                !(aImageFlags & imgIContainer::FLAG_CLAMP);

  ImageRegion region(aRegion);
  SurfaceWithFormat surfaceResult =
    SurfaceForDrawing(doPartialDecode, doTile, region, surf);

  if (surfaceResult.IsValid()) {
    gfxUtils::DrawPixelSnapped(aContext, surfaceResult.mDrawable,
                               imageRect.Size(), region, surfaceResult.mFormat,
                               aSamplingFilter, aImageFlags, aOpacity);
  }

  return true;
}
594 | | |
// Thread-safe wrapper around ImageUpdatedInternal: records that the given
// rect of the frame has been written to.
nsresult
imgFrame::ImageUpdated(const nsIntRect& aUpdateRect)
{
  MonitorAutoLock lock(mMonitor);
  return ImageUpdatedInternal(aUpdateRect);
}
601 | | |
// Extends mDecoded by the updated rect (clamped to the frame bounds) and
// forwards the invalidation to the backing surfaces so consumers watching
// them see the change. Caller must hold mMonitor.
nsresult
imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect)
{
  mMonitor.AssertCurrentThreadOwns();

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = mFrameRect.Intersect(aUpdateRect);
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Paletted images cannot invalidate.
  if (mPalettedImageData) {
    return NS_OK;
  }

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {
    mRawSurface->Invalidate(updateRect);
  }
  if (mLockedSurface && mRawSurface != mLockedSurface) {
    mLockedSurface->Invalidate(updateRect);
  }
  return NS_OK;
}
631 | | |
// Marks the frame as completely decoded, invalidating any not-yet-reported
// region, optionally finalizing the shared surface, and waking any thread
// blocked in WaitUntilFinished(). Image data must be locked.
void
imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
                 bool aFinalize /* = true */)
{
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mPalettedImageData) {
    ImageUpdatedInternal(mFrameRect);
  } else if (!mDecoded.IsEqualEdges(mFrameRect)) {
    // The decoder should have produced rows starting from either the bottom or
    // the top of the image. We need to calculate the region for which we have
    // not yet invalidated.
    IntRect delta(0, 0, mFrameRect.width, 0);
    if (mDecoded.y == 0) {
      // Top-down decode: the missing region is below what was decoded.
      delta.y = mDecoded.height;
      delta.height = mFrameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == mFrameRect.height) {
      // Bottom-up decode: the missing region is above what was decoded.
      delta.height = mFrameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = mFrameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(mFrameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
  }

  mFinished = true;

  // The image is now complete, wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
670 | | |
671 | | uint32_t |
672 | | imgFrame::GetImageBytesPerRow() const |
673 | 0 | { |
674 | 0 | mMonitor.AssertCurrentThreadOwns(); |
675 | 0 |
|
676 | 0 | if (mRawSurface) { |
677 | 0 | return mFrameRect.Width() * BytesPerPixel(mFormat); |
678 | 0 | } |
679 | 0 | |
680 | 0 | if (mPaletteDepth) { |
681 | 0 | return mFrameRect.Width(); |
682 | 0 | } |
683 | 0 | |
684 | 0 | return 0; |
685 | 0 | } |
686 | | |
// Total size in bytes of the frame's image data (rows * bytes-per-row).
uint32_t
imgFrame::GetImageDataLength() const
{
  return GetImageBytesPerRow() * mFrameRect.Height();
}
692 | | |
// Thread-safe wrapper around GetImageDataInternal.
void
imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const
{
  MonitorAutoLock lock(mMonitor);
  GetImageDataInternal(aData, aLength);
}
699 | | |
// Returns a pointer to (and length of) the writable image data: the locked
// surface's pixels, or for paletted frames the index data that follows the
// palette. Requires the data to be locked; caller must hold mMonitor.
void
imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const
{
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on
    // the main thread after decoding has finished, but if animations want to
    // read frame data off the main thread, we will need to reconsider this.
    *aData = mLockedSurface->GetData();
    MOZ_ASSERT(*aData,
      "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else if (mPalettedImageData) {
    // The pixel index data lives immediately after the palette.
    *aData = mPalettedImageData + PaletteDataLength();
    MOZ_ASSERT(*aData,
      "mPalettedImageData is non-null, but result is null in GetImageData");
  } else {
    MOZ_ASSERT(false,
      "Have neither mLockedSurface nor mPalettedImageData in GetImageData");
    *aData = nullptr;
  }

  *aLength = GetImageDataLength();
}
725 | | |
726 | | uint8_t* |
727 | | imgFrame::GetImageData() const |
728 | 0 | { |
729 | 0 | uint8_t* data; |
730 | 0 | uint32_t length; |
731 | 0 | GetImageData(&data, &length); |
732 | 0 | return data; |
733 | 0 | } |
734 | | |
735 | | bool |
736 | | imgFrame::GetIsPaletted() const |
737 | 0 | { |
738 | 0 | return mPalettedImageData != nullptr; |
739 | 0 | } |
740 | | |
741 | | void |
742 | | imgFrame::GetPaletteData(uint32_t** aPalette, uint32_t* length) const |
743 | 0 | { |
744 | 0 | AssertImageDataLocked(); |
745 | 0 |
|
746 | 0 | if (!mPalettedImageData) { |
747 | 0 | *aPalette = nullptr; |
748 | 0 | *length = 0; |
749 | 0 | } else { |
750 | 0 | *aPalette = (uint32_t*) mPalettedImageData; |
751 | 0 | *length = PaletteDataLength(); |
752 | 0 | } |
753 | 0 | } |
754 | | |
755 | | uint32_t* |
756 | | imgFrame::GetPaletteData() const |
757 | 0 | { |
758 | 0 | uint32_t* data; |
759 | 0 | uint32_t length; |
760 | 0 | GetPaletteData(&data, &length); |
761 | 0 | return data; |
762 | 0 | } |
763 | | |
// Increments the lock count and returns a pointer to the raw image data.
// Returns nullptr if the lock count is corrupt, if aOnlyFinished is set and
// decoding hasn't finished, or if the raw data has already been optimized
// away (re-locking an optimized frame is illegal).
uint8_t*
imgFrame::LockImageData(bool aOnlyFinished)
{
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount >= 0, "Unbalanced locks and unlocks");
  if (mLockCount < 0 || (aOnlyFinished && !mFinished)) {
    return nullptr;
  }

  uint8_t* data;
  if (mPalettedImageData) {
    data = mPalettedImageData;
  } else if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;
  }

  // If the raw data is still available, we should get a valid pointer for it.
  if (!data) {
    MOZ_ASSERT_UNREACHABLE("It's illegal to re-lock an optimized imgFrame");
    return nullptr;
  }

  ++mLockCount;
  return data;
}
792 | | |
// Debug-only check that the image data is currently locked; compiles to
// nothing in release builds.
void
imgFrame::AssertImageDataLocked() const
{
#ifdef DEBUG
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
#endif
}
801 | | |
// Decrements the lock count. Fails if the frame isn't locked; asserts that a
// final unlock only happens after the frame is finished or aborted.
nsresult
imgFrame::UnlockImageData()
{
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mLockCount > 0, "Unlocking an unlocked image!");
  if (mLockCount <= 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(mLockCount > 1 || mFinished || mAborted,
             "Should have Finish()'d or aborted before unlocking");

  mLockCount--;

  return NS_OK;
}
819 | | |
// Marks the frame as eligible for Optimize() once it's fully unlocked.
// Requires the image data to currently be locked.
void
imgFrame::SetOptimizable()
{
  AssertImageDataLocked();
  MonitorAutoLock lock(mMonitor);
  mOptimizable = true;
}
827 | | |
// Thread-safe wrapper around FinalizeSurfaceInternal.
void
imgFrame::FinalizeSurface()
{
  MonitorAutoLock lock(mMonitor);
  FinalizeSurfaceInternal();
}
834 | | |
// Finalizes a shared-memory surface (e.g. making it read-only for other
// processes); a no-op for any other surface type. Caller must hold mMonitor.
void
imgFrame::FinalizeSurfaceInternal()
{
  mMonitor.AssertCurrentThreadOwns();

  // Not all images will have mRawSurface to finalize (i.e. paletted images).
  if (!mRawSurface || mRawSurface->GetType() != SurfaceType::DATA_SHARED) {
    return;
  }

  auto sharedSurf = static_cast<SourceSurfaceSharedData*>(mRawSurface.get());
  sharedSurf->Finalize();
}
848 | | |
// Thread-safe wrapper around GetSourceSurfaceInternal.
already_AddRefed<SourceSurface>
imgFrame::GetSourceSurface()
{
  MonitorAutoLock lock(mMonitor);
  return GetSourceSurfaceInternal();
}
855 | | |
// Returns the best available surface for this frame, in preference order:
// the optimized surface (dropped if it has become invalid), the locked
// surface, or a freshly locked view of the raw surface. Returns nullptr if
// no surface remains. Caller must hold mMonitor.
already_AddRefed<SourceSurface>
imgFrame::GetSourceSurfaceInternal()
{
  mMonitor.AssertCurrentThreadOwns();

  if (mOptSurface) {
    if (mOptSurface->IsValid()) {
      RefPtr<SourceSurface> surf(mOptSurface);
      return surf.forget();
    } else {
      // The optimized surface was lost (e.g. device reset); fall through to
      // the raw data if we still have it.
      mOptSurface = nullptr;
    }
  }

  if (mLockedSurface) {
    RefPtr<SourceSurface> surf(mLockedSurface);
    return surf.forget();
  }

  if (!mRawSurface) {
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
}
881 | | |
// Marks the frame as aborted and wakes any thread blocked in
// WaitUntilFinished().
void
imgFrame::Abort()
{
  MonitorAutoLock lock(mMonitor);

  mAborted = true;

  // Wake up anyone who's waiting.
  mMonitor.NotifyAll();
}
892 | | |
// Thread-safe accessor for the aborted flag.
bool
imgFrame::IsAborted() const
{
  MonitorAutoLock lock(mMonitor);
  return mAborted;
}
899 | | |
// Thread-safe accessor for the finished flag.
bool
imgFrame::IsFinished() const
{
  MonitorAutoLock lock(mMonitor);
  return mFinished;
}
906 | | |
// Blocks the calling thread until the frame is finished or aborted
// (signalled by Finish()/Abort() via mMonitor.NotifyAll()).
void
imgFrame::WaitUntilFinished() const
{
  MonitorAutoLock lock(mMonitor);

  // Loop to guard against spurious wakeups.
  while (true) {
    // Return if we're aborted or complete.
    if (mAborted || mFinished) {
      return;
    }

    // Not complete yet, so we'll have to wait.
    mMonitor.Wait();
  }
}
922 | | |
// True once the decoded region covers the whole frame rect. Caller must
// hold mMonitor.
bool
imgFrame::AreAllPixelsWritten() const
{
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(mFrameRect);
}
929 | | |
// Main-thread-only accessor for the compositing-failed flag (no monitor; the
// field is only touched on the main thread).
bool imgFrame::GetCompositingFailed() const
{
  MOZ_ASSERT(NS_IsMainThread());
  return mCompositingFailed;
}
935 | | |
// Main-thread-only setter for the compositing-failed flag.
void
imgFrame::SetCompositingFailed(bool val)
{
  MOZ_ASSERT(NS_IsMainThread());
  mCompositingFailed = val;
}
942 | | |
// Memory-reporter hook: accumulates the heap size of the paletted buffer and
// surface objects, plus (via the raw surface) its pixel data, which may be
// heap, non-heap (shared/volatile), or counted as external handles.
void
imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                                 size_t& aHeapSizeOut,
                                 size_t& aNonHeapSizeOut,
                                 size_t& aExtHandlesOut) const
{
  MonitorAutoLock lock(mMonitor);

  if (mPalettedImageData) {
    aHeapSizeOut += aMallocSizeOf(mPalettedImageData);
  }
  if (mLockedSurface) {
    aHeapSizeOut += aMallocSizeOf(mLockedSurface);
  }
  if (mOptSurface) {
    aHeapSizeOut += aMallocSizeOf(mOptSurface);
  }
  if (mRawSurface) {
    aHeapSizeOut += aMallocSizeOf(mRawSurface);
    mRawSurface->AddSizeOfExcludingThis(aMallocSizeOf, aHeapSizeOut,
                                        aNonHeapSizeOut, aExtHandlesOut);
  }
}
966 | | |
967 | | } // namespace image |
968 | | } // namespace mozilla |