/src/mozilla-central/image/SourceBuffer.cpp
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SourceBuffer.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include "mozilla/Likely.h"
#include "nsIInputStream.h"
#include "MainThreadUtils.h"
#include "SurfaceCache.h"

using std::max;
using std::min;

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// SourceBufferIterator implementation.
//////////////////////////////////////////////////////////////////////////////
SourceBufferIterator::~SourceBufferIterator()
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}
SourceBufferIterator&
SourceBufferIterator::operator=(SourceBufferIterator&& aOther)
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }

  mOwner = std::move(aOther.mOwner);
  mState = aOther.mState;
  mData = aOther.mData;
  mChunkCount = aOther.mChunkCount;
  mByteCount = aOther.mByteCount;
  mRemainderToRead = aOther.mRemainderToRead;

  return *this;
}
SourceBufferIterator::State
SourceBufferIterator::AdvanceOrScheduleResume(size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they have no further interest in it), so
  // consume that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <= mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;

  // An iterator can have a limit imposed on it to read only a subset of a
  // source buffer. If one is present, we need to mimic the behaviour of the
  // owning SourceBuffer.
  if (MOZ_UNLIKELY(mRemainderToRead != SIZE_MAX)) {
    MOZ_ASSERT(mData.mIterating.mNextReadLength <= mRemainderToRead);
    mRemainderToRead -= mData.mIterating.mNextReadLength;

    if (MOZ_UNLIKELY(mRemainderToRead == 0)) {
      mData.mIterating.mNextReadLength = 0;
      SetComplete(NS_OK);
      return COMPLETE;
    }

    if (MOZ_UNLIKELY(aRequestedBytes > mRemainderToRead)) {
      aRequestedBytes = mRemainderToRead;
    }
  }

  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this,
                                                 aRequestedBytes,
                                                 aConsumer);
}
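// For illustration only (not part of this file): a minimal sketch of how a
// decoder-style consumer might drive an iterator. SIZE_MAX as the read length
// means "no limit", matching the mRemainderToRead logic above; accessor names
// like Data(), Length(), and CompletionStatus() are assumed from
// SourceBuffer.h.
//
//   SourceBufferIterator iterator = sourceBuffer->Iterator(SIZE_MAX);
//   while (true) {
//     switch (iterator.AdvanceOrScheduleResume(SIZE_MAX, resumable)) {
//       case SourceBufferIterator::READY:
//         ConsumeBytes(iterator.Data(), iterator.Length());  // hypothetical
//         break;
//       case SourceBufferIterator::WAITING:
//         return;  // |resumable| will be notified when more data arrives.
//       case SourceBufferIterator::COMPLETE:
//         return HandleEnd(iterator.CompletionStatus());     // hypothetical
//     }
//   }
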
bool
SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const
{
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}

//////////////////////////////////////////////////////////////////////////////
// SourceBuffer implementation.
//////////////////////////////////////////////////////////////////////////////

const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
const size_t SourceBuffer::MAX_CHUNK_CAPACITY;

SourceBuffer::SourceBuffer()
  : mMutex("image::SourceBuffer")
  , mConsumerCount(0)
  , mCompacted(false)
{ }
SourceBuffer::~SourceBuffer()
{
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}
nsresult
SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk)
{
  mMutex.AssertCurrentThreadOwns();

#ifdef DEBUG
  if (mChunks.Length() > 0) {
    NS_WARNING("Appending an extra chunk for SourceBuffer");
  }
#endif

  if (MOZ_UNLIKELY(!aChunk)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(!mChunks.AppendElement(std::move(*aChunk), fallible))) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}
Maybe<SourceBuffer::Chunk>
SourceBuffer::CreateChunk(size_t aCapacity,
                          size_t aExistingCapacity /* = 0 */,
                          bool aRoundUp /* = true */)
{
  if (MOZ_UNLIKELY(aCapacity == 0)) {
    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    return Nothing();
  }

  // Round up if requested.
  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity)
                                  : aCapacity;

  // Use the size of the SurfaceCache as an additional heuristic to avoid
  // allocating huge buffers. Generally images do not get smaller when decoded,
  // so if we couldn't store the source data in the SurfaceCache, we assume
  // that there's no way we'll be able to store the decoded version.
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity + aExistingCapacity))) {
    NS_WARNING("SourceBuffer refused to create chunk too large for SurfaceCache");
    return Nothing();
  }

  return Some(Chunk(finalCapacity));
}
nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // If we've tried to compact once, don't attempt again.
  if (mCompacted) {
    return NS_OK;
  }

  mCompacted = true;

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have exactly one chunk and it has no excess capacity, then there's
  // nothing to compact.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // If the last chunk has the maximum capacity, then we know the total size
  // will be quite large and not worth consolidating. We can likely cheaply
  // trim the last chunk if it is too big, however.
  size_t capacity = mChunks.LastElement().Capacity();
  if (capacity == MAX_CHUNK_CAPACITY) {
    size_t lastLength = mChunks.LastElement().Length();
    if (lastLength != capacity) {
      mChunks.LastElement().SetCapacity(lastLength);
    }
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written), then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  Chunk& mergeChunk = mChunks[0];
  if (MOZ_UNLIKELY(!mergeChunk.SetCapacity(length))) {
    NS_WARNING("Failed to reallocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the newly reallocated first chunk.
  for (uint32_t i = 1; i < mChunks.Length(); ++i) {
    size_t offset = mergeChunk.Length();
    MOZ_ASSERT(offset < mergeChunk.Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= mergeChunk.Capacity());

    memcpy(mergeChunk.Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    mergeChunk.AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(mergeChunk.Length() == mergeChunk.Capacity(),
             "Compacted chunk has slack space");

  // Remove the redundant chunks.
  mChunks.RemoveElementsAt(1, mChunks.Length() - 1);
  mChunks.Compact();

  return NS_OK;
}
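// For illustration only: with three chunks whose (length/capacity) pairs are,
// say, (4096/4096), (8192/8192), and (700/12288), Compact() reallocates the
// first chunk to capacity 12988, copies the other two in after it, and drops
// them, leaving a single (12988/12988) chunk with no slack space.
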
/* static */ size_t
SourceBuffer::RoundedUpCapacity(size_t aCapacity)
{
  // Protect against overflow.
  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    return aCapacity;
  }

  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
  // size of a page).
  size_t roundedCapacity =
    (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");

  return roundedCapacity;
}
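// For illustration only: assuming MIN_CHUNK_CAPACITY is one 4096-byte page,
// RoundedUpCapacity(5000) computes (5000 + 4095) & ~4095 == 8192, while
// RoundedUpCapacity(4096) stays at 4096. The mask trick is valid because
// MIN_CHUNK_CAPACITY is a power of two.
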
size_t
SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity)
{
  mMutex.AssertCurrentThreadOwns();

  // We grow the source buffer using a Fibonacci growth rate. It will be capped
  // at MAX_CHUNK_CAPACITY, unless the available data exceeds that.

  size_t length = mChunks.Length();

  if (length == 0 || aMinCapacity > MAX_CHUNK_CAPACITY) {
    return aMinCapacity;
  }

  if (length == 1) {
    return min(max(2 * mChunks[0].Capacity(), aMinCapacity),
               MAX_CHUNK_CAPACITY);
  }

  return min(max(mChunks[length - 1].Capacity() +
                 mChunks[length - 2].Capacity(),
                 aMinCapacity), MAX_CHUNK_CAPACITY);
}
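// For illustration only: assuming a first chunk of 4096 bytes and small
// appends, successive chunk capacities grow as 4096, 8192 (doubled), then
// 12288, 20480, 32768, ... (each the sum of the previous two), until the
// MAX_CHUNK_CAPACITY cap kicks in.
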
void
SourceBuffer::AddWaitingConsumer(IResumable* aConsumer)
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");

  if (aConsumer) {
    mWaitingConsumers.AppendElement(aConsumer);
  }
}
void
SourceBuffer::ResumeWaitingConsumers()
{
  mMutex.AssertCurrentThreadOwns();

  if (mWaitingConsumers.Length() == 0) {
    return;
  }

  for (uint32_t i = 0; i < mWaitingConsumers.Length(); ++i) {
    mWaitingConsumers[i]->Resume();
  }

  mWaitingConsumers.Clear();
}
nsresult
SourceBuffer::ExpectLength(size_t aExpectedLength)
{
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(aExpectedLength))) {
    NS_WARNING("SourceBuffer refused to store too large buffer");
    return HandleError(NS_ERROR_INVALID_ARG);
  }

  size_t length = min(aExpectedLength, MAX_CHUNK_CAPACITY);
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(length,
                                         /* aExistingCapacity */ 0,
                                         /* aRoundUp */ false))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}
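// For illustration only: a producer that knows the Content-Length up front can
// preallocate a single chunk and skip the Fibonacci growth path entirely.
//
//   nsresult rv = sourceBuffer->ExpectLength(contentLength);
//   if (NS_FAILED(rv)) {
//     return rv;  // Too large for the SurfaceCache, or OOM.
//   }
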
nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;
  size_t totalCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;

    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
      totalCapacity += mChunks[i].Capacity();
    }
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity, totalCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(std::move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
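// For illustration only: if the current chunk has 1000 bytes of remaining
// capacity and Append() is called with 2500 bytes, forCurrentChunk is 1000 and
// forNextChunk is 1500, so the tail is copied into one freshly allocated chunk
// of FibonacciCapacityWithMinimum(1500) bytes. A third chunk is never needed.
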
static nsresult
AppendToSourceBuffer(nsIInputStream*,
                     void* aClosure,
                     const char* aFromRawSegment,
                     uint32_t,
                     uint32_t aCount,
                     uint32_t* aWriteCount)
{
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}
nsresult
SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                    uint32_t aCount)
{
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this,
                                           aCount, &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  if (bytesRead != aCount) {
    // Only some of the given data was read. We may have failed in
    // SourceBuffer::Append but ReadSegments swallowed the error. Otherwise the
    // stream itself failed to yield the data.
    MutexAutoLock lock(mMutex);
    if (mStatus) {
      MOZ_ASSERT(NS_FAILED(*mStatus));
      return *mStatus;
    }

    MOZ_ASSERT_UNREACHABLE("AppendToSourceBuffer should consume everything");
  }

  return rv;
}
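// For illustration only: a sketch of feeding a SourceBuffer from a necko
// stream listener; the OnDataAvailable signature is abbreviated here.
//
//   nsresult MyListener::OnDataAvailable(/* ... */ nsIInputStream* aStream,
//                                        uint32_t aCount) {
//     return mSourceBuffer->AppendFromInputStream(aStream, aCount);
//   }
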
void
SourceBuffer::Complete(nsresult aStatus)
{
  MutexAutoLock lock(mMutex);

  // When an error occurs internally (e.g. due to an OOM), we save the status.
  // This will indirectly trigger a failure higher up and that will call
  // SourceBuffer::Complete. Since it doesn't necessarily know we are already
  // complete, it is safe to ignore.
  if (mStatus && (MOZ_UNLIKELY(NS_SUCCEEDED(*mStatus) ||
                               aStatus != NS_IMAGELIB_ERROR_FAILURE))) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return.
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}
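// For illustration only: the producer-side lifecycle in one place.
//
//   sourceBuffer->ExpectLength(contentLength);     // optional preallocation
//   while (/* data keeps arriving */) {
//     sourceBuffer->Append(data, length);          // or AppendFromInputStream
//   }
//   sourceBuffer->Complete(NS_OK);                 // or a failure code
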
bool
SourceBuffer::IsComplete()
{
  MutexAutoLock lock(mMutex);
  return bool(mStatus);
}
size_t
SourceBuffer::SizeOfIncludingThisWithComputedFallback(MallocSizeOf
                                                        aMallocSizeOf) const
{
  MutexAutoLock lock(mMutex);

  size_t n = aMallocSizeOf(this);
  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);

  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());

    if (chunkSize == 0) {
      // We're on a platform where moz_malloc_size_of always returns 0.
      chunkSize = mChunks[i].Capacity();
    }

    n += chunkSize;
  }

  return n;
}
SourceBufferIterator
SourceBuffer::Iterator(size_t aReadLength)
{
  {
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this, aReadLength);
}
void
SourceBuffer::OnIteratorRelease()
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
  mConsumerCount--;

  // If we still have active consumers, or we're not complete yet, then return.
  if (mConsumerCount > 0 || !mStatus) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}
bool
SourceBuffer::RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                         size_t aBytes) const
{
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk; i < mChunks.Length(); ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}
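// For illustration only: a consumer that needs at least |neededBytes| more
// bytes can bail out early on a truncated, completed buffer, since remaining
// bytes <= neededBytes - 1 implies the data can never arrive.
//
//   if (iterator.RemainingBytesIsNoMoreThan(neededBytes - 1)) {
//     return HandleTruncatedData();  // hypothetical
//   }
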
SourceBufferIterator::State
SourceBuffer::AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                              size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(), "Advancing a completed iterator and "
                                  "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting(!!aConsumer);
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting(!!aConsumer);
}
nsresult
SourceBuffer::HandleError(nsresult aError)
{
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY ||
             aError == NS_ERROR_INVALID_ARG,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");

  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}
bool
SourceBuffer::IsEmpty()
{
  mMutex.AssertCurrentThreadOwns();
  return mChunks.Length() == 0 ||
         mChunks[0].Length() == 0;
}
bool
SourceBuffer::IsLastChunk(uint32_t aChunk)
{
  mMutex.AssertCurrentThreadOwns();
  return aChunk + 1 == mChunks.Length();
}

} // namespace image
} // namespace mozilla