Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/dom/media/MemoryBlockCache.cpp
Line
Count
Source (jump to first uncovered line)
1
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* vim:set ts=2 sw=2 sts=2 et cindent: */
3
/* This Source Code Form is subject to the terms of the Mozilla Public
4
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
5
 * You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "MemoryBlockCache.h"
8
9
#include "mozilla/Atomics.h"
10
#include "mozilla/ClearOnShutdown.h"
11
#include "mozilla/Logging.h"
12
#include "mozilla/Telemetry.h"
13
#include "mozilla/Services.h"
14
#include "mozilla/StaticPrefs.h"
15
#include "nsIObserver.h"
16
#include "nsIObserverService.h"
17
#include "nsWeakReference.h"
18
#include "prsystem.h"
19
20
namespace mozilla {

// Drop any LOG macro inherited from included headers before defining our own
// (re-undefined at the end of the file to stay unified-build safe).
#undef LOG
LazyLogModule gMemoryBlockCacheLog("MemoryBlockCache");
// Debug-level logging, prefixed with the `this` pointer of the caller so
// messages from different cache instances can be told apart.
#define LOG(x, ...)                                                            \
  MOZ_LOG(gMemoryBlockCacheLog, LogLevel::Debug, ("%p " x, this, ##__VA_ARGS__))

// Combined sizes of all MemoryBlockCache buffers.
// Initialized to 0 by non-local static initialization.
// Increases when a buffer grows (during initialization or unexpected OOB
// writes), decreases when a MemoryBlockCache (with its buffer) is destroyed.
static Atomic<size_t> gCombinedSizes;
32
33
// Singleton that tracks the high-water mark of the combined MemoryBlockCache
// buffer sizes and reports it via telemetry when "profile-change-teardown"
// fires. It registers itself as a weak observer (see AddObserver(..., true)
// in NotifyCombinedSizeGrown), hence the nsSupportsWeakReference base.
class MemoryBlockCacheTelemetry final
  : public nsIObserver
  , public nsSupportsWeakReference
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSIOBSERVER

  // To be called when the combined size has grown, so that the watermark may
  // be updated if needed.
  // Ensures MemoryBlockCache telemetry will be reported at shutdown.
  // Returns current watermark.
  static size_t NotifyCombinedSizeGrown(size_t aNewSize);

private:
  MemoryBlockCacheTelemetry() {}
  ~MemoryBlockCacheTelemetry() {}

  // Singleton instance created when a first MediaCache is registered, and
  // released when the last MediaCache is unregistered.
  // The observer service will keep a weak reference to it, for notifications.
  static StaticRefPtr<MemoryBlockCacheTelemetry> gMemoryBlockCacheTelemetry;

  // Watermark for the combined sizes; can only increase when a buffer grows.
  static Atomic<size_t> gCombinedSizesWatermark;
};

// Initialized to nullptr by non-local static initialization.
/* static */ StaticRefPtr<MemoryBlockCacheTelemetry>
  MemoryBlockCacheTelemetry::gMemoryBlockCacheTelemetry;

// Initialized to 0 by non-local static initialization.
/* static */ Atomic<size_t> MemoryBlockCacheTelemetry::gCombinedSizesWatermark;

NS_IMPL_ISUPPORTS(MemoryBlockCacheTelemetry,
                  nsIObserver,
                  nsISupportsWeakReference)
70
71
/* static */ size_t
72
MemoryBlockCacheTelemetry::NotifyCombinedSizeGrown(size_t aNewSize)
73
0
{
74
0
  // Ensure gMemoryBlockCacheTelemetry exists.
75
0
  if (!gMemoryBlockCacheTelemetry) {
76
0
    MOZ_ASSERT(NS_IsMainThread());
77
0
    gMemoryBlockCacheTelemetry = new MemoryBlockCacheTelemetry();
78
0
79
0
    nsCOMPtr<nsIObserverService> observerService =
80
0
      mozilla::services::GetObserverService();
81
0
    if (observerService) {
82
0
      observerService->AddObserver(
83
0
        gMemoryBlockCacheTelemetry, "profile-change-teardown", true);
84
0
    }
85
0
86
0
    // Clearing gMemoryBlockCacheTelemetry when handling
87
0
    // "profile-change-teardown" could run the risk of re-creating it (and then
88
0
    // leaking it) if some MediaCache work happened after that notification.
89
0
    // So instead we just request it to be cleared on final shutdown.
90
0
    ClearOnShutdown(&gMemoryBlockCacheTelemetry);
91
0
  }
92
0
93
0
  // Update watermark if needed, report current watermark.
94
0
  for (;;) {
95
0
    size_t oldSize = gMemoryBlockCacheTelemetry->gCombinedSizesWatermark;
96
0
    if (aNewSize < oldSize) {
97
0
      return oldSize;
98
0
    }
99
0
    if (gMemoryBlockCacheTelemetry->gCombinedSizesWatermark.compareExchange(
100
0
          oldSize, aNewSize)) {
101
0
      return aNewSize;
102
0
    }
103
0
  }
104
0
}
105
106
// nsIObserver entry point. On "profile-change-teardown", report the
// combined-sizes watermark to the MEDIACACHE_MEMORY_WATERMARK histogram.
// Other topics are ignored (NS_OK either way).
NS_IMETHODIMP
MemoryBlockCacheTelemetry::Observe(nsISupports* aSubject,
                                   char const* aTopic,
                                   char16_t const* aData)
{
  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");

  if (strcmp(aTopic, "profile-change-teardown") == 0) {
    uint32_t watermark = static_cast<uint32_t>(gCombinedSizesWatermark);
    // Log label fixed: this is Observe(), not a destructor -- the previous
    // "~Observe()" was a typo in the message.
    LOG("MemoryBlockCacheTelemetry::Observe() "
        "MEDIACACHE_MEMORY_WATERMARK=%" PRIu32,
        watermark);
    Telemetry::Accumulate(Telemetry::HistogramID::MEDIACACHE_MEMORY_WATERMARK,
                          watermark);
    return NS_OK;
  }
  return NS_OK;
}
124
125
// Error codes accumulated into the MEMORYBLOCKCACHE_ERRORS histogram.
enum MemoryBlockCacheTelemetryErrors
{
  // Don't change order/numbers! Add new values at the end and update
  // MEMORYBLOCKCACHE_ERRORS description in Histograms.json.
  InitUnderuse = 0,           // Cache created with a non-positive content length.
  InitAllocation = 1,         // Initial buffer allocation failed in Init().
  ReadOverrun = 2,            // Read() past the end of the buffer.
  WriteBlockOverflow = 3,     // WriteBlock() beyond buffer before any growth.
  WriteBlockCannotGrow = 4,   // WriteBlock() could not grow the buffer.
  MoveBlockSourceOverrun = 5, // MoveBlock() source block outside the buffer.
  MoveBlockDestOverflow = 6,  // MoveBlock() destination beyond buffer before any growth.
  MoveBlockCannotGrow = 7,    // MoveBlock() could not grow the buffer.
};
138
139
static int32_t
140
CalculateMaxBlocks(int64_t aContentLength)
141
0
{
142
0
  int64_t maxSize = int64_t(StaticPrefs::MediaMemoryCacheMaxSize()) * 1024;
143
0
  MOZ_ASSERT(aContentLength <= maxSize);
144
0
  MOZ_ASSERT(maxSize % MediaBlockCacheBase::BLOCK_SIZE == 0);
145
0
  // Note: It doesn't matter if calculations overflow, Init() would later fail.
146
0
  // We want at least enough blocks to contain the original content length.
147
0
  const int32_t requiredBlocks = maxSize / MediaBlockCacheBase::BLOCK_SIZE;
148
0
  // Allow at least 1s of ultra HD (25Mbps).
149
0
  const int32_t workableBlocks =
150
0
    25 * 1024 * 1024 / 8 / MediaBlockCacheBase::BLOCK_SIZE;
151
0
  return std::max(requiredBlocks, workableBlocks);
152
0
}
153
154
// Construct a cache sized for aContentLength bytes of content (clamped to 0
// if negative). The buffer itself is not allocated until Init().
MemoryBlockCache::MemoryBlockCache(int64_t aContentLength)
  // Buffer whole blocks.
  : mInitialContentLength((aContentLength >= 0) ? size_t(aContentLength) : 0)
  , mMaxBlocks(CalculateMaxBlocks(aContentLength))
  , mMutex("MemoryBlockCache")
  , mHasGrown(false)
{
  if (aContentLength > 0) {
    // Normal case: a known positive content length, nothing to report.
    return;
  }
  // A non-positive content length suggests this cache won't be useful;
  // record the anomaly in telemetry.
  LOG("MemoryBlockCache() MEMORYBLOCKCACHE_ERRORS='InitUnderuse'");
  Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                        InitUnderuse);
}
167
168
// Return this cache's share of the global combined-sizes accounting before
// the buffer is released.
MemoryBlockCache::~MemoryBlockCache()
{
  const size_t bufferLength = mBuffer.Length();
  const size_t remainingSizes =
    static_cast<size_t>(gCombinedSizes -= bufferLength);
  LOG("~MemoryBlockCache() - destroying buffer of size %zu; combined sizes now "
      "%zu",
      bufferLength,
      remainingSizes);
}
176
177
// Grow mBuffer (in whole BLOCK_SIZE units) so it can hold at least
// aContentLength bytes. Must be called with mMutex held.
// Returns false if the first-ever allocation would exceed the combined
// MemoryBlockCache memory limit, or if the allocation itself fails.
// On success, updates gCombinedSizes (including any bonus capacity the
// allocator handed us), notifies the telemetry watermark, and sets mHasGrown.
bool
MemoryBlockCache::EnsureBufferCanContain(size_t aContentLength)
{
  mMutex.AssertCurrentThreadOwns();
  if (aContentLength == 0) {
    return true;
  }
  const size_t initialLength = mBuffer.Length();
  // Round the requested length up to a whole number of blocks.
  const size_t desiredLength =
    ((aContentLength - 1) / BLOCK_SIZE + 1) * BLOCK_SIZE;
  if (initialLength >= desiredLength) {
    // Already large enough.
    return true;
  }
  // Need larger buffer. If we are allowed more memory, attempt to re-allocate.
  const size_t extra = desiredLength - initialLength;
  // Only check the very first allocation against the combined MemoryBlockCache
  // limit. Further growths will always be allowed, assuming MediaCache won't
  // go over GetMaxBlocks() by too much.
  if (initialLength == 0) {
    // Note: There is a small race between testing `atomic + extra > limit` and
    // committing to it with `atomic += extra` below; but this is acceptable, as
    // in the worst case it may allow a small number of buffers to go past the
    // limit.
    // The alternative would have been to reserve the space first with
    // `atomic += extra` and then undo it with `atomic -= extra` in case of
    // failure; but this would have meant potentially preventing other (small
    // but successful) allocations.
    // Physical memory size, clamped to at least 32MB; computed once
    // (function-local static) since it cannot change during a run.
    static const size_t sysmem =
      std::max<size_t>(PR_GetPhysicalMemorySize(), 32 * 1024 * 1024);
    // Limit is the smaller of the absolute KB pref and a percentage-of-RAM pref.
    const size_t limit = std::min(
      size_t(StaticPrefs::MediaMemoryCachesCombinedLimitKb()) * 1024,
      sysmem * StaticPrefs::MediaMemoryCachesCombinedLimitPcSysmem() / 100);
    const size_t currentSizes = static_cast<size_t>(gCombinedSizes);
    if (currentSizes + extra > limit) {
      LOG("EnsureBufferCanContain(%zu) - buffer size %zu, wanted + %zu = %zu;"
          " combined sizes %zu + %zu > limit %zu",
          aContentLength,
          initialLength,
          extra,
          desiredLength,
          currentSizes,
          extra,
          limit);
      return false;
    }
  }
  // Fallible allocation: OOM here is reported as failure, not a crash.
  if (!mBuffer.SetLength(desiredLength, mozilla::fallible)) {
    LOG("EnsureBufferCanContain(%zu) - buffer size %zu, wanted + %zu = %zu, "
        "allocation failed",
        aContentLength,
        initialLength,
        extra,
        desiredLength);
    return false;
  }
  MOZ_ASSERT(mBuffer.Length() == desiredLength);
  const size_t capacity = mBuffer.Capacity();
  const size_t extraCapacity = capacity - desiredLength;
  if (extraCapacity != 0) {
    // Our buffer was given a larger capacity than the requested length, we may
    // as well claim that extra capacity, both for our accounting, and to
    // possibly bypass some future growths that would fit in this new capacity.
    mBuffer.SetLength(capacity);
  }
  // Account for both the requested growth and the claimed bonus capacity.
  size_t newSizes =
    static_cast<size_t>(gCombinedSizes += (extra + extraCapacity));
  size_t watermark =
    MemoryBlockCacheTelemetry::NotifyCombinedSizeGrown(newSizes);
  LOG("EnsureBufferCanContain(%zu) - buffer size %zu + requested %zu + bonus "
      "%zu = %zu; combined "
      "sizes %zu, watermark %zu",
      aContentLength,
      initialLength,
      extra,
      extraCapacity,
      capacity,
      newSizes,
      watermark);
  mHasGrown = true;
  return true;
}
259
260
// Pre-allocate the buffer for the expected content length.
// Returns NS_ERROR_FAILURE (and accumulates InitAllocation telemetry) if the
// allocation is refused or fails.
nsresult
MemoryBlockCache::Init()
{
  LOG("Init()");
  MutexAutoLock lock(mMutex);
  MOZ_ASSERT(mBuffer.IsEmpty());
  // Attempt to pre-allocate buffer for expected content length.
  if (EnsureBufferCanContain(mInitialContentLength)) {
    return NS_OK;
  }
  LOG("Init() MEMORYBLOCKCACHE_ERRORS='InitAllocation'");
  Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                        InitAllocation);
  return NS_ERROR_FAILURE;
}
275
276
void
277
MemoryBlockCache::Flush()
278
0
{
279
0
  LOG("Flush()");
280
0
  MutexAutoLock lock(mMutex);
281
0
  MOZ_ASSERT(mBuffer.Length() >= mInitialContentLength);
282
0
  memset(mBuffer.Elements(), 0, mBuffer.Length());
283
0
  mHasGrown = false;
284
0
}
285
286
// Write the concatenation of aData1 and aData2 at the given block index,
// growing the buffer if needed.
// Accumulates WriteBlockOverflow telemetry if the write lands past the
// current buffer before any growth has happened, and WriteBlockCannotGrow
// (returning NS_ERROR_FAILURE) if the buffer cannot be grown to fit.
nsresult
MemoryBlockCache::WriteBlock(uint32_t aBlockIndex,
                             Span<const uint8_t> aData1,
                             Span<const uint8_t> aData2)
{
  MutexAutoLock lock(mMutex);

  size_t offset = BlockIndexToOffset(aBlockIndex);
  if (offset + aData1.Length() + aData2.Length() > mBuffer.Length() &&
      !mHasGrown) {
    LOG("WriteBlock() MEMORYBLOCKCACHE_ERRORS='WriteBlockOverflow'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          WriteBlockOverflow);
  }
  if (!EnsureBufferCanContain(offset + aData1.Length() + aData2.Length())) {
    LOG("WriteBlock() MEMORYBLOCKCACHE_ERRORS='WriteBlockCannotGrow'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          WriteBlockCannotGrow);
    return NS_ERROR_FAILURE;
  }

  // Guard both copies against empty spans: an empty Span may report a null
  // Elements() pointer, and memcpy with a null source is undefined behavior
  // even when the length is 0. (Previously only aData2 was guarded.)
  if (aData1.Length() > 0) {
    memcpy(mBuffer.Elements() + offset, aData1.Elements(), aData1.Length());
  }
  if (aData2.Length() > 0) {
    memcpy(mBuffer.Elements() + offset + aData1.Length(),
           aData2.Elements(),
           aData2.Length());
  }

  return NS_OK;
}
316
317
// Read aLength bytes at aOffset into aData; *aBytes receives the byte count.
// Fails (ReadOverrun telemetry + NS_ERROR_FAILURE) if the requested range
// extends past the current buffer.
nsresult
MemoryBlockCache::Read(int64_t aOffset,
                       uint8_t* aData,
                       int32_t aLength,
                       int32_t* aBytes)
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aOffset >= 0);
  // A negative length would pass the bounds check below and then convert to a
  // huge size_t in memcpy; assert it never happens.
  MOZ_ASSERT(aLength >= 0);
  if (aOffset + aLength > int64_t(mBuffer.Length())) {
    LOG("Read() MEMORYBLOCKCACHE_ERRORS='ReadOverrun'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          ReadOverrun);
    return NS_ERROR_FAILURE;
  }

  // Skip the copy for zero-length reads: on an empty buffer Elements() may be
  // null, and memcpy with a null pointer is undefined behavior even for 0
  // bytes.
  if (aLength > 0) {
    memcpy(aData, mBuffer.Elements() + aOffset, aLength);
  }
  *aBytes = aLength;

  return NS_OK;
}
338
339
// Copy one whole block from aSourceBlockIndex to aDestBlockIndex, growing the
// buffer if the destination lies past the current end.
// Fails if the source block is not fully inside the buffer, or if the buffer
// cannot grow to fit the destination; each failure mode has its own
// MEMORYBLOCKCACHE_ERRORS telemetry value.
nsresult
MemoryBlockCache::MoveBlock(int32_t aSourceBlockIndex, int32_t aDestBlockIndex)
{
  MutexAutoLock lock(mMutex);

  const size_t srcOffset = BlockIndexToOffset(aSourceBlockIndex);
  const size_t dstOffset = BlockIndexToOffset(aDestBlockIndex);
  const size_t currentLength = mBuffer.Length();
  // The source block must already be fully inside the buffer.
  if (srcOffset + BLOCK_SIZE > currentLength) {
    LOG("MoveBlock() MEMORYBLOCKCACHE_ERRORS='MoveBlockSourceOverrun'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          MoveBlockSourceOverrun);
    return NS_ERROR_FAILURE;
  }
  // A destination past the end is only unexpected before the first growth;
  // report it, but carry on and try to grow below.
  if (dstOffset + BLOCK_SIZE > currentLength && !mHasGrown) {
    LOG("MoveBlock() MEMORYBLOCKCACHE_ERRORS='MoveBlockDestOverflow'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          MoveBlockDestOverflow);
  }
  if (!EnsureBufferCanContain(dstOffset + BLOCK_SIZE)) {
    LOG("MoveBlock() MEMORYBLOCKCACHE_ERRORS='MoveBlockCannotGrow'");
    Telemetry::Accumulate(Telemetry::HistogramID::MEMORYBLOCKCACHE_ERRORS,
                          MoveBlockCannotGrow);
    return NS_ERROR_FAILURE;
  }

  memcpy(mBuffer.Elements() + dstOffset,
         mBuffer.Elements() + srcOffset,
         BLOCK_SIZE);

  return NS_OK;
}
370
371
} // End namespace mozilla.
372
373
// avoid redefined macro in unified build
374
#undef LOG