Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/xpcom/base/nsMemoryReporterManager.cpp
Line
Count
Source (jump to first uncovered line)
1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
3
/* This Source Code Form is subject to the terms of the Mozilla Public
4
 * License, v. 2.0. If a copy of the MPL was not distributed with this
5
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "nsAtomTable.h"
8
#include "nsAutoPtr.h"
9
#include "nsCOMPtr.h"
10
#include "nsCOMArray.h"
11
#include "nsPrintfCString.h"
12
#include "nsProxyRelease.h"
13
#include "nsServiceManagerUtils.h"
14
#include "nsMemoryReporterManager.h"
15
#include "nsITimer.h"
16
#include "nsThreadUtils.h"
17
#include "nsPIDOMWindow.h"
18
#include "nsIObserverService.h"
19
#include "nsIGlobalObject.h"
20
#include "nsIXPConnect.h"
21
#ifdef MOZ_GECKO_PROFILER
22
#include "GeckoProfilerReporter.h"
23
#endif
24
#if defined(XP_UNIX) || defined(MOZ_DMD)
25
#include "nsMemoryInfoDumper.h"
26
#endif
27
#include "nsNetCID.h"
28
#include "nsThread.h"
29
#include "mozilla/Attributes.h"
30
#include "mozilla/MemoryReportingProcess.h"
31
#include "mozilla/PodOperations.h"
32
#include "mozilla/Preferences.h"
33
#include "mozilla/ResultExtensions.h"
34
#include "mozilla/Services.h"
35
#include "mozilla/Telemetry.h"
36
#include "mozilla/UniquePtrExtensions.h"
37
#include "mozilla/dom/MemoryReportTypes.h"
38
#include "mozilla/dom/ContentParent.h"
39
#include "mozilla/gfx/GPUProcessManager.h"
40
#include "mozilla/ipc/FileDescriptorUtils.h"
41
42
#ifdef XP_WIN
43
#include "mozilla/MemoryInfo.h"
44
45
#include <process.h>
46
#ifndef getpid
47
#define getpid _getpid
48
#endif
49
#else
50
#include <unistd.h>
51
#endif
52
53
using namespace mozilla;
54
using namespace dom;
55
56
#if defined(MOZ_MEMORY)
57
#  define HAVE_JEMALLOC_STATS 1
58
#  include "mozmemory.h"
59
#endif  // MOZ_MEMORY
60
61
#if defined(XP_LINUX)
62
63
#include "mozilla/MemoryMapping.h"
64
65
#include <malloc.h>
66
#include <string.h>
67
#include <stdlib.h>
68
69
// Reads one of the first two fields of /proc/self/statm (0 = total program
// size, 1 = resident set size) and returns it in bytes via |aN|.
static MOZ_MUST_USE nsresult
GetProcSelfStatmField(int aField, int64_t* aN)
{
  // statm has more than two fields, but we're only interested in the first
  // two.
  static const int MAX_FIELD = 2;
  MOZ_ASSERT(aField < MAX_FIELD, "bad field number");

  size_t fields[MAX_FIELD];
  FILE* f = fopen("/proc/self/statm", "r");
  if (!f) {
    return NS_ERROR_FAILURE;
  }
  int nread = fscanf(f, "%zu %zu", &fields[0], &fields[1]);
  fclose(f);
  if (nread != MAX_FIELD) {
    return NS_ERROR_FAILURE;
  }

  // statm values are in pages; convert to bytes.
  *aN = fields[aField] * getpagesize();
  return NS_OK;
}
88
89
// Computes the unique set size (USS) by summing the private clean and
// private dirty bytes of every mapping in /proc/self/smaps.
static MOZ_MUST_USE nsresult
GetProcSelfSmapsPrivate(int64_t* aN)
{
  // You might be tempted to calculate USS by subtracting the "shared" value
  // from the "resident" value in /proc/<pid>/statm. But at least on Linux,
  // statm's "shared" value actually counts pages backed by files, which has
  // little to do with whether the pages are actually shared. /proc/self/smaps
  // on the other hand appears to give us the correct information.

  nsTArray<MemoryMapping> mappings(1024);
  MOZ_TRY(GetMemoryMappings(mappings));

  int64_t total = 0;
  for (auto& mapping : mappings) {
    total += mapping.Private_Clean() + mapping.Private_Dirty();
  }
  *aN = total;
  return NS_OK;
}
109
110
#define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
111
static MOZ_MUST_USE nsresult
112
VsizeDistinguishedAmount(int64_t* aN)
113
0
{
114
0
  return GetProcSelfStatmField(0, aN);
115
0
}
116
117
// Resident set size: field 1 of /proc/self/statm.
static MOZ_MUST_USE nsresult
ResidentDistinguishedAmount(int64_t* aN)
{
  return GetProcSelfStatmField(1, aN);
}
122
123
// On Linux the "fast" resident measurement is the same as the regular one;
// no purging step is needed here.
static MOZ_MUST_USE nsresult
ResidentFastDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmount(aN);
}
128
129
#define HAVE_RESIDENT_UNIQUE_REPORTER 1
130
static MOZ_MUST_USE nsresult
131
ResidentUniqueDistinguishedAmount(int64_t* aN)
132
0
{
133
0
  return GetProcSelfSmapsPrivate(aN);
134
0
}
135
136
#ifdef HAVE_MALLINFO
#define HAVE_SYSTEM_HEAP_REPORTER 1
// Reports the size of the system (glibc) heap via mallinfo().
static MOZ_MUST_USE nsresult
SystemHeapSize(int64_t* aSizeOut)
{
  struct mallinfo info = mallinfo();

  // The documentation in the glibc man page makes it sound like |uordblks|
  // would suffice, but that only gets the small allocations that are put in
  // the brk heap. We need |hblkhd| as well to get the larger allocations
  // that are mmapped.
  //
  // The fields in |struct mallinfo| are all |int|, <sigh>, so it is
  // unreliable if memory usage gets high. However, the system heap size on
  // Linux should usually be zero (so long as jemalloc is enabled) so that
  // shouldn't be a problem. Nonetheless, cast the |int|s to |size_t| before
  // adding them to provide a small amount of extra overflow protection.
  *aSizeOut = size_t(info.hblkhd) + size_t(info.uordblks);
  return NS_OK;
}
#endif
157
158
#elif defined(__DragonFly__) || defined(__FreeBSD__) \
159
    || defined(__NetBSD__) || defined(__OpenBSD__) \
160
    || defined(__FreeBSD_kernel__)
161
162
#include <sys/param.h>
163
#include <sys/sysctl.h>
164
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
165
#include <sys/user.h>
166
#endif
167
168
#include <unistd.h>
169
170
#if defined(__NetBSD__)
171
#undef KERN_PROC
172
#define KERN_PROC KERN_PROC2
173
#define KINFO_PROC struct kinfo_proc2
174
#else
175
#define KINFO_PROC struct kinfo_proc
176
#endif
177
178
#if defined(__DragonFly__)
179
#define KP_SIZE(kp) (kp.kp_vm_map_size)
180
#define KP_RSS(kp) (kp.kp_vm_rssize * getpagesize())
181
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
182
#define KP_SIZE(kp) (kp.ki_size)
183
#define KP_RSS(kp) (kp.ki_rssize * getpagesize())
184
#elif defined(__NetBSD__)
185
#define KP_SIZE(kp) (kp.p_vm_msize * getpagesize())
186
#define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
187
#elif defined(__OpenBSD__)
188
#define KP_SIZE(kp) ((kp.p_vm_dsize + kp.p_vm_ssize                     \
189
                      + kp.p_vm_tsize) * getpagesize())
190
#define KP_RSS(kp) (kp.p_vm_rssize * getpagesize())
191
#endif
192
193
// Fills |aProc| with this process's kinfo_proc record via sysctl().
static MOZ_MUST_USE nsresult
GetKinfoProcSelf(KINFO_PROC* aProc)
{
#if defined(__OpenBSD__) && defined(MOZ_SANDBOX)
  // Once pledged we cannot issue this sysctl, so fail up front rather than
  // trip the sandbox.
  static LazyLogModule sPledgeLog("SandboxPledge");
  MOZ_LOG(sPledgeLog, LogLevel::Debug,
         ("%s called when pledged, returning NS_ERROR_FAILURE\n", __func__));
  return NS_ERROR_FAILURE;
#endif
  int mib[] = {
    CTL_KERN,
    KERN_PROC,
    KERN_PROC_PID,
    getpid(),
#if defined(__NetBSD__) || defined(__OpenBSD__)
    // NetBSD/OpenBSD take two extra arguments: record size and count.
    sizeof(KINFO_PROC),
    1,
#endif
  };
  u_int mibLen = sizeof(mib) / sizeof(mib[0]);
  size_t size = sizeof(KINFO_PROC);
  if (sysctl(mib, mibLen, aProc, &size, nullptr, 0) != 0) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
219
220
#define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
221
static MOZ_MUST_USE nsresult
222
VsizeDistinguishedAmount(int64_t* aN)
223
{
224
  KINFO_PROC proc;
225
  nsresult rv = GetKinfoProcSelf(&proc);
226
  if (NS_SUCCEEDED(rv)) {
227
    *aN = KP_SIZE(proc);
228
  }
229
  return rv;
230
}
231
232
// Resident set size from the per-platform KP_RSS field of kinfo_proc.
static MOZ_MUST_USE nsresult
ResidentDistinguishedAmount(int64_t* aN)
{
  KINFO_PROC proc;
  nsresult rv = GetKinfoProcSelf(&proc);
  if (NS_FAILED(rv)) {
    return rv;
  }
  *aN = KP_RSS(proc);
  return NS_OK;
}
242
243
// No fast path distinct from the regular resident measurement on the BSDs.
static MOZ_MUST_USE nsresult
ResidentFastDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmount(aN);
}
248
249
#ifdef __FreeBSD__
250
#include <libutil.h>
251
#include <algorithm>
252
253
// Walks this process's VM map. If |aPrss| is non-null it receives the sum of
// kve_private_resident over all entries; if |aMaxreg| is non-null it receives
// the size of the largest region. Either out-param may be null.
static MOZ_MUST_USE nsresult
GetKinfoVmentrySelf(int64_t* aPrss, uint64_t* aMaxreg)
{
  int cnt;
  struct kinfo_vmentry* vmmap = kinfo_getvmmap(getpid(), &cnt);
  if (!vmmap) {
    return NS_ERROR_FAILURE;
  }

  if (aPrss) {
    *aPrss = 0;
  }
  if (aMaxreg) {
    *aMaxreg = 0;
  }

  for (int i = 0; i < cnt; i++) {
    struct kinfo_vmentry* kve = &vmmap[i];
    if (aPrss) {
      *aPrss += kve->kve_private_resident;
    }
    if (aMaxreg) {
      *aMaxreg = std::max(*aMaxreg, kve->kve_end - kve->kve_start);
    }
  }

  // kinfo_getvmmap() allocates; release the map before returning.
  free(vmmap);
  return NS_OK;
}
282
283
#define HAVE_PRIVATE_REPORTER 1
284
static MOZ_MUST_USE nsresult
285
PrivateDistinguishedAmount(int64_t* aN)
286
{
287
  int64_t priv;
288
  nsresult rv = GetKinfoVmentrySelf(&priv, nullptr);
289
  NS_ENSURE_SUCCESS(rv, rv);
290
  *aN = priv * getpagesize();
291
  return NS_OK;
292
}
293
294
#define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
295
static MOZ_MUST_USE nsresult
296
VsizeMaxContiguousDistinguishedAmount(int64_t* aN)
297
{
298
  uint64_t biggestRegion;
299
  nsresult rv = GetKinfoVmentrySelf(nullptr, &biggestRegion);
300
  if (NS_SUCCEEDED(rv)) {
301
    *aN = biggestRegion;
302
  }
303
  return NS_OK;
304
}
305
#endif // FreeBSD
306
307
#elif defined(SOLARIS)
308
309
#include <procfs.h>
310
#include <fcntl.h>
311
#include <unistd.h>
312
313
static void
314
XMappingIter(int64_t& aVsize, int64_t& aResident, int64_t& aShared)
315
{
316
  aVsize = -1;
317
  aResident = -1;
318
  aShared = -1;
319
  int mapfd = open("/proc/self/xmap", O_RDONLY);
320
  struct stat st;
321
  prxmap_t* prmapp = nullptr;
322
  if (mapfd >= 0) {
323
    if (!fstat(mapfd, &st)) {
324
      int nmap = st.st_size / sizeof(prxmap_t);
325
      while (1) {
326
        // stat(2) on /proc/<pid>/xmap returns an incorrect value,
327
        // prior to the release of Solaris 11.
328
        // Here is a workaround for it.
329
        nmap *= 2;
330
        prmapp = (prxmap_t*)malloc((nmap + 1) * sizeof(prxmap_t));
331
        if (!prmapp) {
332
          // out of memory
333
          break;
334
        }
335
        int n = pread(mapfd, prmapp, (nmap + 1) * sizeof(prxmap_t), 0);
336
        if (n < 0) {
337
          break;
338
        }
339
        if (nmap >= n / sizeof(prxmap_t)) {
340
          aVsize = 0;
341
          aResident = 0;
342
          aShared = 0;
343
          for (int i = 0; i < n / sizeof(prxmap_t); i++) {
344
            aVsize += prmapp[i].pr_size;
345
            aResident += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
346
            if (prmapp[i].pr_mflags & MA_SHARED) {
347
              aShared += prmapp[i].pr_rss * prmapp[i].pr_pagesize;
348
            }
349
          }
350
          break;
351
        }
352
        free(prmapp);
353
      }
354
      free(prmapp);
355
    }
356
    close(mapfd);
357
  }
358
}
359
360
#define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
361
static MOZ_MUST_USE nsresult
362
VsizeDistinguishedAmount(int64_t* aN)
363
{
364
  int64_t vsize, resident, shared;
365
  XMappingIter(vsize, resident, shared);
366
  if (vsize == -1) {
367
    return NS_ERROR_FAILURE;
368
  }
369
  *aN = vsize;
370
  return NS_OK;
371
}
372
373
// Resident size from /proc/self/xmap; -1 from XMappingIter means failure.
static MOZ_MUST_USE nsresult
ResidentDistinguishedAmount(int64_t* aN)
{
  int64_t vsize, resident, shared;
  XMappingIter(vsize, resident, shared);
  if (resident == -1) {
    return NS_ERROR_FAILURE;
  }
  *aN = resident;
  return NS_OK;
}
384
385
// No fast path distinct from the regular resident measurement on Solaris.
static MOZ_MUST_USE nsresult
ResidentFastDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmount(aN);
}
390
391
#define HAVE_RESIDENT_UNIQUE_REPORTER 1
392
static MOZ_MUST_USE nsresult
393
ResidentUniqueDistinguishedAmount(int64_t* aN)
394
{
395
  int64_t vsize, resident, shared;
396
  XMappingIter(vsize, resident, shared);
397
  if (resident == -1) {
398
    return NS_ERROR_FAILURE;
399
  }
400
  *aN = resident - shared;
401
  return NS_OK;
402
}
403
404
#elif defined(XP_MACOSX)
405
406
#include <mach/mach_init.h>
407
#include <mach/mach_vm.h>
408
#include <mach/shared_region.h>
409
#include <mach/task.h>
410
#include <sys/sysctl.h>
411
412
// Fills |aTi| with this task's basic info; returns true on success.
static MOZ_MUST_USE bool
GetTaskBasicInfo(struct task_basic_info* aTi)
{
  mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
  kern_return_t kr = task_info(mach_task_self(), TASK_BASIC_INFO,
                               (task_info_t)aTi, &count);
  return kr == KERN_SUCCESS;
}
420
421
// The VSIZE figure on Mac includes huge amounts of shared memory and is always
422
// absurdly high, eg. 2GB+ even at start-up.  But both 'top' and 'ps' report
423
// it, so we might as well too.
424
#define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
425
static MOZ_MUST_USE nsresult
426
VsizeDistinguishedAmount(int64_t* aN)
427
{
428
  task_basic_info ti;
429
  if (!GetTaskBasicInfo(&ti)) {
430
    return NS_ERROR_FAILURE;
431
  }
432
  *aN = ti.virtual_size;
433
  return NS_OK;
434
}
435
436
// If we're using jemalloc on Mac, we need to instruct jemalloc to purge the
437
// pages it has madvise(MADV_FREE)'d before we read our RSS in order to get
438
// an accurate result.  The OS will take away MADV_FREE'd pages when there's
439
// memory pressure, so ideally, they shouldn't count against our RSS.
440
//
441
// Purging these pages can take a long time for some users (see bug 789975),
442
// so we provide the option to get the RSS without purging first.
443
static MOZ_MUST_USE nsresult
444
ResidentDistinguishedAmountHelper(int64_t* aN, bool aDoPurge)
445
{
446
#ifdef HAVE_JEMALLOC_STATS
447
  if (aDoPurge) {
448
    Telemetry::AutoTimer<Telemetry::MEMORY_FREE_PURGED_PAGES_MS> timer;
449
    jemalloc_purge_freed_pages();
450
  }
451
#endif
452
453
  task_basic_info ti;
454
  if (!GetTaskBasicInfo(&ti)) {
455
    return NS_ERROR_FAILURE;
456
  }
457
  *aN = ti.resident_size;
458
  return NS_OK;
459
}
460
461
// Fast variant: skips the (potentially slow) jemalloc purge.
static MOZ_MUST_USE nsresult
ResidentFastDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ false);
}
466
467
// Accurate variant: purges MADV_FREE'd pages before reading RSS.
static MOZ_MUST_USE nsresult
ResidentDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmountHelper(aN, /* doPurge = */ true);
}
472
473
#define HAVE_RESIDENT_UNIQUE_REPORTER 1
474
475
static bool
476
InSharedRegion(mach_vm_address_t aAddr, cpu_type_t aType)
477
{
478
  mach_vm_address_t base;
479
  mach_vm_address_t size;
480
481
  switch (aType) {
482
    case CPU_TYPE_ARM:
483
      base = SHARED_REGION_BASE_ARM;
484
      size = SHARED_REGION_SIZE_ARM;
485
      break;
486
    case CPU_TYPE_I386:
487
      base = SHARED_REGION_BASE_I386;
488
      size = SHARED_REGION_SIZE_I386;
489
      break;
490
    case CPU_TYPE_X86_64:
491
      base = SHARED_REGION_BASE_X86_64;
492
      size = SHARED_REGION_SIZE_X86_64;
493
      break;
494
    default:
495
      return false;
496
  }
497
498
  return base <= aAddr && aAddr < (base + size);
499
}
500
501
// Computes USS by walking all VM regions and counting pages that are private
// to this process, then converting the page count to bytes.
static MOZ_MUST_USE nsresult
ResidentUniqueDistinguishedAmount(int64_t* aN)
{
  if (!aN) {
    return NS_ERROR_FAILURE;
  }

  // The shared-region test below needs the CPU architecture of this process.
  cpu_type_t cpu_type;
  size_t len = sizeof(cpu_type);
  if (sysctlbyname("sysctl.proc_cputype", &cpu_type, &len, NULL, 0) != 0) {
    return NS_ERROR_FAILURE;
  }

  // Roughly based on libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-100.1.2/libtop.c
  size_t privatePages = 0;
  mach_vm_size_t size = 0;
  for (mach_vm_address_t addr = MACH_VM_MIN_ADDRESS; ; addr += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t infoCount = VM_REGION_TOP_INFO_COUNT;
    mach_port_t objectName;

    kern_return_t kr =
        mach_vm_region(mach_task_self(), &addr, &size, VM_REGION_TOP_INFO,
                       reinterpret_cast<vm_region_info_t>(&info),
                       &infoCount, &objectName);
    if (kr == KERN_INVALID_ADDRESS) {
      // Done iterating VM regions.
      break;
    } else if (kr != KERN_SUCCESS) {
      return NS_ERROR_FAILURE;
    }

    // Skip regions in the system shared region unless the kernel reports
    // them as private to this task.
    if (InSharedRegion(addr, cpu_type) && info.share_mode != SM_PRIVATE) {
        continue;
    }

    switch (info.share_mode) {
      case SM_LARGE_PAGE:
        // NB: Large pages are not shareable and always resident.
        // (Deliberate fall through to SM_PRIVATE.)
      case SM_PRIVATE:
        privatePages += info.private_pages_resident;
        privatePages += info.shared_pages_resident;
        break;
      case SM_COW:
        privatePages += info.private_pages_resident;
        if (info.ref_count == 1) {
          // Treat copy-on-write pages as private if they only have one reference.
          privatePages += info.shared_pages_resident;
        }
        break;
      case SM_SHARED:
      default:
        break;
    }
  }

  vm_size_t pageSize;
  if (host_page_size(mach_host_self(), &pageSize) != KERN_SUCCESS) {
    // Fall back to the compile-time page size if the query fails.
    pageSize = PAGE_SIZE;
  }

  *aN = privatePages * pageSize;
  return NS_OK;
}
566
567
#elif defined(XP_WIN)
568
569
#include <windows.h>
570
#include <psapi.h>
571
#include <algorithm>
572
573
#define HAVE_VSIZE_AND_RESIDENT_REPORTERS 1
574
static MOZ_MUST_USE nsresult
575
VsizeDistinguishedAmount(int64_t* aN)
576
{
577
  MEMORYSTATUSEX s;
578
  s.dwLength = sizeof(s);
579
580
  if (!GlobalMemoryStatusEx(&s)) {
581
    return NS_ERROR_FAILURE;
582
  }
583
584
  *aN = s.ullTotalVirtual - s.ullAvailVirtual;
585
  return NS_OK;
586
}
587
588
// Resident set size: the process's working set, per GetProcessMemoryInfo.
static MOZ_MUST_USE nsresult
ResidentDistinguishedAmount(int64_t* aN)
{
  PROCESS_MEMORY_COUNTERS pmc;
  pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS);
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) {
    return NS_ERROR_FAILURE;
  }
  *aN = pmc.WorkingSetSize;
  return NS_OK;
}
601
602
// No fast path distinct from the regular resident measurement on Windows.
static MOZ_MUST_USE nsresult
ResidentFastDistinguishedAmount(int64_t* aN)
{
  return ResidentDistinguishedAmount(aN);
}
607
608
#define HAVE_RESIDENT_UNIQUE_REPORTER 1
609
610
static MOZ_MUST_USE nsresult
611
ResidentUniqueDistinguishedAmount(int64_t* aN)
612
{
613
  // Determine how many entries we need.
614
  PSAPI_WORKING_SET_INFORMATION tmp;
615
  DWORD tmpSize = sizeof(tmp);
616
  memset(&tmp, 0, tmpSize);
617
618
  HANDLE proc = GetCurrentProcess();
619
  QueryWorkingSet(proc, &tmp, tmpSize);
620
621
  // Fudge the size in case new entries are added between calls.
622
  size_t entries = tmp.NumberOfEntries * 2;
623
624
  if (!entries) {
625
    return NS_ERROR_FAILURE;
626
  }
627
628
  DWORD infoArraySize = tmpSize + (entries * sizeof(PSAPI_WORKING_SET_BLOCK));
629
  UniqueFreePtr<PSAPI_WORKING_SET_INFORMATION> infoArray(
630
      static_cast<PSAPI_WORKING_SET_INFORMATION*>(malloc(infoArraySize)));
631
632
  if (!infoArray) {
633
    return NS_ERROR_FAILURE;
634
  }
635
636
  if (!QueryWorkingSet(proc, infoArray.get(), infoArraySize)) {
637
    return NS_ERROR_FAILURE;
638
  }
639
640
  entries = static_cast<size_t>(infoArray->NumberOfEntries);
641
  size_t privatePages = 0;
642
  for (size_t i = 0; i < entries; i++) {
643
    // Count shared pages that only one process is using as private.
644
    if (!infoArray->WorkingSetInfo[i].Shared ||
645
        infoArray->WorkingSetInfo[i].ShareCount <= 1) {
646
      privatePages++;
647
    }
648
  }
649
650
  SYSTEM_INFO si;
651
  GetSystemInfo(&si);
652
653
  *aN = privatePages * si.dwPageSize;
654
  return NS_OK;
655
}
656
657
#define HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER 1
658
static MOZ_MUST_USE nsresult
659
VsizeMaxContiguousDistinguishedAmount(int64_t* aN)
660
{
661
  SIZE_T biggestRegion = 0;
662
  MEMORY_BASIC_INFORMATION vmemInfo = { 0 };
663
  for (size_t currentAddress = 0; ; ) {
664
    if (!VirtualQuery((LPCVOID)currentAddress, &vmemInfo, sizeof(vmemInfo))) {
665
      // Something went wrong, just return whatever we've got already.
666
      break;
667
    }
668
669
    if (vmemInfo.State == MEM_FREE) {
670
      biggestRegion = std::max(biggestRegion, vmemInfo.RegionSize);
671
    }
672
673
    SIZE_T lastAddress = currentAddress;
674
    currentAddress += vmemInfo.RegionSize;
675
676
    // If we overflow, we've examined all of the address space.
677
    if (currentAddress < lastAddress) {
678
      break;
679
    }
680
  }
681
682
  *aN = biggestRegion;
683
  return NS_OK;
684
}
685
686
#define HAVE_PRIVATE_REPORTER 1
687
static MOZ_MUST_USE nsresult
688
PrivateDistinguishedAmount(int64_t* aN)
689
{
690
  PROCESS_MEMORY_COUNTERS_EX pmcex;
691
  pmcex.cb = sizeof(PROCESS_MEMORY_COUNTERS_EX);
692
693
  if (!GetProcessMemoryInfo(GetCurrentProcess(),
694
                            (PPROCESS_MEMORY_COUNTERS) &pmcex, sizeof(pmcex))) {
695
    return NS_ERROR_FAILURE;
696
  }
697
698
  *aN = pmcex.PrivateUsage;
699
  return NS_OK;
700
}
701
702
#define HAVE_SYSTEM_HEAP_REPORTER 1
703
// Windows can have multiple separate heaps. During testing there were multiple
704
// heaps present but the non-default ones had sizes no more than a few 10s of
705
// KiBs. So we combine their sizes into a single measurement.
706
static MOZ_MUST_USE nsresult
707
SystemHeapSize(int64_t* aSizeOut)
708
{
709
  // Get the number of heaps.
710
  DWORD nHeaps = GetProcessHeaps(0, nullptr);
711
  NS_ENSURE_TRUE(nHeaps != 0, NS_ERROR_FAILURE);
712
713
  // Get handles to all heaps, checking that the number of heaps hasn't
714
  // changed in the meantime.
715
  UniquePtr<HANDLE[]> heaps(new HANDLE[nHeaps]);
716
  DWORD nHeaps2 = GetProcessHeaps(nHeaps, heaps.get());
717
  NS_ENSURE_TRUE(nHeaps2 != 0 && nHeaps2 == nHeaps, NS_ERROR_FAILURE);
718
719
  // Lock and iterate over each heap to get its size.
720
  int64_t heapsSize = 0;
721
  for (DWORD i = 0; i < nHeaps; i++) {
722
    HANDLE heap = heaps[i];
723
724
    // Bug 1235982: When Control Flow Guard is enabled for the process,
725
    // GetProcessHeap may return some protected heaps that are in read-only
726
    // memory and thus crash in HeapLock. Ignore such heaps.
727
    MEMORY_BASIC_INFORMATION mbi = {0};
728
    if (VirtualQuery(heap, &mbi, sizeof(mbi)) && mbi.Protect == PAGE_READONLY) {
729
      continue;
730
    }
731
732
    NS_ENSURE_TRUE(HeapLock(heap), NS_ERROR_FAILURE);
733
734
    int64_t heapSize = 0;
735
    PROCESS_HEAP_ENTRY entry;
736
    entry.lpData = nullptr;
737
    while (HeapWalk(heap, &entry)) {
738
      // We don't count entry.cbOverhead, because we just want to measure the
739
      // space available to the program.
740
      if (entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) {
741
        heapSize += entry.cbData;
742
      }
743
    }
744
745
    // Check this result only after unlocking the heap, so that we don't leave
746
    // the heap locked if there was an error.
747
    DWORD lastError = GetLastError();
748
749
    // I have no idea how things would proceed if unlocking this heap failed...
750
    NS_ENSURE_TRUE(HeapUnlock(heap), NS_ERROR_FAILURE);
751
752
    NS_ENSURE_TRUE(lastError == ERROR_NO_MORE_ITEMS, NS_ERROR_FAILURE);
753
754
    heapsSize += heapSize;
755
  }
756
757
  *aSizeOut = heapsSize;
758
  return NS_OK;
759
}
760
761
struct SegmentKind
762
{
763
  DWORD mState;
764
  DWORD mType;
765
  DWORD mProtect;
766
  int mIsStack;
767
};
768
769
struct SegmentEntry : public PLDHashEntryHdr
770
{
771
  static PLDHashNumber HashKey(const void* aKey)
772
  {
773
    auto kind = static_cast<const SegmentKind*>(aKey);
774
    return mozilla::HashGeneric(kind->mState, kind->mType, kind->mProtect,
775
                                kind->mIsStack);
776
  }
777
778
  static bool MatchEntry(const PLDHashEntryHdr* aEntry, const void* aKey)
779
  {
780
    auto kind = static_cast<const SegmentKind*>(aKey);
781
    auto entry = static_cast<const SegmentEntry*>(aEntry);
782
    return kind->mState == entry->mKind.mState &&
783
           kind->mType == entry->mKind.mType &&
784
           kind->mProtect == entry->mKind.mProtect &&
785
           kind->mIsStack == entry->mKind.mIsStack;
786
  }
787
788
  static void InitEntry(PLDHashEntryHdr* aEntry, const void* aKey)
789
  {
790
    auto kind = static_cast<const SegmentKind*>(aKey);
791
    auto entry = static_cast<SegmentEntry*>(aEntry);
792
    entry->mKind = *kind;
793
    entry->mCount = 0;
794
    entry->mSize = 0;
795
  }
796
797
  static const PLDHashTableOps Ops;
798
799
  SegmentKind mKind;  // The segment kind.
800
  uint32_t mCount;    // The number of segments of this kind.
801
  size_t mSize;       // The combined size of segments of this kind.
802
};
803
804
/* static */ const PLDHashTableOps SegmentEntry::Ops = {
805
  SegmentEntry::HashKey,
806
  SegmentEntry::MatchEntry,
807
  PLDHashTable::MoveEntryStub,
808
  PLDHashTable::ClearEntryStub,
809
  SegmentEntry::InitEntry
810
};
811
812
class WindowsAddressSpaceReporter final : public nsIMemoryReporter
813
{
814
  ~WindowsAddressSpaceReporter() {}
815
816
public:
817
  NS_DECL_ISUPPORTS
818
819
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
820
                            nsISupports* aData, bool aAnonymize) override
821
  {
822
    // First iterate over all the segments and record how many of each kind
823
    // there were and their aggregate sizes. We use a hash table for this
824
    // because there are a couple of dozen different kinds possible.
825
826
    PLDHashTable table(&SegmentEntry::Ops, sizeof(SegmentEntry));
827
    MEMORY_BASIC_INFORMATION info = { 0 };
828
    bool isPrevSegStackGuard = false;
829
    for (size_t currentAddress = 0; ; ) {
830
      if (!VirtualQuery((LPCVOID)currentAddress, &info, sizeof(info))) {
831
        // Something went wrong, just return whatever we've got already.
832
        break;
833
      }
834
835
      size_t size = info.RegionSize;
836
837
      // Note that |type| and |protect| are ignored in some cases.
838
      DWORD state = info.State;
839
      DWORD type =
840
        (state == MEM_RESERVE || state == MEM_COMMIT) ? info.Type : 0;
841
      DWORD protect = (state == MEM_COMMIT) ? info.Protect : 0;
842
      bool isStack = isPrevSegStackGuard &&
843
                     state == MEM_COMMIT &&
844
                     type == MEM_PRIVATE &&
845
                     protect == PAGE_READWRITE;
846
847
      SegmentKind kind = { state, type, protect, isStack ? 1 : 0 };
848
      auto entry =
849
        static_cast<SegmentEntry*>(table.Add(&kind, mozilla::fallible));
850
      if (entry) {
851
        entry->mCount += 1;
852
        entry->mSize += size;
853
      }
854
855
      isPrevSegStackGuard = info.State == MEM_COMMIT &&
856
                            info.Type == MEM_PRIVATE &&
857
                            info.Protect == (PAGE_READWRITE|PAGE_GUARD);
858
859
      size_t lastAddress = currentAddress;
860
      currentAddress += size;
861
862
      // If we overflow, we've examined all of the address space.
863
      if (currentAddress < lastAddress) {
864
        break;
865
      }
866
    }
867
868
    // Then iterate over the hash table and report the details for each segment
869
    // kind.
870
871
    for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
872
      // For each range of pages, we consider one or more of its State, Type
873
      // and Protect values. These are documented at
874
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
875
      // (for State and Type) and
876
      // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
877
      // (for Protect).
878
      //
879
      // Not all State values have accompanying Type and Protection values.
880
      bool doType = false;
881
      bool doProtect = false;
882
883
      auto entry = static_cast<const SegmentEntry*>(iter.Get());
884
885
      nsCString path("address-space");
886
887
      switch (entry->mKind.mState) {
888
        case MEM_FREE:
889
          path.AppendLiteral("/free");
890
          break;
891
892
        case MEM_RESERVE:
893
          path.AppendLiteral("/reserved");
894
          doType = true;
895
          break;
896
897
        case MEM_COMMIT:
898
          path.AppendLiteral("/commit");
899
          doType = true;
900
          doProtect = true;
901
          break;
902
903
        default:
904
          // Should be impossible, but handle it just in case.
905
          path.AppendLiteral("/???");
906
          break;
907
      }
908
909
      if (doType) {
910
        switch (entry->mKind.mType) {
911
          case MEM_IMAGE:
912
            path.AppendLiteral("/image");
913
            break;
914
915
          case MEM_MAPPED:
916
            path.AppendLiteral("/mapped");
917
            break;
918
919
          case MEM_PRIVATE:
920
            path.AppendLiteral("/private");
921
            break;
922
923
          default:
924
            // Should be impossible, but handle it just in case.
925
            path.AppendLiteral("/???");
926
            break;
927
        }
928
      }
929
930
      if (doProtect) {
931
        DWORD protect = entry->mKind.mProtect;
932
        // Basic attributes. Exactly one of these should be set.
933
        if (protect & PAGE_EXECUTE) {
934
          path.AppendLiteral("/execute");
935
        }
936
        if (protect & PAGE_EXECUTE_READ) {
937
          path.AppendLiteral("/execute-read");
938
        }
939
        if (protect & PAGE_EXECUTE_READWRITE) {
940
          path.AppendLiteral("/execute-readwrite");
941
        }
942
        if (protect & PAGE_EXECUTE_WRITECOPY) {
943
          path.AppendLiteral("/execute-writecopy");
944
        }
945
        if (protect & PAGE_NOACCESS) {
946
          path.AppendLiteral("/noaccess");
947
        }
948
        if (protect & PAGE_READONLY) {
949
          path.AppendLiteral("/readonly");
950
        }
951
        if (protect & PAGE_READWRITE) {
952
          path.AppendLiteral("/readwrite");
953
        }
954
        if (protect & PAGE_WRITECOPY) {
955
          path.AppendLiteral("/writecopy");
956
        }
957
958
        // Modifiers. At most one of these should be set.
959
        if (protect & PAGE_GUARD) {
960
          path.AppendLiteral("+guard");
961
        }
962
        if (protect & PAGE_NOCACHE) {
963
          path.AppendLiteral("+nocache");
964
        }
965
        if (protect & PAGE_WRITECOMBINE) {
966
          path.AppendLiteral("+writecombine");
967
        }
968
969
        // Annotate likely stack segments, too.
970
        if (entry->mKind.mIsStack) {
971
          path.AppendLiteral("+stack");
972
        }
973
      }
974
975
      // Append the segment count.
976
      path.AppendPrintf("(segments=%u)", entry->mCount);
977
978
      aHandleReport->Callback(
979
        EmptyCString(), path, KIND_OTHER, UNITS_BYTES, entry->mSize,
980
        NS_LITERAL_CSTRING("From MEMORY_BASIC_INFORMATION."), aData);
981
    }
982
983
    return NS_OK;
984
  }
985
};
986
NS_IMPL_ISUPPORTS(WindowsAddressSpaceReporter, nsIMemoryReporter)
987
988
#endif  // XP_<PLATFORM>
989
990
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
// Reports the size of the largest contiguous block of available virtual
// memory, where the platform can measure it.
class VsizeMaxContiguousReporter final : public nsIMemoryReporter
{
  ~VsizeMaxContiguousReporter() {}

public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeMaxContiguousDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
        "vsize-max-contiguous", KIND_OTHER, UNITS_BYTES, amount,
        "Size of the maximum contiguous block of available virtual memory.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeMaxContiguousReporter, nsIMemoryReporter)
#endif
1012
1013
#ifdef HAVE_PRIVATE_REPORTER
1014
class PrivateReporter final : public nsIMemoryReporter
1015
{
1016
  ~PrivateReporter() {}
1017
1018
public:
1019
  NS_DECL_ISUPPORTS
1020
1021
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1022
                            nsISupports* aData, bool aAnonymize) override
1023
  {
1024
    int64_t amount;
1025
    if (NS_SUCCEEDED(PrivateDistinguishedAmount(&amount))) {
1026
      MOZ_COLLECT_REPORT(
1027
        "private", KIND_OTHER, UNITS_BYTES, amount,
1028
"Memory that cannot be shared with other processes, including memory that is "
1029
"committed and marked MEM_PRIVATE, data that is not mapped, and executable "
1030
"pages that have been written to.");
1031
    }
1032
    return NS_OK;
1033
  }
1034
};
1035
NS_IMPL_ISUPPORTS(PrivateReporter, nsIMemoryReporter)
1036
#endif
1037
1038
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
1039
// Reports "vsize": the process's total mapped virtual address space, as
// measured by the platform-specific VsizeDistinguishedAmount().
class VsizeReporter final : public nsIMemoryReporter
{
  ~VsizeReporter() {}

public:
  NS_DECL_ISUPPORTS

  // Emits a single "vsize" measurement; emits nothing if the platform probe
  // fails (the overall collection still succeeds).
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    int64_t amount;
    if (NS_SUCCEEDED(VsizeDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
        "vsize", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process, including code and data segments, the heap, "
"thread stacks, memory explicitly mapped by the process via mmap and similar "
"operations, and memory shared with other processes. This is the vsize figure "
"as reported by 'top' and 'ps'.  This figure is of limited use on Mac, where "
"processes share huge amounts of memory with one another.  But even on other "
"operating systems, 'resident' is a much better measure of the memory "
"resources used by the process.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(VsizeReporter, nsIMemoryReporter)
1065
1066
// Reports "resident": the process's resident set size (RSS), via the
// platform-specific ResidentDistinguishedAmount().
class ResidentReporter final : public nsIMemoryReporter
{
  ~ResidentReporter() {}

public:
  NS_DECL_ISUPPORTS

  // Emits a single "resident" measurement; emits nothing if the probe fails.
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    int64_t amount;
    if (NS_SUCCEEDED(ResidentDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
        "resident", KIND_OTHER, UNITS_BYTES, amount,
"Memory mapped by the process that is present in physical memory, also known "
"as the resident set size (RSS).  This is the best single figure to use when "
"considering the memory resources used by the process, but it depends both on "
"other processes being run and details of the OS kernel and so is best used "
"for comparing the memory usage of a single process at different points in "
"time.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ResidentReporter, nsIMemoryReporter)
1091
1092
#endif  // HAVE_VSIZE_AND_RESIDENT_REPORTERS
1093
1094
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
1095
class ResidentUniqueReporter final : public nsIMemoryReporter
1096
{
1097
0
  ~ResidentUniqueReporter() {}
1098
1099
public:
1100
  NS_DECL_ISUPPORTS
1101
1102
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1103
                            nsISupports* aData, bool aAnonymize) override
1104
0
  {
1105
0
    int64_t amount = 0;
1106
0
    if (NS_SUCCEEDED(ResidentUniqueDistinguishedAmount(&amount))) {
1107
0
      MOZ_COLLECT_REPORT(
1108
0
        "resident-unique", KIND_OTHER, UNITS_BYTES, amount,
1109
0
"Memory mapped by the process that is present in physical memory and not "
1110
0
"shared with any other processes.  This is also known as the process's unique "
1111
0
"set size (USS).  This is the amount of RAM we'd expect to be freed if we "
1112
0
"closed this process.");
1113
0
    }
1114
0
    return NS_OK;
1115
0
  }
1116
};
1117
NS_IMPL_ISUPPORTS(ResidentUniqueReporter, nsIMemoryReporter)
1118
1119
#endif // HAVE_RESIDENT_UNIQUE_REPORTER
1120
1121
#ifdef HAVE_SYSTEM_HEAP_REPORTER
1122
1123
class SystemHeapReporter final : public nsIMemoryReporter
1124
{
1125
0
  ~SystemHeapReporter() {}
1126
1127
public:
1128
  NS_DECL_ISUPPORTS
1129
1130
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1131
                            nsISupports* aData, bool aAnonymize) override
1132
0
  {
1133
0
    int64_t amount;
1134
0
    if (NS_SUCCEEDED(SystemHeapSize(&amount))) {
1135
0
      MOZ_COLLECT_REPORT(
1136
0
        "system-heap-allocated", KIND_OTHER, UNITS_BYTES, amount,
1137
0
"Memory used by the system allocator that is currently allocated to the "
1138
0
"application. This is distinct from the jemalloc heap that Firefox uses for "
1139
0
"most or all of its heap allocations. Ideally this number is zero, but "
1140
0
"on some platforms we cannot force every heap allocation through jemalloc.");
1141
0
    }
1142
0
    return NS_OK;
1143
0
  }
1144
};
1145
NS_IMPL_ISUPPORTS(SystemHeapReporter, nsIMemoryReporter)
1146
1147
#endif // HAVE_SYSTEM_HEAP_REPORTER
1148
1149
#ifdef XP_UNIX
1150
1151
#include <sys/resource.h>
1152
1153
#define HAVE_RESIDENT_PEAK_REPORTER 1
1154
1155
// Fetches the process's peak resident set size (in bytes) via getrusage().
// Returns NS_ERROR_FAILURE if the call fails or yields a non-positive value
// (some platforms report 0 — see below).
static MOZ_MUST_USE nsresult
ResidentPeakDistinguishedAmount(int64_t* aN)
{
  struct rusage usage;
  if (0 == getrusage(RUSAGE_SELF, &usage)) {
    // The units for ru_maxrss:
    // - Mac: bytes
    // - Solaris: pages? But some sources say it actually always returns 0,
    //   so check for that
    // - Linux, {Net/Open/Free}BSD, DragonFly: KiB
#ifdef XP_MACOSX
    *aN = usage.ru_maxrss;
#elif defined(SOLARIS)
    *aN = usage.ru_maxrss * getpagesize();
#else
    *aN = usage.ru_maxrss * 1024;
#endif
    if (*aN > 0) {
      return NS_OK;
    }
  }
  return NS_ERROR_FAILURE;
}
1178
1179
class ResidentPeakReporter final : public nsIMemoryReporter
1180
{
1181
0
  ~ResidentPeakReporter() {}
1182
1183
public:
1184
  NS_DECL_ISUPPORTS
1185
1186
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1187
                            nsISupports* aData, bool aAnonymize) override
1188
0
  {
1189
0
    int64_t amount = 0;
1190
0
    if (NS_SUCCEEDED(ResidentPeakDistinguishedAmount(&amount))) {
1191
0
      MOZ_COLLECT_REPORT(
1192
0
        "resident-peak", KIND_OTHER, UNITS_BYTES, amount,
1193
0
"The peak 'resident' value for the lifetime of the process.");
1194
0
    }
1195
0
    return NS_OK;
1196
0
  }
1197
};
1198
NS_IMPL_ISUPPORTS(ResidentPeakReporter, nsIMemoryReporter)
1199
1200
#define HAVE_PAGE_FAULT_REPORTERS 1
1201
1202
// Reports "page-faults-soft": the cumulative count of minor page faults
// (ru_minflt from getrusage()).
class PageFaultsSoftReporter final : public nsIMemoryReporter
{
  ~PageFaultsSoftReporter() {}

public:
  NS_DECL_ISUPPORTS

  // Emits the soft-fault counter; emits nothing if getrusage() fails.
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    struct rusage usage;
    int err = getrusage(RUSAGE_SELF, &usage);
    if (err == 0) {
      int64_t amount = usage.ru_minflt;
      MOZ_COLLECT_REPORT(
        "page-faults-soft", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of soft page faults (also known as 'minor page faults') that "
"have occurred since the process started.  A soft page fault occurs when the "
"process tries to access a page which is present in physical memory but is "
"not mapped into the process's address space.  For instance, a process might "
"observe soft page faults when it loads a shared library which is already "
"present in physical memory. A process may experience many thousands of soft "
"page faults even when the machine has plenty of available physical memory, "
"and because the OS services a soft page fault without accessing the disk, "
"they impact performance much less than hard page faults.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsSoftReporter, nsIMemoryReporter)
1232
1233
// Fetches the cumulative count of hard (major) page faults for this process
// from getrusage().  Returns NS_ERROR_FAILURE if the syscall fails.
static MOZ_MUST_USE nsresult
PageFaultsHardDistinguishedAmount(int64_t* aAmount)
{
  struct rusage stats;
  if (getrusage(RUSAGE_SELF, &stats) != 0) {
    return NS_ERROR_FAILURE;
  }
  *aAmount = stats.ru_majflt;
  return NS_OK;
}
1244
1245
// Reports "page-faults-hard": the cumulative count of major page faults
// (ru_majflt), i.e. faults that required disk access.
class PageFaultsHardReporter final : public nsIMemoryReporter
{
  ~PageFaultsHardReporter() {}

public:
  NS_DECL_ISUPPORTS

  // Emits the hard-fault counter; emits nothing if the probe fails.
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    int64_t amount = 0;
    if (NS_SUCCEEDED(PageFaultsHardDistinguishedAmount(&amount))) {
      MOZ_COLLECT_REPORT(
        "page-faults-hard", KIND_OTHER, UNITS_COUNT_CUMULATIVE, amount,
"The number of hard page faults (also known as 'major page faults') that have "
"occurred since the process started.  A hard page fault occurs when a process "
"tries to access a page which is not present in physical memory. The "
"operating system must access the disk in order to fulfill a hard page fault. "
"When memory is plentiful, you should see very few hard page faults. But if "
"the process tries to use more memory than your machine has available, you "
"may see many thousands of hard page faults. Because accessing the disk is up "
"to a million times slower than accessing RAM, the program may run very "
"slowly when it is experiencing more than 100 or so hard page faults a "
"second.");
    }
    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(PageFaultsHardReporter, nsIMemoryReporter)
1274
1275
#endif  // XP_UNIX
1276
1277
/**
1278
 ** memory reporter implementation for jemalloc and OSX malloc,
1279
 ** to obtain info on total memory in use (that we know about,
1280
 ** at least -- on OSX, there are sometimes other zones in use).
1281
 **/
1282
1283
#ifdef HAVE_JEMALLOC_STATS
1284
1285
// Sums the jemalloc stats that represent committed-but-not-allocated memory:
// waste, bookkeeping, page cache and unused bin space.
static size_t
HeapOverhead(jemalloc_stats_t* aStats)
{
  size_t overhead = aStats->waste;
  overhead += aStats->bookkeeping;
  overhead += aStats->page_cache;
  overhead += aStats->bin_unused;
  return overhead;
}
1291
1292
// This has UNITS_PERCENTAGE, so it is multiplied by 100x *again* on top of the
1293
// 100x for the percentage.
1294
static int64_t
HeapOverheadFraction(jemalloc_stats_t* aStats)
{
  size_t heapOverhead = HeapOverhead(aStats);
  size_t heapCommitted = aStats->allocated + heapOverhead;
  // Guard against a zero denominator: if nothing is allocated and there is no
  // overhead, the division below would produce NaN/inf, and casting that to
  // int64_t is undefined behavior.  Report 0% overhead instead.
  if (heapCommitted == 0) {
    return 0;
  }
  return int64_t(10000 * (heapOverhead / (double)heapCommitted));
}
1301
1302
// Reports jemalloc heap statistics: allocated bytes, the various categories
// of heap overhead, total mapped bytes and the chunk size.  The paths under
// "explicit/heap-overhead/*" sum to "heap-committed/overhead".
class JemallocHeapReporter final : public nsIMemoryReporter
{
  ~JemallocHeapReporter() {}

public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    // A single snapshot of the allocator's counters; all reports below are
    // derived from it so they are mutually consistent.
    jemalloc_stats_t stats;
    jemalloc_stats(&stats);

    MOZ_COLLECT_REPORT(
      "heap-committed/allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"Memory mapped by the heap allocator that is currently allocated to the "
"application.  This may exceed the amount of memory requested by the "
"application because the allocator regularly rounds up request sizes. (The "
"exact amount requested is not recorded.)");

    MOZ_COLLECT_REPORT(
      "heap-allocated", KIND_OTHER, UNITS_BYTES, stats.allocated,
"The same as 'heap-committed/allocated'.");

    // We mark this and the other heap-overhead reporters as KIND_NONHEAP
    // because KIND_HEAP memory means "counted in heap-allocated", which
    // this is not.
    MOZ_COLLECT_REPORT(
      "explicit/heap-overhead/bin-unused", KIND_NONHEAP, UNITS_BYTES,
      stats.bin_unused,
"Unused bytes due to fragmentation in the bins used for 'small' (<= 2 KiB) "
"allocations. These bytes will be used if additional allocations occur.");

    // 'waste' can legitimately be zero; only report it when present.
    if (stats.waste > 0) {
      MOZ_COLLECT_REPORT(
        "explicit/heap-overhead/waste", KIND_NONHEAP, UNITS_BYTES,
        stats.waste,
"Committed bytes which do not correspond to an active allocation and which the "
"allocator is not intentionally keeping alive (i.e., not "
"'explicit/heap-overhead/{bookkeeping,page-cache,bin-unused}').");
    }

    MOZ_COLLECT_REPORT(
      "explicit/heap-overhead/bookkeeping", KIND_NONHEAP, UNITS_BYTES,
      stats.bookkeeping,
"Committed bytes which the heap allocator uses for internal data structures.");

    MOZ_COLLECT_REPORT(
      "explicit/heap-overhead/page-cache", KIND_NONHEAP, UNITS_BYTES,
      stats.page_cache,
"Memory which the allocator could return to the operating system, but hasn't. "
"The allocator keeps this memory around as an optimization, so it doesn't "
"have to ask the OS the next time it needs to fulfill a request. This value "
"is typically not larger than a few megabytes.");

    MOZ_COLLECT_REPORT(
      "heap-committed/overhead", KIND_OTHER, UNITS_BYTES,
      HeapOverhead(&stats),
"The sum of 'explicit/heap-overhead/*'.");

    MOZ_COLLECT_REPORT(
      "heap-mapped", KIND_OTHER, UNITS_BYTES, stats.mapped,
"Amount of memory currently mapped. Includes memory that is uncommitted, i.e. "
"neither in physical memory nor paged to disk.");

    MOZ_COLLECT_REPORT(
      "heap-chunksize", KIND_OTHER, UNITS_BYTES, stats.chunksize,
      "Size of chunks.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(JemallocHeapReporter, nsIMemoryReporter)
1375
1376
#endif  // HAVE_JEMALLOC_STATS
1377
1378
// Why is this here?  At first glance, you'd think it could be defined and
1379
// registered with nsMemoryReporterManager entirely within nsAtomTable.cpp.
1380
// However, the obvious time to register it is when the table is initialized,
1381
// and that happens before XPCOM components are initialized, which means the
1382
// RegisterStrongMemoryReporter call fails.  So instead we do it here.
1383
// Reports memory used by the XPCOM atom table and by dynamic atom objects,
// as measured by NS_AddSizeOfAtoms().
class AtomTablesReporter final : public nsIMemoryReporter
{
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  ~AtomTablesReporter() {}

public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    // Gather both measurements in one traversal of the atom table.
    AtomsSizes sizes;
    NS_AddSizeOfAtoms(MallocSizeOf, sizes);

    MOZ_COLLECT_REPORT(
      "explicit/atoms/table", KIND_HEAP, UNITS_BYTES, sizes.mTable,
      "Memory used by the atom table.");

    MOZ_COLLECT_REPORT(
      "explicit/atoms/dynamic-objects-and-chars", KIND_HEAP, UNITS_BYTES,
      sizes.mDynamicAtoms,
      "Memory used by dynamic atom objects and chars (which are stored "
      "at the end of each atom object).");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(AtomTablesReporter, nsIMemoryReporter)
1412
1413
// Reports per-thread stack memory ("explicit/threads/stacks/<name>") plus
// aggregate event-queue, wrapper and kernel overhead for all live nsThreads.
// Stack measurement is platform-specific: /proc mappings on Linux, committed
// region queries on Windows.
class ThreadsReporter final : public nsIMemoryReporter
{
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
  ~ThreadsReporter() = default;

public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
#ifdef XP_LINUX
    // Snapshot of /proc/self/smaps-style regions, used below to measure how
    // much of each thread stack has actually been touched.
    nsTArray<MemoryMapping> mappings(1024);
    MOZ_TRY(GetMemoryMappings(mappings));
#endif

    // Enumerating over active threads requires holding a lock, so we collect
    // info on all threads, and then call our reporter callbacks after releasing
    // the lock.
    struct ThreadData
    {
      nsCString mName;
      uint32_t mThreadId;
      size_t mPrivateSize;
    };
    AutoTArray<ThreadData, 32> threads;

    size_t eventQueueSizes = 0;
    size_t wrapperSizes = 0;
    size_t threadCount = 0;

    for (auto* thread : nsThread::Enumerate()) {
      threadCount++;
      eventQueueSizes += thread->SizeOfEventQueues(MallocSizeOf);
      wrapperSizes += thread->ShallowSizeOfIncludingThis(MallocSizeOf);

      // Threads with no recorded stack base can't have their stacks measured.
      if (!thread->StackBase()) {
        continue;
      }

#if defined(XP_LINUX)
      int idx = mappings.BinaryIndexOf(thread->StackBase());
      if (idx < 0) {
        continue;
      }
      // Referenced() is the combined size of all pages in the region which have
      // ever been touched, and are therefore consuming memory. For stack
      // regions, these pages are guaranteed to be un-shared unless we fork
      // after creating threads (which we don't).
      size_t privateSize = mappings[idx].Referenced();

      // On Linux, we have to be very careful matching memory regions to thread
      // stacks.
      //
      // To begin with, the kernel only reports VM stats for regions of all
      // adjacent pages with the same flags, protection, and backing file.
      // There's no way to get finer-grained usage information for a subset of
      // those pages.
      //
      // Stack segments always have a guard page at the bottom of the stack
      // (assuming we only support stacks that grow down), so there's no danger
      // of them being merged with other stack regions. At the top, there's no
      // protection page, and no way to allocate one without using pthreads
      // directly and allocating our own stacks. So we get around the problem by
      // adding an extra VM flag (NOHUGEPAGES) to our stack region, which we
      // don't expect to be set on any heap regions. But this is not fool-proof.
      //
      // A second kink is that different C libraries (and different versions
      // thereof) report stack base locations and sizes differently with regard
      // to the guard page. For the libraries that include the guard page in the
      // stack size base pointer, we need to adjust those values to compensate.
      // But it's possible that our logic will get out of sync with library
      // changes, or someone will compile with an unexpected library.
      //
      //
      // The upshot of all of this is that there may be configurations that our
      // special cases don't cover. And if there are, we want to know about it.
      // So assert that total size of the memory region we're reporting actually
      // matches the allocated size of the thread stack.
#ifndef ANDROID
      MOZ_ASSERT(mappings[idx].Size() == thread->StackSize(),
                 "Mapping region size doesn't match stack allocation size");
#endif
#elif defined(XP_WIN)
      auto memInfo = MemoryInfo::Get(thread->StackBase(), thread->StackSize());
      size_t privateSize = memInfo.Committed();
#else
      size_t privateSize = thread->StackSize();
      MOZ_ASSERT_UNREACHABLE("Shouldn't have stack base pointer on this "
                             "platform");
#endif

      threads.AppendElement(ThreadData{
        nsCString(PR_GetThreadName(thread->GetPRThread())),
        thread->ThreadId(),
        // On Linux, it's possible (but unlikely) that our stack region will
        // have been merged with adjacent heap regions, in which case we'll get
        // combined size information for both. So we take the minimum of the
        // reported private size and the requested stack size to avoid the
        // possibility of majorly over-reporting in that case.
        std::min(privateSize, thread->StackSize()),
      });
    }

    // The lock is released now; it is safe to invoke callbacks.
    for (auto& thread : threads) {
      nsPrintfCString path("explicit/threads/stacks/%s (tid=%u)",
                           thread.mName.get(), thread.mThreadId);

      aHandleReport->Callback(
          EmptyCString(), path,
          KIND_NONHEAP, UNITS_BYTES,
          thread.mPrivateSize,
          NS_LITERAL_CSTRING("The sizes of thread stacks which have been "
                             "committed to memory."),
          aData);
    }

    MOZ_COLLECT_REPORT(
      "explicit/threads/overhead/event-queues", KIND_HEAP, UNITS_BYTES,
      eventQueueSizes,
      "The sizes of nsThread event queues and observers.");

    MOZ_COLLECT_REPORT(
      "explicit/threads/overhead/wrappers", KIND_HEAP, UNITS_BYTES,
      wrapperSizes,
      "The sizes of nsThread/PRThread wrappers.");

#if defined(XP_WIN)
    // Each thread on Windows has a fixed kernel overhead. For 32 bit Windows,
    // that's 12K. For 64 bit, it's 24K.
    //
    // See https://blogs.technet.microsoft.com/markrussinovich/2009/07/05/pushing-the-limits-of-windows-processes-and-threads/
    constexpr size_t kKernelSize = (sizeof(void*) == 8 ? 24 : 12) * 1024;
#elif defined(XP_LINUX)
    // On Linux, kernel stacks are usually 8K. However, on x86, they are
    // allocated virtually, and start out at 4K. They may grow to 8K, but we
    // have no way of knowing which ones do, so all we can do is guess.
#if defined(__x86_64__) || defined(__i386__)
    constexpr size_t kKernelSize = 4 * 1024;
#else
    constexpr size_t kKernelSize = 8 * 1024;
#endif
#elif defined(XP_MACOSX)
    // On Darwin, kernel stacks are 16K:
    //
    // https://books.google.com/books?id=K8vUkpOXhN4C&lpg=PA513&dq=mach%20kernel%20thread%20stack%20size&pg=PA513#v=onepage&q=mach%20kernel%20thread%20stack%20size&f=false
    constexpr size_t kKernelSize = 16 * 1024;
#else
    // Elsewhere, just assume that kernel stacks require at least 8K.
    constexpr size_t kKernelSize = 8 * 1024;
#endif

    MOZ_COLLECT_REPORT(
      "explicit/threads/overhead/kernel", KIND_NONHEAP, UNITS_BYTES,
      threadCount * kKernelSize,
      "The total kernel overhead for all active threads.");

    return NS_OK;
  }
};
NS_IMPL_ISUPPORTS(ThreadsReporter, nsIMemoryReporter)
1574
1575
#ifdef DEBUG
1576
1577
// Ideally, this would be implemented in BlockingResourceBase.cpp.
1578
// However, this ends up breaking the linking step of various unit tests due
1579
// to adding a new dependency to libdmd for a commonly used feature (mutexes)
1580
// in  DMD  builds. So instead we do it here.
1581
class DeadlockDetectorReporter final : public nsIMemoryReporter
1582
{
1583
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)
1584
1585
  ~DeadlockDetectorReporter() {}
1586
1587
public:
1588
  NS_DECL_ISUPPORTS
1589
1590
  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
1591
                            nsISupports* aData, bool aAnonymize) override
1592
  {
1593
    MOZ_COLLECT_REPORT(
1594
      "explicit/deadlock-detector", KIND_HEAP, UNITS_BYTES,
1595
      BlockingResourceBase::SizeOfDeadlockDetector(MallocSizeOf),
1596
      "Memory used by the deadlock detector.");
1597
1598
    return NS_OK;
1599
  }
1600
};
1601
NS_IMPL_ISUPPORTS(DeadlockDetectorReporter, nsIMemoryReporter)
1602
1603
#endif
1604
1605
#ifdef MOZ_DMD
1606
1607
namespace mozilla {
1608
namespace dmd {
1609
1610
// Reports the memory used by DMD's own data structures (stack-trace tables,
// live/dead block tables), from dmd::SizeOf().
class DMDReporter final : public nsIMemoryReporter
{
public:
  NS_DECL_ISUPPORTS

  NS_IMETHOD CollectReports(nsIHandleReportCallback* aHandleReport,
                            nsISupports* aData, bool aAnonymize) override
  {
    // One snapshot of DMD's internal size counters.
    dmd::Sizes sizes;
    dmd::SizeOf(&sizes);

    MOZ_COLLECT_REPORT(
      "explicit/dmd/stack-traces/used", KIND_HEAP, UNITS_BYTES,
      sizes.mStackTracesUsed,
      "Memory used by stack traces which correspond to at least "
      "one heap block DMD is tracking.");

    MOZ_COLLECT_REPORT(
      "explicit/dmd/stack-traces/unused", KIND_HEAP, UNITS_BYTES,
      sizes.mStackTracesUnused,
      "Memory used by stack traces which don't correspond to any heap "
      "blocks DMD is currently tracking.");

    MOZ_COLLECT_REPORT(
      "explicit/dmd/stack-traces/table", KIND_HEAP, UNITS_BYTES,
      sizes.mStackTraceTable,
      "Memory used by DMD's stack trace table.");

    MOZ_COLLECT_REPORT(
      "explicit/dmd/live-block-table", KIND_HEAP, UNITS_BYTES,
      sizes.mLiveBlockTable,
      "Memory used by DMD's live block table.");

    MOZ_COLLECT_REPORT(
      "explicit/dmd/dead-block-list", KIND_HEAP, UNITS_BYTES,
      sizes.mDeadBlockTable,
      "Memory used by DMD's dead block list.");

    return NS_OK;
  }

private:
  ~DMDReporter() {}
};
NS_IMPL_ISUPPORTS(DMDReporter, nsIMemoryReporter)
1655
1656
} // namespace dmd
1657
} // namespace mozilla
1658
1659
#endif  // MOZ_DMD
1660
1661
/**
1662
 ** nsMemoryReporterManager implementation
1663
 **/
1664
1665
// The manager is both the reporter registry and a reporter for its own
// memory use (see CollectReports below).
NS_IMPL_ISUPPORTS(nsMemoryReporterManager, nsIMemoryReporterManager, nsIMemoryReporter)
1666
1667
// Registers every platform-appropriate built-in reporter with this manager.
// Main-thread only; idempotent (repeat calls warn and return early).
NS_IMETHODIMP
nsMemoryReporterManager::Init()
{
  if (!NS_IsMainThread()) {
    MOZ_CRASH();
  }

  // Under normal circumstances this function is only called once. However,
  // we've (infrequently) seen memory report dumps in crash reports that
  // suggest that this function is sometimes called multiple times. That in
  // turn means that multiple reporters of each kind are registered, which
  // leads to duplicated reports of individual measurements such as "resident",
  // "vsize", etc.
  //
  // It's unclear how these multiple calls can occur. The only plausible theory
  // so far is badly-written extensions, because this function is callable from
  // JS code via nsIMemoryReporter.idl.
  //
  // Whatever the cause, it's a bad thing. So we protect against it with the
  // following check.
  static bool isInited = false;
  if (isInited) {
    NS_WARNING("nsMemoryReporterManager::Init() has already been called!");
    return NS_OK;
  }
  isInited = true;

#if defined(HAVE_JEMALLOC_STATS) && defined(MOZ_GLUE_IN_PROGRAM)
  // Bail out if the jemalloc_stats entry point is not available.
  if (!jemalloc_stats) {
    return NS_ERROR_FAILURE;
  }
#endif

#ifdef HAVE_JEMALLOC_STATS
  RegisterStrongReporter(new JemallocHeapReporter());
#endif

#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
  RegisterStrongReporter(new VsizeReporter());
  RegisterStrongReporter(new ResidentReporter());
#endif

#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
  RegisterStrongReporter(new VsizeMaxContiguousReporter());
#endif

#ifdef HAVE_RESIDENT_PEAK_REPORTER
  RegisterStrongReporter(new ResidentPeakReporter());
#endif

#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
  RegisterStrongReporter(new ResidentUniqueReporter());
#endif

#ifdef HAVE_PAGE_FAULT_REPORTERS
  RegisterStrongReporter(new PageFaultsSoftReporter());
  RegisterStrongReporter(new PageFaultsHardReporter());
#endif

#ifdef HAVE_PRIVATE_REPORTER
  RegisterStrongReporter(new PrivateReporter());
#endif

#ifdef HAVE_SYSTEM_HEAP_REPORTER
  RegisterStrongReporter(new SystemHeapReporter());
#endif

  RegisterStrongReporter(new AtomTablesReporter());

  RegisterStrongReporter(new ThreadsReporter());

#ifdef DEBUG
  RegisterStrongReporter(new DeadlockDetectorReporter());
#endif

#ifdef MOZ_GECKO_PROFILER
  // We have to register this here rather than in profiler_init() because
  // profiler_init() runs prior to nsMemoryReporterManager's creation.
  RegisterStrongReporter(new GeckoProfilerReporter());
#endif

#ifdef MOZ_DMD
  RegisterStrongReporter(new mozilla::dmd::DMDReporter());
#endif

#ifdef XP_WIN
  RegisterStrongReporter(new WindowsAddressSpaceReporter());
#endif

#ifdef XP_UNIX
  nsMemoryInfoDumper::Initialize();
#endif

  // Report our own memory usage as well.
  RegisterWeakReporter(this);

  return NS_OK;
}
1765
1766
// Constructs an empty manager: fresh (heap-allocated, owned) reporter tables,
// no saved tables, no in-flight report request.
nsMemoryReporterManager::nsMemoryReporterManager()
  : mMutex("nsMemoryReporterManager::mMutex")
  , mIsRegistrationBlocked(false)
  , mStrongReporters(new StrongReportersTable())
  , mWeakReporters(new WeakReportersTable())
  , mSavedStrongReporters(nullptr)
  , mSavedWeakReporters(nullptr)
  , mNextGeneration(1)
  , mPendingProcessesState(nullptr)
  , mPendingReportersState(nullptr)
#ifdef HAVE_JEMALLOC_STATS
  // Thread pool used for off-main-thread work when jemalloc stats are
  // available.
  , mThreadPool(do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID))
#endif
{
}
1781
1782
// Frees the owned reporter tables.  The saved tables must already have been
// restored (swapped back) by the time the manager is destroyed.
nsMemoryReporterManager::~nsMemoryReporterManager()
{
  delete mStrongReporters;
  delete mWeakReporters;
  NS_ASSERTION(!mSavedStrongReporters, "failed to restore strong reporters");
  NS_ASSERTION(!mSavedWeakReporters, "failed to restore weak reporters");
}
1789
1790
// Reports the manager's own shallow heap footprint: this object plus the
// shallow size of both reporter tables.
NS_IMETHODIMP
nsMemoryReporterManager::CollectReports(nsIHandleReportCallback* aHandleReport,
                                        nsISupports* aData, bool aAnonymize)
{
  size_t n = MallocSizeOf(this);
  n += mStrongReporters->ShallowSizeOfIncludingThis(MallocSizeOf);
  n += mWeakReporters->ShallowSizeOfIncludingThis(MallocSizeOf);

  MOZ_COLLECT_REPORT(
    "explicit/memory-reporter-manager", KIND_HEAP, UNITS_BYTES,
    n,
    "Memory used by the memory reporter infrastructure.");

  return NS_OK;
}
1805
1806
// Debug logging for multi-process memory reporting; expands to nothing unless
// DEBUG_CHILD_PROCESS_MEMORY_REPORTING is defined.
#ifdef DEBUG_CHILD_PROCESS_MEMORY_REPORTING
#define MEMORY_REPORTING_LOG(format, ...) \
  printf_stderr("++++ MEMORY REPORTING: " format, ##__VA_ARGS__);
#else
#define MEMORY_REPORTING_LOG(...)
#endif
1812
1813
NS_IMETHODIMP
1814
nsMemoryReporterManager::GetReports(
1815
  nsIHandleReportCallback* aHandleReport,
1816
  nsISupports* aHandleReportData,
1817
  nsIFinishReportingCallback* aFinishReporting,
1818
  nsISupports* aFinishReportingData,
1819
  bool aAnonymize)
1820
0
{
1821
0
  return GetReportsExtended(aHandleReport, aHandleReportData,
1822
0
                            aFinishReporting, aFinishReportingData,
1823
0
                            aAnonymize,
1824
0
                            /* minimize = */ false,
1825
0
                            /* DMDident = */ EmptyString());
1826
0
}
1827
1828
NS_IMETHODIMP
1829
nsMemoryReporterManager::GetReportsExtended(
1830
  nsIHandleReportCallback* aHandleReport,
1831
  nsISupports* aHandleReportData,
1832
  nsIFinishReportingCallback* aFinishReporting,
1833
  nsISupports* aFinishReportingData,
1834
  bool aAnonymize,
1835
  bool aMinimize,
1836
  const nsAString& aDMDDumpIdent)
1837
0
{
1838
0
  nsresult rv;
1839
0
1840
0
  // Memory reporters are not necessarily threadsafe, so this function must
1841
0
  // be called from the main thread.
1842
0
  if (!NS_IsMainThread()) {
1843
0
    MOZ_CRASH();
1844
0
  }
1845
0
1846
0
  uint32_t generation = mNextGeneration++;
1847
0
1848
0
  if (mPendingProcessesState) {
1849
0
    // A request is in flight.  Don't start another one.  And don't report
1850
0
    // an error;  just ignore it, and let the in-flight request finish.
1851
0
    MEMORY_REPORTING_LOG("GetReports (gen=%u, s->gen=%u): abort\n",
1852
0
                         generation, mPendingProcessesState->mGeneration);
1853
0
    return NS_OK;
1854
0
  }
1855
0
1856
0
  MEMORY_REPORTING_LOG("GetReports (gen=%u)\n", generation);
1857
0
1858
0
  uint32_t concurrency = Preferences::GetUint("memory.report_concurrency", 1);
1859
0
  MOZ_ASSERT(concurrency >= 1);
1860
0
  if (concurrency < 1) {
1861
0
    concurrency = 1;
1862
0
  }
1863
0
  mPendingProcessesState = new PendingProcessesState(generation,
1864
0
                                                     aAnonymize,
1865
0
                                                     aMinimize,
1866
0
                                                     concurrency,
1867
0
                                                     aHandleReport,
1868
0
                                                     aHandleReportData,
1869
0
                                                     aFinishReporting,
1870
0
                                                     aFinishReportingData,
1871
0
                                                     aDMDDumpIdent);
1872
0
1873
0
  if (aMinimize) {
1874
0
    nsCOMPtr<nsIRunnable> callback =
1875
0
      NewRunnableMethod("nsMemoryReporterManager::StartGettingReports",
1876
0
                        this,
1877
0
                        &nsMemoryReporterManager::StartGettingReports);
1878
0
    rv = MinimizeMemoryUsage(callback);
1879
0
  } else {
1880
0
    rv = StartGettingReports();
1881
0
  }
1882
0
  return rv;
1883
0
}
1884
1885
nsresult
1886
nsMemoryReporterManager::StartGettingReports()
1887
0
{
1888
0
  PendingProcessesState* s = mPendingProcessesState;
1889
0
  nsresult rv;
1890
0
1891
0
  // Get reports for this process.
1892
0
  FILE* parentDMDFile = nullptr;
1893
#ifdef MOZ_DMD
1894
  if (!s->mDMDDumpIdent.IsEmpty()) {
1895
    rv = nsMemoryInfoDumper::OpenDMDFile(s->mDMDDumpIdent, getpid(),
1896
                                                  &parentDMDFile);
1897
    if (NS_WARN_IF(NS_FAILED(rv))) {
1898
      // Proceed with the memory report as if DMD were disabled.
1899
      parentDMDFile = nullptr;
1900
    }
1901
  }
1902
#endif
1903
1904
0
  // This is async.
1905
0
  GetReportsForThisProcessExtended(s->mHandleReport, s->mHandleReportData,
1906
0
                                   s->mAnonymize, parentDMDFile,
1907
0
                                   s->mFinishReporting, s->mFinishReportingData);
1908
0
1909
0
  nsTArray<dom::ContentParent*> childWeakRefs;
1910
0
  dom::ContentParent::GetAll(childWeakRefs);
1911
0
  if (!childWeakRefs.IsEmpty()) {
1912
0
    // Request memory reports from child processes.  This happens
1913
0
    // after the parent report so that the parent's main thread will
1914
0
    // be free to process the child reports, instead of causing them
1915
0
    // to be buffered and consume (possibly scarce) memory.
1916
0
1917
0
    for (size_t i = 0; i < childWeakRefs.Length(); ++i) {
1918
0
      s->mChildrenPending.AppendElement(childWeakRefs[i]);
1919
0
    }
1920
0
  }
1921
0
1922
0
  if (gfx::GPUProcessManager* gpu = gfx::GPUProcessManager::Get()) {
1923
0
    if (RefPtr<MemoryReportingProcess> proc = gpu->GetProcessMemoryReporter()) {
1924
0
      s->mChildrenPending.AppendElement(proc.forget());
1925
0
    }
1926
0
  }
1927
0
1928
0
  if (!s->mChildrenPending.IsEmpty()) {
1929
0
    nsCOMPtr<nsITimer> timer;
1930
0
    rv = NS_NewTimerWithFuncCallback(
1931
0
      getter_AddRefs(timer),
1932
0
      TimeoutCallback,
1933
0
      this,
1934
0
      kTimeoutLengthMS,
1935
0
      nsITimer::TYPE_ONE_SHOT,
1936
0
      "nsMemoryReporterManager::StartGettingReports");
1937
0
    if (NS_WARN_IF(NS_FAILED(rv))) {
1938
0
      FinishReporting();
1939
0
      return rv;
1940
0
    }
1941
0
1942
0
    MOZ_ASSERT(!s->mTimer);
1943
0
    s->mTimer.swap(timer);
1944
0
  }
1945
0
1946
0
  return NS_OK;
1947
0
}
1948
1949
void
1950
nsMemoryReporterManager::DispatchReporter(
1951
  nsIMemoryReporter* aReporter, bool aIsAsync,
1952
  nsIHandleReportCallback* aHandleReport,
1953
  nsISupports* aHandleReportData,
1954
  bool aAnonymize)
1955
0
{
1956
0
  MOZ_ASSERT(mPendingReportersState);
1957
0
1958
0
  // Grab refs to everything used in the lambda function.
1959
0
  RefPtr<nsMemoryReporterManager> self = this;
1960
0
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
1961
0
  nsCOMPtr<nsIHandleReportCallback> handleReport = aHandleReport;
1962
0
  nsCOMPtr<nsISupports> handleReportData = aHandleReportData;
1963
0
1964
0
  nsCOMPtr<nsIRunnable> event = NS_NewRunnableFunction(
1965
0
    "nsMemoryReporterManager::DispatchReporter",
1966
0
    [self, reporter, aIsAsync, handleReport, handleReportData, aAnonymize]() {
1967
0
      reporter->CollectReports(handleReport, handleReportData, aAnonymize);
1968
0
      if (!aIsAsync) {
1969
0
        self->EndReport();
1970
0
      }
1971
0
    });
1972
0
1973
0
  NS_DispatchToMainThread(event);
1974
0
  mPendingReportersState->mReportsPending++;
1975
0
}
1976
1977
NS_IMETHODIMP
1978
nsMemoryReporterManager::GetReportsForThisProcessExtended(
1979
  nsIHandleReportCallback* aHandleReport, nsISupports* aHandleReportData,
1980
  bool aAnonymize, FILE* aDMDFile,
1981
  nsIFinishReportingCallback* aFinishReporting,
1982
  nsISupports* aFinishReportingData)
1983
0
{
1984
0
  // Memory reporters are not necessarily threadsafe, so this function must
1985
0
  // be called from the main thread.
1986
0
  if (!NS_IsMainThread()) {
1987
0
    MOZ_CRASH();
1988
0
  }
1989
0
1990
0
  if (NS_WARN_IF(mPendingReportersState)) {
1991
0
    // Report is already in progress.
1992
0
    return NS_ERROR_IN_PROGRESS;
1993
0
  }
1994
0
1995
#ifdef MOZ_DMD
1996
  if (aDMDFile) {
1997
    // Clear DMD's reportedness state before running the memory
1998
    // reporters, to avoid spurious twice-reported warnings.
1999
    dmd::ClearReports();
2000
  }
2001
#else
2002
0
  MOZ_ASSERT(!aDMDFile);
2003
0
#endif
2004
0
2005
0
  mPendingReportersState = new PendingReportersState(
2006
0
      aFinishReporting, aFinishReportingData, aDMDFile);
2007
0
2008
0
  {
2009
0
    mozilla::MutexAutoLock autoLock(mMutex);
2010
0
2011
0
    for (auto iter = mStrongReporters->Iter(); !iter.Done(); iter.Next()) {
2012
0
      DispatchReporter(iter.Key(), iter.Data(),
2013
0
                       aHandleReport, aHandleReportData, aAnonymize);
2014
0
    }
2015
0
2016
0
    for (auto iter = mWeakReporters->Iter(); !iter.Done(); iter.Next()) {
2017
0
      nsCOMPtr<nsIMemoryReporter> reporter = iter.Key();
2018
0
      DispatchReporter(reporter, iter.Data(),
2019
0
                       aHandleReport, aHandleReportData, aAnonymize);
2020
0
    }
2021
0
  }
2022
0
2023
0
  return NS_OK;
2024
0
}
2025
2026
NS_IMETHODIMP
2027
nsMemoryReporterManager::EndReport()
2028
0
{
2029
0
  if (--mPendingReportersState->mReportsPending == 0) {
2030
#ifdef MOZ_DMD
2031
    if (mPendingReportersState->mDMDFile) {
2032
      nsMemoryInfoDumper::DumpDMDToFile(mPendingReportersState->mDMDFile);
2033
    }
2034
#endif
2035
0
    if (mPendingProcessesState) {
2036
0
      // This is the parent process.
2037
0
      EndProcessReport(mPendingProcessesState->mGeneration, true);
2038
0
    } else {
2039
0
      mPendingReportersState->mFinishReporting->Callback(
2040
0
          mPendingReportersState->mFinishReportingData);
2041
0
    }
2042
0
2043
0
    delete mPendingReportersState;
2044
0
    mPendingReportersState = nullptr;
2045
0
  }
2046
0
2047
0
  return NS_OK;
2048
0
}
2049
2050
nsMemoryReporterManager::PendingProcessesState*
2051
nsMemoryReporterManager::GetStateForGeneration(uint32_t aGeneration)
2052
0
{
2053
0
  // Memory reporting only happens on the main thread.
2054
0
  MOZ_RELEASE_ASSERT(NS_IsMainThread());
2055
0
2056
0
  PendingProcessesState* s = mPendingProcessesState;
2057
0
2058
0
  if (!s) {
2059
0
    // If we reach here, then:
2060
0
    //
2061
0
    // - A child process reported back too late, and no subsequent request
2062
0
    //   is in flight.
2063
0
    //
2064
0
    // So there's nothing to be done.  Just ignore it.
2065
0
    MEMORY_REPORTING_LOG(
2066
0
      "HandleChildReports: no request in flight (aGen=%u)\n",
2067
0
      aGeneration);
2068
0
    return nullptr;
2069
0
  }
2070
0
2071
0
  if (aGeneration != s->mGeneration) {
2072
0
    // If we reach here, a child process must have reported back, too late,
2073
0
    // while a subsequent (higher-numbered) request is in flight.  Again,
2074
0
    // ignore it.
2075
0
    MOZ_ASSERT(aGeneration < s->mGeneration);
2076
0
    MEMORY_REPORTING_LOG(
2077
0
      "HandleChildReports: gen mismatch (aGen=%u, s->gen=%u)\n",
2078
0
      aGeneration, s->mGeneration);
2079
0
    return nullptr;
2080
0
  }
2081
0
2082
0
  return s;
2083
0
}
2084
2085
// This function has no return value.  If something goes wrong, there's no
2086
// clear place to report the problem to, but that's ok -- we will end up
2087
// hitting the timeout and executing TimeoutCallback().
2088
void
2089
nsMemoryReporterManager::HandleChildReport(
2090
  uint32_t aGeneration,
2091
  const dom::MemoryReport& aChildReport)
2092
0
{
2093
0
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
2094
0
  if (!s) {
2095
0
    return;
2096
0
  }
2097
0
2098
0
  // Child reports should have a non-empty process.
2099
0
  MOZ_ASSERT(!aChildReport.process().IsEmpty());
2100
0
2101
0
  // If the call fails, ignore and continue.
2102
0
  s->mHandleReport->Callback(aChildReport.process(),
2103
0
                             aChildReport.path(),
2104
0
                             aChildReport.kind(),
2105
0
                             aChildReport.units(),
2106
0
                             aChildReport.amount(),
2107
0
                             aChildReport.desc(),
2108
0
                             s->mHandleReportData);
2109
0
}
2110
2111
/* static */ bool
2112
nsMemoryReporterManager::StartChildReport(mozilla::MemoryReportingProcess* aChild,
2113
                                          const PendingProcessesState* aState)
2114
0
{
2115
0
  if (!aChild->IsAlive()) {
2116
0
    MEMORY_REPORTING_LOG("StartChildReports (gen=%u): child exited before"
2117
0
                         " its report was started\n",
2118
0
                         aState->mGeneration);
2119
0
    return false;
2120
0
  }
2121
0
2122
0
  mozilla::dom::MaybeFileDesc dmdFileDesc = void_t();
2123
#ifdef MOZ_DMD
2124
  if (!aState->mDMDDumpIdent.IsEmpty()) {
2125
    FILE *dmdFile = nullptr;
2126
    nsresult rv = nsMemoryInfoDumper::OpenDMDFile(aState->mDMDDumpIdent,
2127
                                                  aChild->Pid(), &dmdFile);
2128
    if (NS_WARN_IF(NS_FAILED(rv))) {
2129
      // Proceed with the memory report as if DMD were disabled.
2130
      dmdFile = nullptr;
2131
    }
2132
    if (dmdFile) {
2133
      dmdFileDesc = mozilla::ipc::FILEToFileDescriptor(dmdFile);
2134
      fclose(dmdFile);
2135
    }
2136
  }
2137
#endif
2138
  return aChild->SendRequestMemoryReport(
2139
0
    aState->mGeneration, aState->mAnonymize, aState->mMinimize, dmdFileDesc);
2140
0
}
2141
2142
void
2143
nsMemoryReporterManager::EndProcessReport(uint32_t aGeneration, bool aSuccess)
2144
0
{
2145
0
  PendingProcessesState* s = GetStateForGeneration(aGeneration);
2146
0
  if (!s) {
2147
0
    return;
2148
0
  }
2149
0
2150
0
  MOZ_ASSERT(s->mNumProcessesRunning > 0);
2151
0
  s->mNumProcessesRunning--;
2152
0
  s->mNumProcessesCompleted++;
2153
0
  MEMORY_REPORTING_LOG("HandleChildReports (aGen=%u): process %u %s"
2154
0
                       " (%u running, %u pending)\n",
2155
0
                       aGeneration, s->mNumProcessesCompleted,
2156
0
                       aSuccess ? "completed" : "exited during report",
2157
0
                       s->mNumProcessesRunning,
2158
0
                       static_cast<unsigned>(s->mChildrenPending.Length()));
2159
0
2160
0
  // Start pending children up to the concurrency limit.
2161
0
  while (s->mNumProcessesRunning < s->mConcurrencyLimit &&
2162
0
         !s->mChildrenPending.IsEmpty()) {
2163
0
    // Pop last element from s->mChildrenPending
2164
0
    RefPtr<MemoryReportingProcess> nextChild;
2165
0
    nextChild.swap(s->mChildrenPending.LastElement());
2166
0
    s->mChildrenPending.TruncateLength(s->mChildrenPending.Length() - 1);
2167
0
    // Start report (if the child is still alive).
2168
0
    if (StartChildReport(nextChild, s)) {
2169
0
      ++s->mNumProcessesRunning;
2170
0
      MEMORY_REPORTING_LOG("HandleChildReports (aGen=%u): started child report"
2171
0
                           " (%u running, %u pending)\n",
2172
0
                           aGeneration, s->mNumProcessesRunning,
2173
0
                           static_cast<unsigned>(s->mChildrenPending.Length()));
2174
0
    }
2175
0
  }
2176
0
2177
0
  // If all the child processes (if any) have reported, we can cancel
2178
0
  // the timer (if started) and finish up.  Otherwise, just return.
2179
0
  if (s->mNumProcessesRunning == 0) {
2180
0
    MOZ_ASSERT(s->mChildrenPending.IsEmpty());
2181
0
    if (s->mTimer) {
2182
0
      s->mTimer->Cancel();
2183
0
    }
2184
0
    FinishReporting();
2185
0
  }
2186
0
}
2187
2188
/* static */ void
2189
nsMemoryReporterManager::TimeoutCallback(nsITimer* aTimer, void* aData)
2190
0
{
2191
0
  nsMemoryReporterManager* mgr = static_cast<nsMemoryReporterManager*>(aData);
2192
0
  PendingProcessesState* s = mgr->mPendingProcessesState;
2193
0
2194
0
  // Release assert because: if the pointer is null we're about to
2195
0
  // crash regardless of DEBUG, and this way the compiler doesn't
2196
0
  // complain about unused variables.
2197
0
  MOZ_RELEASE_ASSERT(s, "mgr->mPendingProcessesState");
2198
0
  MEMORY_REPORTING_LOG("TimeoutCallback (s->gen=%u; %u running, %u pending)\n",
2199
0
                       s->mGeneration, s->mNumProcessesRunning,
2200
0
                       static_cast<unsigned>(s->mChildrenPending.Length()));
2201
0
2202
0
  // We don't bother sending any kind of cancellation message to the child
2203
0
  // processes that haven't reported back.
2204
0
  mgr->FinishReporting();
2205
0
}
2206
2207
nsresult
2208
nsMemoryReporterManager::FinishReporting()
2209
0
{
2210
0
  // Memory reporting only happens on the main thread.
2211
0
  if (!NS_IsMainThread()) {
2212
0
    MOZ_CRASH();
2213
0
  }
2214
0
2215
0
  MOZ_ASSERT(mPendingProcessesState);
2216
0
  MEMORY_REPORTING_LOG("FinishReporting (s->gen=%u; %u processes reported)\n",
2217
0
                       mPendingProcessesState->mGeneration,
2218
0
                       mPendingProcessesState->mNumProcessesCompleted);
2219
0
2220
0
  // Call this before deleting |mPendingProcessesState|.  That way, if
2221
0
  // |mFinishReportData| calls GetReports(), it will silently abort, as
2222
0
  // required.
2223
0
  nsresult rv = mPendingProcessesState->mFinishReporting->Callback(
2224
0
    mPendingProcessesState->mFinishReportingData);
2225
0
2226
0
  delete mPendingProcessesState;
2227
0
  mPendingProcessesState = nullptr;
2228
0
  return rv;
2229
0
}
2230
2231
nsMemoryReporterManager::PendingProcessesState::PendingProcessesState(
2232
    uint32_t aGeneration, bool aAnonymize, bool aMinimize,
2233
    uint32_t aConcurrencyLimit,
2234
    nsIHandleReportCallback* aHandleReport,
2235
    nsISupports* aHandleReportData,
2236
    nsIFinishReportingCallback* aFinishReporting,
2237
    nsISupports* aFinishReportingData,
2238
    const nsAString& aDMDDumpIdent)
2239
  : mGeneration(aGeneration)
2240
  , mAnonymize(aAnonymize)
2241
  , mMinimize(aMinimize)
2242
  , mChildrenPending()
2243
  , mNumProcessesRunning(1) // reporting starts with the parent
2244
  , mNumProcessesCompleted(0)
2245
  , mConcurrencyLimit(aConcurrencyLimit)
2246
  , mHandleReport(aHandleReport)
2247
  , mHandleReportData(aHandleReportData)
2248
  , mFinishReporting(aFinishReporting)
2249
  , mFinishReportingData(aFinishReportingData)
2250
  , mDMDDumpIdent(aDMDDumpIdent)
2251
0
{
2252
0
}
2253
2254
static void
2255
CrashIfRefcountIsZero(nsISupports* aObj)
2256
85
{
2257
85
  // This will probably crash if the object's refcount is 0.
2258
85
  uint32_t refcnt = NS_ADDREF(aObj);
2259
85
  if (refcnt <= 1) {
2260
0
    MOZ_CRASH("CrashIfRefcountIsZero: refcount is zero");
2261
0
  }
2262
85
  NS_RELEASE(aObj);
2263
85
}
2264
2265
nsresult
2266
nsMemoryReporterManager::RegisterReporterHelper(
2267
  nsIMemoryReporter* aReporter, bool aForce, bool aStrong, bool aIsAsync)
2268
85
{
2269
85
  // This method is thread-safe.
2270
85
  mozilla::MutexAutoLock autoLock(mMutex);
2271
85
2272
85
  if (mIsRegistrationBlocked && !aForce) {
2273
0
    return NS_ERROR_FAILURE;
2274
0
  }
2275
85
2276
85
  if (mStrongReporters->Contains(aReporter) ||
2277
85
      mWeakReporters->Contains(aReporter)) {
2278
0
    return NS_ERROR_FAILURE;
2279
0
  }
2280
85
2281
85
  // If |aStrong| is true, |aReporter| may have a refcnt of 0, so we take
2282
85
  // a kung fu death grip before calling PutEntry.  Otherwise, if PutEntry
2283
85
  // addref'ed and released |aReporter| before finally addref'ing it for
2284
85
  // good, it would free aReporter!  The kung fu death grip could itself be
2285
85
  // problematic if PutEntry didn't addref |aReporter| (because then when the
2286
85
  // death grip goes out of scope, we would delete the reporter).  In debug
2287
85
  // mode, we check that this doesn't happen.
2288
85
  //
2289
85
  // If |aStrong| is false, we require that |aReporter| have a non-zero
2290
85
  // refcnt.
2291
85
  //
2292
85
  if (aStrong) {
2293
52
    nsCOMPtr<nsIMemoryReporter> kungFuDeathGrip = aReporter;
2294
52
    mStrongReporters->Put(aReporter, aIsAsync);
2295
52
    CrashIfRefcountIsZero(aReporter);
2296
52
  } else {
2297
33
    CrashIfRefcountIsZero(aReporter);
2298
33
    nsCOMPtr<nsIXPConnectWrappedJS> jsComponent = do_QueryInterface(aReporter);
2299
33
    if (jsComponent) {
2300
0
      // We cannot allow non-native reporters (WrappedJS), since we'll be
2301
0
      // holding onto a raw pointer, which would point to the wrapper,
2302
0
      // and that wrapper is likely to go away as soon as this register
2303
0
      // call finishes.  This would then lead to subsequent crashes in
2304
0
      // CollectReports().
2305
0
      return NS_ERROR_XPC_BAD_CONVERT_JS;
2306
0
    }
2307
33
    mWeakReporters->Put(aReporter, aIsAsync);
2308
33
  }
2309
85
2310
85
  return NS_OK;
2311
85
}
2312
2313
NS_IMETHODIMP
2314
nsMemoryReporterManager::RegisterStrongReporter(nsIMemoryReporter* aReporter)
2315
52
{
2316
52
  return RegisterReporterHelper(aReporter, /* force = */ false,
2317
52
                                /* strong = */ true,
2318
52
                                /* async = */ false);
2319
52
}
2320
2321
NS_IMETHODIMP
2322
nsMemoryReporterManager::RegisterStrongAsyncReporter(nsIMemoryReporter* aReporter)
2323
0
{
2324
0
  return RegisterReporterHelper(aReporter, /* force = */ false,
2325
0
                                /* strong = */ true,
2326
0
                                /* async = */ true);
2327
0
}
2328
2329
NS_IMETHODIMP
2330
nsMemoryReporterManager::RegisterWeakReporter(nsIMemoryReporter* aReporter)
2331
33
{
2332
33
  return RegisterReporterHelper(aReporter, /* force = */ false,
2333
33
                                /* strong = */ false,
2334
33
                                /* async = */ false);
2335
33
}
2336
2337
NS_IMETHODIMP
2338
nsMemoryReporterManager::RegisterWeakAsyncReporter(nsIMemoryReporter* aReporter)
2339
0
{
2340
0
  return RegisterReporterHelper(aReporter, /* force = */ false,
2341
0
                                /* strong = */ false,
2342
0
                                /* async = */ true);
2343
0
}
2344
2345
NS_IMETHODIMP
2346
nsMemoryReporterManager::RegisterStrongReporterEvenIfBlocked(
2347
  nsIMemoryReporter* aReporter)
2348
0
{
2349
0
  return RegisterReporterHelper(aReporter, /* force = */ true,
2350
0
                                /* strong = */ true,
2351
0
                                /* async = */ false);
2352
0
}
2353
2354
NS_IMETHODIMP
2355
nsMemoryReporterManager::UnregisterStrongReporter(nsIMemoryReporter* aReporter)
2356
0
{
2357
0
  // This method is thread-safe.
2358
0
  mozilla::MutexAutoLock autoLock(mMutex);
2359
0
2360
0
  MOZ_ASSERT(!mWeakReporters->Contains(aReporter));
2361
0
2362
0
  if (mStrongReporters->Contains(aReporter)) {
2363
0
    mStrongReporters->Remove(aReporter);
2364
0
    return NS_OK;
2365
0
  }
2366
0
2367
0
  // We don't register new reporters when the block is in place, but we do
2368
0
  // unregister existing reporters. This is so we don't keep holding strong
2369
0
  // references that these reporters aren't expecting (which can keep them
2370
0
  // alive longer than intended).
2371
0
  if (mSavedStrongReporters && mSavedStrongReporters->Contains(aReporter)) {
2372
0
    mSavedStrongReporters->Remove(aReporter);
2373
0
    return NS_OK;
2374
0
  }
2375
0
2376
0
  return NS_ERROR_FAILURE;
2377
0
}
2378
2379
NS_IMETHODIMP
2380
nsMemoryReporterManager::UnregisterWeakReporter(nsIMemoryReporter* aReporter)
2381
0
{
2382
0
  // This method is thread-safe.
2383
0
  mozilla::MutexAutoLock autoLock(mMutex);
2384
0
2385
0
  MOZ_ASSERT(!mStrongReporters->Contains(aReporter));
2386
0
2387
0
  if (mWeakReporters->Contains(aReporter)) {
2388
0
    mWeakReporters->Remove(aReporter);
2389
0
    return NS_OK;
2390
0
  }
2391
0
2392
0
  // We don't register new reporters when the block is in place, but we do
2393
0
  // unregister existing reporters. This is so we don't keep holding weak
2394
0
  // references that the old reporters aren't expecting (which can end up as
2395
0
  // dangling pointers that lead to use-after-frees).
2396
0
  if (mSavedWeakReporters && mSavedWeakReporters->Contains(aReporter)) {
2397
0
    mSavedWeakReporters->Remove(aReporter);
2398
0
    return NS_OK;
2399
0
  }
2400
0
2401
0
  return NS_ERROR_FAILURE;
2402
0
}
2403
2404
NS_IMETHODIMP
2405
nsMemoryReporterManager::BlockRegistrationAndHideExistingReporters()
2406
0
{
2407
0
  // This method is thread-safe.
2408
0
  mozilla::MutexAutoLock autoLock(mMutex);
2409
0
  if (mIsRegistrationBlocked) {
2410
0
    return NS_ERROR_FAILURE;
2411
0
  }
2412
0
  mIsRegistrationBlocked = true;
2413
0
2414
0
  // Hide the existing reporters, saving them for later restoration.
2415
0
  MOZ_ASSERT(!mSavedStrongReporters);
2416
0
  MOZ_ASSERT(!mSavedWeakReporters);
2417
0
  mSavedStrongReporters = mStrongReporters;
2418
0
  mSavedWeakReporters = mWeakReporters;
2419
0
  mStrongReporters = new StrongReportersTable();
2420
0
  mWeakReporters = new WeakReportersTable();
2421
0
2422
0
  return NS_OK;
2423
0
}
2424
2425
NS_IMETHODIMP
2426
nsMemoryReporterManager::UnblockRegistrationAndRestoreOriginalReporters()
2427
0
{
2428
0
  // This method is thread-safe.
2429
0
  mozilla::MutexAutoLock autoLock(mMutex);
2430
0
  if (!mIsRegistrationBlocked) {
2431
0
    return NS_ERROR_FAILURE;
2432
0
  }
2433
0
2434
0
  // Banish the current reporters, and restore the hidden ones.
2435
0
  delete mStrongReporters;
2436
0
  delete mWeakReporters;
2437
0
  mStrongReporters = mSavedStrongReporters;
2438
0
  mWeakReporters = mSavedWeakReporters;
2439
0
  mSavedStrongReporters = nullptr;
2440
0
  mSavedWeakReporters = nullptr;
2441
0
2442
0
  mIsRegistrationBlocked = false;
2443
0
  return NS_OK;
2444
0
}
2445
2446
NS_IMETHODIMP
2447
nsMemoryReporterManager::GetVsize(int64_t* aVsize)
2448
0
{
2449
0
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2450
0
  return VsizeDistinguishedAmount(aVsize);
2451
#else
2452
  *aVsize = 0;
2453
  return NS_ERROR_NOT_AVAILABLE;
2454
#endif
2455
}
2456
2457
NS_IMETHODIMP
2458
nsMemoryReporterManager::GetVsizeMaxContiguous(int64_t* aAmount)
2459
0
{
2460
#ifdef HAVE_VSIZE_MAX_CONTIGUOUS_REPORTER
2461
  return VsizeMaxContiguousDistinguishedAmount(aAmount);
2462
#else
2463
  *aAmount = 0;
2464
0
  return NS_ERROR_NOT_AVAILABLE;
2465
0
#endif
2466
0
}
2467
2468
NS_IMETHODIMP
2469
nsMemoryReporterManager::GetResident(int64_t* aAmount)
2470
0
{
2471
0
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2472
0
  return ResidentDistinguishedAmount(aAmount);
2473
#else
2474
  *aAmount = 0;
2475
  return NS_ERROR_NOT_AVAILABLE;
2476
#endif
2477
}
2478
2479
NS_IMETHODIMP
2480
nsMemoryReporterManager::GetResidentFast(int64_t* aAmount)
2481
0
{
2482
0
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2483
0
  return ResidentFastDistinguishedAmount(aAmount);
2484
#else
2485
  *aAmount = 0;
2486
  return NS_ERROR_NOT_AVAILABLE;
2487
#endif
2488
}
2489
2490
/*static*/ int64_t
2491
nsMemoryReporterManager::ResidentFast()
2492
0
{
2493
0
#ifdef HAVE_VSIZE_AND_RESIDENT_REPORTERS
2494
0
  int64_t amount;
2495
0
  nsresult rv = ResidentFastDistinguishedAmount(&amount);
2496
0
  NS_ENSURE_SUCCESS(rv, 0);
2497
0
  return amount;
2498
#else
2499
  return 0;
2500
#endif
2501
}
2502
2503
NS_IMETHODIMP
2504
nsMemoryReporterManager::GetResidentPeak(int64_t* aAmount)
2505
0
{
2506
0
#ifdef HAVE_RESIDENT_PEAK_REPORTER
2507
0
  return ResidentPeakDistinguishedAmount(aAmount);
2508
#else
2509
  *aAmount = 0;
2510
  return NS_ERROR_NOT_AVAILABLE;
2511
#endif
2512
}
2513
2514
/*static*/ int64_t
2515
nsMemoryReporterManager::ResidentPeak()
2516
0
{
2517
0
#ifdef HAVE_RESIDENT_PEAK_REPORTER
2518
0
  int64_t amount = 0;
2519
0
  nsresult rv = ResidentPeakDistinguishedAmount(&amount);
2520
0
  NS_ENSURE_SUCCESS(rv, 0);
2521
0
  return amount;
2522
#else
2523
  return 0;
2524
#endif
2525
}
2526
2527
NS_IMETHODIMP
2528
nsMemoryReporterManager::GetResidentUnique(int64_t* aAmount)
2529
0
{
2530
0
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2531
0
  return ResidentUniqueDistinguishedAmount(aAmount);
2532
#else
2533
  *aAmount = 0;
2534
  return NS_ERROR_NOT_AVAILABLE;
2535
#endif
2536
}
2537
2538
/*static*/ int64_t
2539
nsMemoryReporterManager::ResidentUnique()
2540
0
{
2541
0
#ifdef HAVE_RESIDENT_UNIQUE_REPORTER
2542
0
  int64_t amount = 0;
2543
0
  nsresult rv = ResidentUniqueDistinguishedAmount(&amount);
2544
0
  NS_ENSURE_SUCCESS(rv, 0);
2545
0
  return amount;
2546
#else
2547
  return 0;
2548
#endif
2549
}
2550
2551
NS_IMETHODIMP
2552
nsMemoryReporterManager::GetHeapAllocated(int64_t* aAmount)
2553
0
{
2554
#ifdef HAVE_JEMALLOC_STATS
2555
  jemalloc_stats_t stats;
2556
  jemalloc_stats(&stats);
2557
  *aAmount = stats.allocated;
2558
  return NS_OK;
2559
#else
2560
  *aAmount = 0;
2561
0
  return NS_ERROR_NOT_AVAILABLE;
2562
0
#endif
2563
0
}
2564
2565
NS_IMETHODIMP
2566
nsMemoryReporterManager::GetHeapAllocatedAsync(nsIHeapAllocatedCallback *aCallback)
2567
0
{
2568
#ifdef HAVE_JEMALLOC_STATS
2569
  if (!mThreadPool) {
2570
    return NS_ERROR_UNEXPECTED;
2571
  }
2572
2573
  RefPtr<nsIMemoryReporterManager> self{this};
2574
  nsMainThreadPtrHandle<nsIHeapAllocatedCallback> mainThreadCallback(
2575
    new nsMainThreadPtrHolder<nsIHeapAllocatedCallback>("HeapAllocatedCallback",
2576
                                                        aCallback));
2577
2578
  nsCOMPtr<nsIRunnable> getHeapAllocatedRunnable = NS_NewRunnableFunction(
2579
    "nsMemoryReporterManager::GetHeapAllocatedAsync",
2580
    [self, mainThreadCallback]() mutable {
2581
      MOZ_ASSERT(!NS_IsMainThread());
2582
2583
      int64_t heapAllocated = 0;
2584
      nsresult rv = self->GetHeapAllocated(&heapAllocated);
2585
2586
      nsCOMPtr<nsIRunnable> resultCallbackRunnable = NS_NewRunnableFunction(
2587
        "nsMemoryReporterManager::GetHeapAllocatedAsync",
2588
        [mainThreadCallback, heapAllocated, rv]() mutable {
2589
          MOZ_ASSERT(NS_IsMainThread());
2590
2591
          if (NS_FAILED(rv)) {
2592
            mainThreadCallback->Callback(0);
2593
            return;
2594
          }
2595
2596
          mainThreadCallback->Callback(heapAllocated);
2597
        });  // resultCallbackRunnable.
2598
2599
      Unused << NS_DispatchToMainThread(resultCallbackRunnable);
2600
    }); // getHeapAllocatedRunnable.
2601
2602
  return mThreadPool->Dispatch(getHeapAllocatedRunnable, NS_DISPATCH_NORMAL);
2603
#else
2604
  return NS_ERROR_NOT_AVAILABLE;
2605
0
#endif
2606
0
}
2607
2608
// This has UNITS_PERCENTAGE, so it is multiplied by 100x.
2609
NS_IMETHODIMP
2610
nsMemoryReporterManager::GetHeapOverheadFraction(int64_t* aAmount)
2611
0
{
2612
#ifdef HAVE_JEMALLOC_STATS
2613
  jemalloc_stats_t stats;
2614
  jemalloc_stats(&stats);
2615
  *aAmount = HeapOverheadFraction(&stats);
2616
  return NS_OK;
2617
#else
2618
  *aAmount = 0;
2619
0
  return NS_ERROR_NOT_AVAILABLE;
2620
0
#endif
2621
0
}
2622
2623
static MOZ_MUST_USE nsresult
2624
GetInfallibleAmount(InfallibleAmountFn aAmountFn, int64_t* aAmount)
2625
0
{
2626
0
  if (aAmountFn) {
2627
0
    *aAmount = aAmountFn();
2628
0
    return NS_OK;
2629
0
  }
2630
0
  *aAmount = 0;
2631
0
  return NS_ERROR_NOT_AVAILABLE;
2632
0
}
2633
2634
NS_IMETHODIMP
2635
nsMemoryReporterManager::GetJSMainRuntimeGCHeap(int64_t* aAmount)
2636
0
{
2637
0
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeGCHeap, aAmount);
2638
0
}
2639
2640
NS_IMETHODIMP
2641
nsMemoryReporterManager::GetJSMainRuntimeTemporaryPeak(int64_t* aAmount)
2642
0
{
2643
0
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeTemporaryPeak, aAmount);
2644
0
}
2645
2646
NS_IMETHODIMP
2647
nsMemoryReporterManager::GetJSMainRuntimeRealmsSystem(int64_t* aAmount)
2648
0
{
2649
0
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsSystem,
2650
0
                             aAmount);
2651
0
}
2652
2653
NS_IMETHODIMP
2654
nsMemoryReporterManager::GetJSMainRuntimeRealmsUser(int64_t* aAmount)
2655
0
{
2656
0
  return GetInfallibleAmount(mAmountFns.mJSMainRuntimeRealmsUser,
2657
0
                             aAmount);
2658
0
}
2659
2660
NS_IMETHODIMP
2661
nsMemoryReporterManager::GetImagesContentUsedUncompressed(int64_t* aAmount)
2662
0
{
2663
0
  return GetInfallibleAmount(mAmountFns.mImagesContentUsedUncompressed,
2664
0
                             aAmount);
2665
0
}
2666
2667
NS_IMETHODIMP
2668
nsMemoryReporterManager::GetStorageSQLite(int64_t* aAmount)
2669
0
{
2670
0
  return GetInfallibleAmount(mAmountFns.mStorageSQLite, aAmount);
2671
0
}
2672
2673
NS_IMETHODIMP
2674
nsMemoryReporterManager::GetLowMemoryEventsVirtual(int64_t* aAmount)
2675
0
{
2676
0
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsVirtual, aAmount);
2677
0
}
2678
2679
NS_IMETHODIMP
2680
nsMemoryReporterManager::GetLowMemoryEventsCommitSpace(int64_t* aAmount)
2681
0
{
2682
0
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsCommitSpace, aAmount);
2683
0
}
2684
2685
NS_IMETHODIMP
2686
nsMemoryReporterManager::GetLowMemoryEventsPhysical(int64_t* aAmount)
2687
0
{
2688
0
  return GetInfallibleAmount(mAmountFns.mLowMemoryEventsPhysical, aAmount);
2689
0
}
2690
2691
NS_IMETHODIMP
2692
nsMemoryReporterManager::GetGhostWindows(int64_t* aAmount)
2693
0
{
2694
0
  return GetInfallibleAmount(mAmountFns.mGhostWindows, aAmount);
2695
0
}
2696
2697
// Distinguished amount: hard page faults for this process.  Only available
// on platforms that define HAVE_PAGE_FAULT_REPORTERS; elsewhere the
// out-param is zeroed and NS_ERROR_NOT_AVAILABLE is returned.
NS_IMETHODIMP
nsMemoryReporterManager::GetPageFaultsHard(int64_t* aAmount)
{
#ifdef HAVE_PAGE_FAULT_REPORTERS
  return PageFaultsHardDistinguishedAmount(aAmount);
#else
  *aAmount = 0;
  return NS_ERROR_NOT_AVAILABLE;
#endif
}
2707
2708
// Probes whether moz_malloc_usable_size() gives real answers on this
// platform: allocate a small block, ask for its usable size, and report
// true iff that size is non-zero.
NS_IMETHODIMP
nsMemoryReporterManager::GetHasMozMallocUsableSize(bool* aHas)
{
  void* probe = malloc(16);
  if (!probe) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  const size_t usableSize = moz_malloc_usable_size(probe);
  free(probe);
  *aHas = usableSize > 0;
  return NS_OK;
}
2720
2721
// Reports whether this build was compiled with DMD (dark matter detector)
// support, as a compile-time constant.
NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDEnabled(bool* aIsEnabled)
{
#ifdef MOZ_DMD
  *aIsEnabled = true;
#else
  *aIsEnabled = false;
#endif
  return NS_OK;
}
2731
2732
// Reports whether DMD is actually running right now.  Always false in
// builds without MOZ_DMD; otherwise delegates to dmd::IsRunning().
NS_IMETHODIMP
nsMemoryReporterManager::GetIsDMDRunning(bool* aIsRunning)
{
#ifdef MOZ_DMD
  *aIsRunning = dmd::IsRunning();
#else
  *aIsRunning = false;
#endif
  return NS_OK;
}
2742
2743
namespace {
2744
2745
/**
 * This runnable lets us implement
 * nsIMemoryReporterManager::MinimizeMemoryUsage().  We fire a heap-minimize
 * notification, spin the event loop, and repeat this process a few times.
 *
 * When this sequence finishes, we invoke the callback function passed to the
 * runnable's constructor.
 */
class MinimizeMemoryUsageRunnable : public Runnable
{
public:
  // aCallback may be null; if non-null it is run once after the final
  // iteration completes.
  explicit MinimizeMemoryUsageRunnable(nsIRunnable* aCallback)
    : mozilla::Runnable("MinimizeMemoryUsageRunnable")
    , mCallback(aCallback)
    , mRemainingIters(sNumIters)
  {
  }

  NS_IMETHOD Run() override
  {
    nsCOMPtr<nsIObserverService> os = services::GetObserverService();
    if (!os) {
      return NS_ERROR_FAILURE;
    }

    // All iterations done: announce completion and invoke the callback.
    if (mRemainingIters == 0) {
      os->NotifyObservers(nullptr, "after-minimize-memory-usage",
                          u"MinimizeMemoryUsageRunnable");
      if (mCallback) {
        mCallback->Run();
      }
      return NS_OK;
    }

    // Fire one heap-minimize notification, then re-dispatch ourselves so
    // the event loop gets a chance to spin before the next iteration.
    os->NotifyObservers(nullptr, "memory-pressure", u"heap-minimize");
    mRemainingIters--;
    NS_DispatchToMainThread(this);

    return NS_OK;
  }

private:
  // Send sNumIters heap-minimize notifications, spinning the event
  // loop after each notification (see bug 610166 comment 12 for an
  // explanation), because one notification doesn't cut it.
  static const uint32_t sNumIters = 3;

  nsCOMPtr<nsIRunnable> mCallback;   // run after the last iteration (may be null)
  uint32_t mRemainingIters;          // counts down from sNumIters to 0
};
2795
2796
} // namespace
2797
2798
// Kicks off the iterative heap-minimize sequence on the main thread.  The
// dispatched runnable re-notifies and re-dispatches itself a fixed number of
// times, then runs aCallback (which may be null).
NS_IMETHODIMP
nsMemoryReporterManager::MinimizeMemoryUsage(nsIRunnable* aCallback)
{
  RefPtr<MinimizeMemoryUsageRunnable> minimizer =
    new MinimizeMemoryUsageRunnable(aCallback);
  return NS_DispatchToMainThread(minimizer);
}
2806
2807
// Measures the memory consumption of a tab, split into JS (objects, strings,
// other) and non-JS (DOM, style, other) portions, via the measurement
// functions registered in mSizeOfTabFns.  Also reports how long each phase
// of the measurement took, in milliseconds.  Note that JS "private" memory
// is folded into the DOM number, since it is conceptually DOM-owned.
NS_IMETHODIMP
nsMemoryReporterManager::SizeOfTab(mozIDOMWindowProxy* aTopWindow,
                                   int64_t* aJSObjectsSize,
                                   int64_t* aJSStringsSize,
                                   int64_t* aJSOtherSize,
                                   int64_t* aDomSize,
                                   int64_t* aStyleSize,
                                   int64_t* aOtherSize,
                                   int64_t* aTotalSize,
                                   double*  aJSMilliseconds,
                                   double*  aNonJSMilliseconds)
{
  nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aTopWindow);
  auto* outerWindow = nsPIDOMWindowOuter::From(aTopWindow);
  if (NS_WARN_IF(!global) || NS_WARN_IF(!outerWindow)) {
    return NS_ERROR_FAILURE;
  }

  TimeStamp beforeJS = TimeStamp::Now();

  // Measure JS memory consumption (and possibly some non-JS consumption,
  // via |jsPrivateSize|).
  size_t jsObjectsSize, jsStringsSize, jsPrivateSize, jsOtherSize;
  nsresult rv = mSizeOfTabFns.mJS(global->GetGlobalJSObject(),
                                  &jsObjectsSize, &jsStringsSize,
                                  &jsPrivateSize, &jsOtherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp afterJS = TimeStamp::Now();

  // Measure non-JS memory consumption.
  size_t domSize, styleSize, otherSize;
  rv = mSizeOfTabFns.mNonJS(outerWindow, &domSize, &styleSize, &otherSize);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  TimeStamp afterNonJS = TimeStamp::Now();

  // Copy each measurement into its out-param while accumulating the total.
  *aTotalSize = 0;
  auto record = [&](int64_t* aOut, size_t aSize) {
    *aOut = aSize;
    *aTotalSize += aSize;
  };
  record(aJSObjectsSize, jsObjectsSize);
  record(aJSStringsSize, jsStringsSize);
  record(aJSOtherSize,   jsOtherSize);
  record(aDomSize,       jsPrivateSize + domSize);
  record(aStyleSize,     styleSize);
  record(aOtherSize,     otherSize);

  *aJSMilliseconds    = (afterJS - beforeJS).ToMilliseconds();
  *aNonJSMilliseconds = (afterNonJS - afterJS).ToMilliseconds();

  return NS_OK;
}
2863
2864
namespace mozilla {
2865
2866
// Obtains the singleton nsMemoryReporterManager (creating it on first use)
// into a local RefPtr named |mgr|, or returns NS_ERROR_FAILURE from the
// enclosing function if it cannot be obtained.
#define GET_MEMORY_REPORTER_MANAGER(mgr)                                      \
  RefPtr<nsMemoryReporterManager> mgr =                                       \
    nsMemoryReporterManager::GetOrCreate();                                   \
  if (!mgr) {                                                                 \
    return NS_ERROR_FAILURE;                                                  \
  }
2872
2873
// Exported helper: registers aReporter (held strongly) with the singleton
// memory reporter manager.
nsresult
RegisterStrongMemoryReporter(nsIMemoryReporter* aReporter)
{
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongReporter(reporter);
}
2882
2883
// Exported helper: registers an asynchronous reporter (held strongly) with
// the singleton memory reporter manager.
nsresult
RegisterStrongAsyncMemoryReporter(nsIMemoryReporter* aReporter)
{
  // Hold a strong reference to the argument to make sure it gets released if
  // we return early below.
  nsCOMPtr<nsIMemoryReporter> reporter = aReporter;
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterStrongAsyncReporter(reporter);
}
2892
2893
// Exported helper: registers aReporter with the singleton manager without
// taking a strong reference (the caller keeps the reporter alive).
nsresult
RegisterWeakMemoryReporter(nsIMemoryReporter* aReporter)
{
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakReporter(aReporter);
}
2899
2900
// Exported helper: registers an asynchronous reporter with the singleton
// manager without taking a strong reference.
nsresult
RegisterWeakAsyncMemoryReporter(nsIMemoryReporter* aReporter)
{
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->RegisterWeakAsyncReporter(aReporter);
}
2906
2907
// Exported helper: removes a strongly-held reporter from the singleton
// memory reporter manager.
nsresult
UnregisterStrongMemoryReporter(nsIMemoryReporter* aReporter)
{
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterStrongReporter(aReporter);
}
2913
2914
// Exported helper: removes a weakly-held reporter from the singleton
// memory reporter manager.
nsresult
UnregisterWeakMemoryReporter(nsIMemoryReporter* aReporter)
{
  GET_MEMORY_REPORTER_MANAGER(mgr)
  return mgr->UnregisterWeakReporter(aReporter);
}
2920
2921
// Macro for generating functions that register distinguished amount functions
// with the memory reporter manager.
//
// Expands to Register<name>DistinguishedAmount(), which stores aAmountFn in
// the manager's mAmountFns table, where the Get<name>() attribute getters
// above read it back.  |kind| selects the function-pointer type
// (e.g. Infallible -> InfallibleAmountFn).
#define DEFINE_REGISTER_DISTINGUISHED_AMOUNT(kind, name)                      \
  nsresult                                                                    \
  Register##name##DistinguishedAmount(kind##AmountFn aAmountFn)               \
  {                                                                           \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                          \
    mgr->mAmountFns.m##name = aAmountFn;                                      \
    return NS_OK;                                                             \
  }
2931
2932
// Macro for generating functions that unregister distinguished amount
// functions with the memory reporter manager.
//
// Expands to Unregister<name>DistinguishedAmount(), which clears the
// corresponding entry in the manager's mAmountFns table.
#define DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(name)                          \
  nsresult                                                                    \
  Unregister##name##DistinguishedAmount()                                     \
  {                                                                           \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                          \
    mgr->mAmountFns.m##name = nullptr;                                        \
    return NS_OK;                                                             \
  }
2942
2943
// Instantiate the register/unregister helpers for each distinguished amount.
// Only ImagesContentUsedUncompressed and StorageSQLite get unregister
// variants here.
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeGCHeap)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeTemporaryPeak)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsSystem)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, JSMainRuntimeRealmsUser)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, ImagesContentUsedUncompressed)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(ImagesContentUsedUncompressed)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, StorageSQLite)
DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT(StorageSQLite)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsVirtual)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsCommitSpace)
DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, LowMemoryEventsPhysical)

DEFINE_REGISTER_DISTINGUISHED_AMOUNT(Infallible, GhostWindows)

#undef DEFINE_REGISTER_DISTINGUISHED_AMOUNT
#undef DEFINE_UNREGISTER_DISTINGUISHED_AMOUNT
2962
2963
// Macro generating Register<name>SizeOfTab(), which stores the given
// measurement function in the manager's mSizeOfTabFns table; the
// SizeOfTab() method above calls these functions to measure a tab.
#define DEFINE_REGISTER_SIZE_OF_TAB(name)                                     \
  nsresult                                                                    \
  Register##name##SizeOfTab(name##SizeOfTabFn aSizeOfTabFn)                   \
  {                                                                           \
    GET_MEMORY_REPORTER_MANAGER(mgr)                                          \
    mgr->mSizeOfTabFns.m##name = aSizeOfTabFn;                                \
    return NS_OK;                                                             \
  }
2971
2972
// Instantiate the registration helpers for the JS and non-JS tab measurers,
// then retire the file-local macros.
DEFINE_REGISTER_SIZE_OF_TAB(JS);
DEFINE_REGISTER_SIZE_OF_TAB(NonJS);

#undef DEFINE_REGISTER_SIZE_OF_TAB

#undef GET_MEMORY_REPORTER_MANAGER
2978
2979
} // namespace mozilla