Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/gfx/layers/mlgpu/SharedBufferMLGPU.cpp

Every executable line in this file has an execution count of 0 (the file is entirely uncovered).

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SharedBufferMLGPU.h"
#include "BufferCache.h"
#include "MLGDevice.h"

using namespace std;

namespace mozilla {
namespace layers {

SharedBufferMLGPU::SharedBufferMLGPU(MLGDevice* aDevice, MLGBufferType aType, size_t aDefaultSize)
 : mDevice(aDevice),
   mType(aType),
   mDefaultSize(aDefaultSize),
   mCanUseOffsetAllocation(true),
   mCurrentPosition(0),
   mMaxSize(0),
   mMap(),
   mMapped(false),
   mBytesUsedThisFrame(0),
   mNumSmallFrames(0)
{
  MOZ_COUNT_CTOR(SharedBufferMLGPU);
}

SharedBufferMLGPU::~SharedBufferMLGPU()
{
  MOZ_COUNT_DTOR(SharedBufferMLGPU);
  Unmap();
}

bool
SharedBufferMLGPU::Init()
{
  // If we can't use buffer offset binding, we never allocate shared buffers.
  if (!mCanUseOffsetAllocation) {
    return true;
  }

  // If we can use offset binding, allocate an initial shared buffer now.
  if (!GrowBuffer(mDefaultSize)) {
    return false;
  }
  return true;
}

void
SharedBufferMLGPU::Reset()
{
  // We shouldn't be mapped here, but just in case, unmap now.
  Unmap();
  mBytesUsedThisFrame = 0;

  // If we allocated a large buffer for a particularly heavy layer tree,
  // but have not used most of the buffer again for many frames, we
  // discard the buffer. This is to prevent having to perform large
  // pointless uploads after visiting a single heavy page, and it also
  // lessens ping-ponging between large and small buffers.
  if (mBuffer &&
      (mBuffer->GetSize() > mDefaultSize * 4) &&
      mNumSmallFrames >= 10)
  {
    mBuffer = nullptr;
  }

  // Note that we do not aggressively map a new buffer. There's no reason to,
  // and it'd cause unnecessary uploads when painting empty frames.
}

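// Ensure the shared buffer has at least aBytes of free space and is mapped
// for writing, growing the buffer and (re)mapping it as needed.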
bool
SharedBufferMLGPU::EnsureMappedBuffer(size_t aBytes)
{
  if (!mBuffer || (mMaxSize - mCurrentPosition < aBytes)) {
    if (!GrowBuffer(aBytes)) {
      return false;
    }
  }
  if (!mMapped && !Map()) {
    return false;
  }
  return true;
}

// We don't want to cache large buffers, since it results in larger uploads
// that might not be needed.
static const size_t kMaxCachedBufferSize = 128 * 1024;

bool
SharedBufferMLGPU::GrowBuffer(size_t aBytes)
{
  // We only pre-allocate buffers if we can use offset allocation.
  MOZ_ASSERT(mCanUseOffsetAllocation);

  // Unmap the previous buffer. This will retain mBuffer, but free up the
  // address space used by its mapping.
  Unmap();

  size_t maybeSize = mDefaultSize;
  if (mBuffer) {
    // First try doubling the previous allocation, capped at kMaxCachedBufferSize.
    maybeSize = std::min(kMaxCachedBufferSize, mBuffer->GetSize() * 2);
  }

  size_t bytes = std::max(aBytes, maybeSize);
  mBuffer = mDevice->CreateBuffer(mType, bytes, MLGUsage::Dynamic);
  if (!mBuffer) {
    return false;
  }

  mCurrentPosition = 0;
  mMaxSize = mBuffer->GetSize();
  return true;
}

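// Unmap the buffer and record whether this frame's usage stayed within the
// default size; Reset() uses that count to decide when to discard an
// oversized buffer.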
void
SharedBufferMLGPU::PrepareForUsage()
{
  Unmap();

  if (mBytesUsedThisFrame <= mDefaultSize) {
    mNumSmallFrames++;
  } else {
    mNumSmallFrames = 0;
  }
}

bool
SharedBufferMLGPU::Map()
{
  MOZ_ASSERT(mBuffer);
  MOZ_ASSERT(!mMapped);

  if (!mDevice->Map(mBuffer, MLGMapType::WRITE_DISCARD, &mMap)) {
    // Don't retain the buffer; it's useless if we can't map it.
    mBuffer = nullptr;
    return false;
  }

  mCurrentPosition = 0;
  mMapped = true;
  return true;
}

void
SharedBufferMLGPU::Unmap()
{
  if (!mMapped) {
    return;
  }

  mBytesUsedThisFrame += mCurrentPosition;

  mDevice->Unmap(mBuffer);
  mMap = MLGMappedResource();
  mMapped = false;
}

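// Bump-allocate aBytes out of the mapped shared buffer. Returns a pointer for
// writing into the mapping and reports, via the out-parameters, the buffer and
// byte offset the caller should bind.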
uint8_t*
SharedBufferMLGPU::GetBufferPointer(size_t aBytes, ptrdiff_t* aOutOffset, RefPtr<MLGBuffer>* aOutBuffer)
{
  if (!EnsureMappedBuffer(aBytes)) {
    return nullptr;
  }

  ptrdiff_t newPos = mCurrentPosition + aBytes;
  MOZ_ASSERT(size_t(newPos) <= mMaxSize);

  *aOutOffset = mCurrentPosition;
  *aOutBuffer = mBuffer;

  uint8_t* ptr = reinterpret_cast<uint8_t*>(mMap.mData) + mCurrentPosition;
  mCurrentPosition = newPos;
  return ptr;
}

VertexBufferSection::VertexBufferSection()
 : mOffset(-1),
   mNumVertices(0),
   mStride(0)
{}

void
VertexBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset, size_t aNumVertices, size_t aStride)
{
  mBuffer = aBuffer;
  mOffset = aOffset;
  mNumVertices = aNumVertices;
  mStride = aStride;
}

ConstantBufferSection::ConstantBufferSection()
 : mOffset(-1),
   mNumBytes(0),
   mNumItems(0)
{}

void
ConstantBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset, size_t aBytes, size_t aNumItems)
{
  mBuffer = aBuffer;
  mOffset = aOffset;
  mNumBytes = aBytes;
  mNumItems = aNumItems;
}

SharedVertexBuffer::SharedVertexBuffer(MLGDevice* aDevice, size_t aDefaultSize)
 : SharedBufferMLGPU(aDevice, MLGBufferType::Vertex, aDefaultSize)
{
}

bool
SharedVertexBuffer::Allocate(VertexBufferSection* aHolder,
                             size_t aNumItems,
                             size_t aSizeOfItem,
                             const void* aData)
{
  RefPtr<MLGBuffer> buffer;
  ptrdiff_t offset;
  size_t bytes = aSizeOfItem * aNumItems;
  uint8_t* ptr = GetBufferPointer(bytes, &offset, &buffer);
  if (!ptr) {
    return false;
  }

  memcpy(ptr, aData, bytes);
  aHolder->Init(buffer, offset, aNumItems, aSizeOfItem);
  return true;
}

AutoBufferUploadBase::AutoBufferUploadBase()
  : mPtr(nullptr)
{
}

AutoBufferUploadBase::~AutoBufferUploadBase()
{
  if (mBuffer) {
    UnmapBuffer();
  }
}

void
AutoBufferUploadBase::Init(void* aPtr, MLGDevice* aDevice, MLGBuffer* aBuffer)
{
  MOZ_ASSERT(!mPtr && aPtr);
  mPtr = aPtr;
  mDevice = aDevice;
  mBuffer = aBuffer;
}

SharedConstantBuffer::SharedConstantBuffer(MLGDevice* aDevice, size_t aDefaultSize)
 : SharedBufferMLGPU(aDevice, MLGBufferType::Constant, aDefaultSize)
{
  mMaxConstantBufferBindSize = aDevice->GetMaxConstantBufferBindSize();
  mCanUseOffsetAllocation = aDevice->CanUseConstantBufferOffsetBinding();
}

bool
SharedConstantBuffer::Allocate(ConstantBufferSection* aHolder,
                               AutoBufferUploadBase* aPtr,
                               size_t aNumItems,
                               size_t aSizeOfItem)
{
  MOZ_ASSERT(aSizeOfItem % 16 == 0, "Items must be padded to 16 bytes");

  size_t bytes = aNumItems * aSizeOfItem;
  if (bytes > mMaxConstantBufferBindSize) {
    gfxWarning() << "Attempted to allocate too many bytes into a constant buffer";
    return false;
  }

  RefPtr<MLGBuffer> buffer;
  ptrdiff_t offset;
  if (!GetBufferPointer(aPtr, bytes, &offset, &buffer)) {
    return false;
  }

  aHolder->Init(buffer, offset, bytes, aNumItems);
  return true;
}

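// Allocate a standalone constant buffer, reusing the device's BufferCache when
// one is available, and map it for writing. The offset is reported as -1 to
// signal that offset binding is not used for this allocation.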
uint8_t*
SharedConstantBuffer::AllocateNewBuffer(size_t aBytes, ptrdiff_t* aOutOffset, RefPtr<MLGBuffer>* aOutBuffer)
{
  RefPtr<MLGBuffer> buffer;
  if (BufferCache* cache = mDevice->GetConstantBufferCache()) {
    buffer = cache->GetOrCreateBuffer(aBytes);
  } else {
    buffer = mDevice->CreateBuffer(MLGBufferType::Constant, aBytes, MLGUsage::Dynamic);
  }
  if (!buffer) {
    return nullptr;
  }

  MLGMappedResource map;
  if (!mDevice->Map(buffer, MLGMapType::WRITE_DISCARD, &map)) {
    return nullptr;
  }

  // Signal that offsetting is not supported.
  *aOutOffset = -1;
  *aOutBuffer = buffer;
  return reinterpret_cast<uint8_t*>(map.mData);
}

void
AutoBufferUploadBase::UnmapBuffer()
{
  mDevice->Unmap(mBuffer);
}

} // namespace layers
} // namespace mozilla
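
For context, a minimal caller-side sketch of the vertex allocation path follows. It is not part of the file above; the RectVertex layout and the UploadQuad helper are hypothetical, and only SharedVertexBuffer, VertexBufferSection, and Allocate come from this source.

// Hypothetical usage sketch (not from mozilla-central): copy four vertices
// into the shared vertex buffer and record where they landed.
struct RectVertex {
  float x, y, u, v;  // illustrative 16-byte vertex layout
};

static bool
UploadQuad(mozilla::layers::SharedVertexBuffer& aVertices,
           mozilla::layers::VertexBufferSection* aSection)
{
  const RectVertex kQuad[4] = {
    { 0.0f, 0.0f, 0.0f, 0.0f },
    { 1.0f, 0.0f, 1.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f, 1.0f },
    { 1.0f, 1.0f, 1.0f, 1.0f },
  };
  // Allocate() bump-allocates 4 * sizeof(RectVertex) bytes from the shared
  // buffer, memcpys the vertex data into the mapping, and fills aSection with
  // the buffer, byte offset, vertex count, and stride.
  return aVertices.Allocate(aSection, 4, sizeof(RectVertex), kQuad);
}

The constant-buffer path (SharedConstantBuffer::Allocate) is used the same way, except that it takes an AutoBufferUploadBase for the write pointer, asserts that each item is padded to 16 bytes, and fails allocations larger than the device's maximum constant buffer bind size.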