/src/skia/src/gpu/graphite/BufferManager.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright 2021 Google Inc. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license that can be |
5 | | * found in the LICENSE file. |
6 | | */ |
7 | | |
8 | | #include "src/gpu/graphite/BufferManager.h" |
9 | | |
10 | | #include "include/gpu/graphite/Recording.h" |
11 | | #include "src/gpu/graphite/Caps.h" |
12 | | #include "src/gpu/graphite/ContextPriv.h" |
13 | | #include "src/gpu/graphite/Log.h" |
14 | | #include "src/gpu/graphite/QueueManager.h" |
15 | | #include "src/gpu/graphite/RecordingPriv.h" |
16 | | #include "src/gpu/graphite/ResourceProvider.h" |
17 | | #include "src/gpu/graphite/SharedContext.h" |
18 | | #include "src/gpu/graphite/UploadBufferManager.h" |
19 | | #include "src/gpu/graphite/task/ClearBuffersTask.h" |
20 | | #include "src/gpu/graphite/task/CopyTask.h" |
21 | | #include "src/gpu/graphite/task/TaskList.h" |
22 | | |
23 | | #include <limits> |
24 | | |
25 | | namespace skgpu::graphite { |
26 | | |
27 | | namespace { |
28 | | |
29 | | // TODO: Tune these values on real world data |
30 | | static constexpr uint32_t kVertexBufferSize = 16 << 10; // 16 KB |
31 | | static constexpr uint32_t kIndexBufferSize = 2 << 10; // 2 KB |
32 | | static constexpr uint32_t kUniformBufferSize = 2 << 10; // 2 KB |
33 | | static constexpr uint32_t kStorageBufferSize = 2 << 10; // 2 KB |
34 | | |
35 | | // The limit for all data created by the StaticBufferManager. This data remains alive for |
36 | | // the entire SharedContext so we want to keep it small and give a concrete upper bound to |
37 | | // clients for our steady-state memory usage. |
38 | | // FIXME The current usage is 4732 bytes across static vertex and index buffers, but that includes |
39 | | // multiple copies of tessellation data, and an unoptimized AnalyticRRect mesh. Once those issues |
40 | | // are addressed, we can tighten this and decide on the transfer buffer sizing as well. |
41 | | [[maybe_unused]] static constexpr uint32_t kMaxStaticDataSize = 6 << 10; |
42 | | |
43 | 0 | uint32_t validate_count_and_stride(size_t count, size_t stride) { |
44 | | // size_t may just be uint32_t, so this ensures we have enough bits to do |
45 | | // compute the required byte product. |
46 | 0 | uint64_t count64 = SkTo<uint64_t>(count); |
47 | 0 | uint64_t stride64 = SkTo<uint64_t>(stride); |
48 | 0 | uint64_t bytes64 = count64*stride64; |
49 | 0 | if (count64 > std::numeric_limits<uint32_t>::max() || |
50 | 0 | stride64 > std::numeric_limits<uint32_t>::max() || |
51 | 0 | bytes64 > std::numeric_limits<uint32_t>::max()) { |
52 | | // Return 0 to skip further allocation attempts. |
53 | 0 | return 0; |
54 | 0 | } |
55 | | // Since count64 and stride64 fit into 32-bits, their product did not overflow, and the product |
56 | | // fits into 32-bits so this cast is safe. |
57 | 0 | return SkTo<uint32_t>(bytes64); |
58 | 0 | } |
59 | | |
60 | 0 | uint32_t validate_size(size_t requiredBytes) { |
61 | 0 | return validate_count_and_stride(1, requiredBytes); |
62 | 0 | } |
63 | | |
64 | 0 | uint32_t sufficient_block_size(uint32_t requiredBytes, uint32_t blockSize) { |
65 | | // Always request a buffer at least 'requiredBytes', but keep them in multiples of |
66 | | // 'blockSize' for improved reuse. |
67 | 0 | static constexpr uint32_t kMaxSize = std::numeric_limits<uint32_t>::max(); |
68 | 0 | uint32_t maxBlocks = kMaxSize / blockSize; |
69 | 0 | uint32_t blocks = (requiredBytes / blockSize) + 1; |
70 | 0 | uint32_t bufferSize = blocks > maxBlocks ? kMaxSize : (blocks * blockSize); |
71 | 0 | SkASSERT(requiredBytes < bufferSize); |
72 | 0 | return bufferSize; |
73 | 0 | } Unexecuted instantiation: BufferManager.cpp:skgpu::graphite::(anonymous namespace)::sufficient_block_size(unsigned int, unsigned int) Unexecuted instantiation: BufferManager.cpp:skgpu::graphite::(anonymous namespace)::sufficient_block_size(unsigned int, unsigned int) |
74 | | |
75 | | bool can_fit(uint32_t requestedSize, |
76 | | uint32_t allocatedSize, |
77 | | uint32_t currentOffset, |
78 | 0 | uint32_t alignment) { |
79 | 0 | uint32_t startOffset = SkAlignTo(currentOffset, alignment); |
80 | 0 | return requestedSize <= (allocatedSize - startOffset); |
81 | 0 | } |
82 | | |
83 | 0 | uint32_t starting_alignment(BufferType type, bool useTransferBuffers, const Caps* caps) { |
84 | | // Both vertex and index data is aligned to 4 bytes by default |
85 | 0 | uint32_t alignment = 4; |
86 | 0 | if (type == BufferType::kUniform) { |
87 | 0 | alignment = SkTo<uint32_t>(caps->requiredUniformBufferAlignment()); |
88 | 0 | } else if (type == BufferType::kStorage || type == BufferType::kVertexStorage || |
89 | 0 | type == BufferType::kIndexStorage || type == BufferType::kIndirect) { |
90 | 0 | alignment = SkTo<uint32_t>(caps->requiredStorageBufferAlignment()); |
91 | 0 | } |
92 | 0 | if (useTransferBuffers) { |
93 | 0 | alignment = std::max(alignment, SkTo<uint32_t>(caps->requiredTransferBufferAlignment())); |
94 | 0 | } |
95 | 0 | return alignment; |
96 | 0 | } |
97 | | |
98 | | } // anonymous namespace |
99 | | |
100 | | // ------------------------------------------------------------------------------------------------ |
101 | | // ScratchBuffer |
102 | | |
103 | | ScratchBuffer::ScratchBuffer(uint32_t size, uint32_t alignment, |
104 | | sk_sp<Buffer> buffer, DrawBufferManager* owner) |
105 | | : fSize(size) |
106 | | , fAlignment(alignment) |
107 | | , fBuffer(std::move(buffer)) |
108 | 0 | , fOwner(owner) { |
109 | 0 | SkASSERT(fSize > 0); |
110 | 0 | SkASSERT(fBuffer); |
111 | 0 | SkASSERT(fOwner); |
112 | 0 | SkASSERT(fSize <= fBuffer->size()); |
113 | 0 | } Unexecuted instantiation: skgpu::graphite::ScratchBuffer::ScratchBuffer(unsigned int, unsigned int, sk_sp<skgpu::graphite::Buffer>, skgpu::graphite::DrawBufferManager*) Unexecuted instantiation: skgpu::graphite::ScratchBuffer::ScratchBuffer(unsigned int, unsigned int, sk_sp<skgpu::graphite::Buffer>, skgpu::graphite::DrawBufferManager*) |
114 | | |
115 | 0 | ScratchBuffer::~ScratchBuffer() { this->returnToPool(); } |
116 | | |
117 | 0 | BindBufferInfo ScratchBuffer::suballocate(size_t requiredBytes) { |
118 | 0 | const uint32_t requiredBytes32 = validate_size(requiredBytes); |
119 | 0 | if (!this->isValid() || !requiredBytes32) { |
120 | 0 | return {}; |
121 | 0 | } |
122 | 0 | if (!can_fit(requiredBytes32, fSize, fOffset, fAlignment)) { |
123 | 0 | return {}; |
124 | 0 | } |
125 | 0 | const uint32_t offset = SkAlignTo(fOffset, fAlignment); |
126 | 0 | fOffset = offset + requiredBytes32; |
127 | 0 | return {fBuffer.get(), offset, requiredBytes32}; |
128 | 0 | } |
129 | | |
130 | 0 | void ScratchBuffer::returnToPool() { |
131 | 0 | if (fOwner && fBuffer) { |
132 | | // TODO: Generalize the pool to other buffer types. |
133 | 0 | fOwner->fReusableScratchStorageBuffers.push_back(std::move(fBuffer)); |
134 | 0 | SkASSERT(!fBuffer); |
135 | 0 | } |
136 | 0 | } Unexecuted instantiation: skgpu::graphite::ScratchBuffer::returnToPool() Unexecuted instantiation: skgpu::graphite::ScratchBuffer::returnToPool() |
137 | | |
138 | | // ------------------------------------------------------------------------------------------------ |
139 | | // DrawBufferManager |
140 | | |
141 | | DrawBufferManager::DrawBufferManager(ResourceProvider* resourceProvider, |
142 | | const Caps* caps, |
143 | | UploadBufferManager* uploadManager) |
144 | | : fResourceProvider(resourceProvider) |
145 | | , fCaps(caps) |
146 | | , fUploadManager(uploadManager) |
147 | | , fCurrentBuffers{{ |
148 | | { BufferType::kVertex, kVertexBufferSize, caps }, |
149 | | { BufferType::kIndex, kIndexBufferSize, caps }, |
150 | | { BufferType::kUniform, kUniformBufferSize, caps }, |
151 | | { BufferType::kStorage, kStorageBufferSize, caps }, // mapped storage |
152 | | { BufferType::kStorage, kStorageBufferSize, caps }, // GPU-only storage |
153 | | { BufferType::kVertexStorage, kVertexBufferSize, caps }, |
154 | | { BufferType::kIndexStorage, kIndexBufferSize, caps }, |
155 | 0 | { BufferType::kIndirect, kStorageBufferSize, caps } }} {} |
156 | | |
157 | 0 | DrawBufferManager::~DrawBufferManager() {} |
158 | | |
// For simplicity, if transfer buffers are being used, we align the data to the max alignment of
// either the final buffer type or cpu->gpu transfer alignment so that the buffers are laid out
// the same in memory.
DrawBufferManager::BufferInfo::BufferInfo(BufferType type, uint32_t blockSize, const Caps* caps)
        : fType(type)
        , fStartAlignment(starting_alignment(type, !caps->drawBufferCanBeMapped(), caps))
        // Round the block size up so each block holds a whole number of aligned slots.
        , fBlockSize(SkAlignTo(blockSize, fStartAlignment)) {}
166 | | |
167 | | std::pair<VertexWriter, BindBufferInfo> DrawBufferManager::getVertexWriter(size_t count, |
168 | 0 | size_t stride) { |
169 | 0 | uint32_t requiredBytes = validate_count_and_stride(count, stride); |
170 | 0 | if (!requiredBytes) { |
171 | 0 | return {}; |
172 | 0 | } |
173 | | |
174 | 0 | auto& info = fCurrentBuffers[kVertexBufferIndex]; |
175 | 0 | auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "VertexBuffer"); |
176 | 0 | return {VertexWriter(ptr, requiredBytes), bindInfo}; |
177 | 0 | } |
178 | | |
// Gives back the tail of the most recent vertex allocation so the space can be reused by the
// next getVertexWriter() call. 'unusedBytes' must not exceed what was just allocated.
void DrawBufferManager::returnVertexBytes(size_t unusedBytes) {
    if (fMappingFailed) {
        // The caller can be unaware that the written data went to no-where and will still call
        // this function.
        return;
    }
    SkASSERT(fCurrentBuffers[kVertexBufferIndex].fOffset >= unusedBytes);
    fCurrentBuffers[kVertexBufferIndex].fOffset -= unusedBytes;
}
188 | | |
189 | | std::pair<IndexWriter, BindBufferInfo> DrawBufferManager::getIndexWriter(size_t count, |
190 | 0 | size_t stride) { |
191 | 0 | uint32_t requiredBytes = validate_count_and_stride(count, stride); |
192 | 0 | if (!requiredBytes) { |
193 | 0 | return {}; |
194 | 0 | } |
195 | | |
196 | 0 | auto& info = fCurrentBuffers[kIndexBufferIndex]; |
197 | 0 | auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "IndexBuffer"); |
198 | 0 | return {IndexWriter(ptr, requiredBytes), bindInfo}; |
199 | 0 | } |
200 | | |
201 | | std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getUniformWriter(size_t count, |
202 | 0 | size_t stride) { |
203 | 0 | uint32_t requiredBytes = validate_count_and_stride(count, stride); |
204 | 0 | if (!requiredBytes) { |
205 | 0 | return {}; |
206 | 0 | } |
207 | | |
208 | 0 | auto& info = fCurrentBuffers[kUniformBufferIndex]; |
209 | 0 | auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer"); |
210 | 0 | return {UniformWriter(ptr, requiredBytes), bindInfo}; |
211 | 0 | } |
212 | | |
213 | | std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getSsboWriter(size_t count, |
214 | 0 | size_t stride) { |
215 | 0 | uint32_t requiredBytes = validate_count_and_stride(count, stride); |
216 | 0 | if (!requiredBytes) { |
217 | 0 | return {}; |
218 | 0 | } |
219 | | |
220 | 0 | auto& info = fCurrentBuffers[kStorageBufferIndex]; |
221 | 0 | auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer"); |
222 | 0 | return {UniformWriter(ptr, requiredBytes), bindInfo}; |
223 | 0 | } |
224 | | |
225 | | std::pair<void* /*mappedPtr*/, BindBufferInfo> DrawBufferManager::getUniformPointer( |
226 | 0 | size_t requiredBytes) { |
227 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
228 | 0 | if (!requiredBytes32) { |
229 | 0 | return {}; |
230 | 0 | } |
231 | | |
232 | 0 | auto& info = fCurrentBuffers[kUniformBufferIndex]; |
233 | 0 | return this->prepareMappedBindBuffer(&info, requiredBytes32, "UniformBuffer"); |
234 | 0 | } |
235 | | |
236 | | std::pair<void* /*mappedPtr*/, BindBufferInfo> DrawBufferManager::getStoragePointer( |
237 | 0 | size_t requiredBytes) { |
238 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
239 | 0 | if (!requiredBytes32) { |
240 | 0 | return {}; |
241 | 0 | } |
242 | | |
243 | 0 | auto& info = fCurrentBuffers[kStorageBufferIndex]; |
244 | 0 | return this->prepareMappedBindBuffer(&info, requiredBytes32, "StorageBuffer"); |
245 | 0 | } |
246 | | |
247 | 0 | BindBufferInfo DrawBufferManager::getStorage(size_t requiredBytes, ClearBuffer cleared) { |
248 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
249 | 0 | if (!requiredBytes32) { |
250 | 0 | return {}; |
251 | 0 | } |
252 | | |
253 | 0 | auto& info = fCurrentBuffers[kGpuOnlyStorageBufferIndex]; |
254 | 0 | return this->prepareBindBuffer(&info, |
255 | 0 | requiredBytes32, |
256 | 0 | "StorageBuffer", |
257 | 0 | /*supportCpuUpload=*/false, |
258 | 0 | cleared); |
259 | 0 | } |
260 | | |
261 | 0 | BindBufferInfo DrawBufferManager::getVertexStorage(size_t requiredBytes) { |
262 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
263 | 0 | if (!requiredBytes32) { |
264 | 0 | return {}; |
265 | 0 | } |
266 | | |
267 | 0 | auto& info = fCurrentBuffers[kVertexStorageBufferIndex]; |
268 | 0 | return this->prepareBindBuffer(&info, requiredBytes32, "VertexStorageBuffer"); |
269 | 0 | } |
270 | | |
271 | 0 | BindBufferInfo DrawBufferManager::getIndexStorage(size_t requiredBytes) { |
272 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
273 | 0 | if (!requiredBytes32) { |
274 | 0 | return {}; |
275 | 0 | } |
276 | | |
277 | 0 | auto& info = fCurrentBuffers[kIndexStorageBufferIndex]; |
278 | 0 | return this->prepareBindBuffer(&info, requiredBytes32, "IndexStorageBuffer"); |
279 | 0 | } |
280 | | |
281 | 0 | BindBufferInfo DrawBufferManager::getIndirectStorage(size_t requiredBytes, ClearBuffer cleared) { |
282 | 0 | uint32_t requiredBytes32 = validate_size(requiredBytes); |
283 | 0 | if (!requiredBytes32) { |
284 | 0 | return {}; |
285 | 0 | } |
286 | | |
287 | 0 | auto& info = fCurrentBuffers[kIndirectStorageBufferIndex]; |
288 | 0 | return this->prepareBindBuffer(&info, |
289 | 0 | requiredBytes32, |
290 | 0 | "IndirectStorageBuffer", |
291 | 0 | /*supportCpuUpload=*/false, |
292 | 0 | cleared); |
293 | 0 | } |
294 | | |
// Returns a ScratchBuffer of at least 'requiredBytes' of GPU-only storage, preferring a buffer
// from the reuse pool before asking the ResourceProvider for a new one. Returns an invalid
// ScratchBuffer on size overflow, a prior failure, or allocation failure.
ScratchBuffer DrawBufferManager::getScratchStorage(size_t requiredBytes) {
    uint32_t requiredBytes32 = validate_size(requiredBytes);
    if (!requiredBytes32 || fMappingFailed) {
        return {};
    }

    // TODO: Generalize the pool to other buffer types.
    auto& info = fCurrentBuffers[kStorageBufferIndex];
    // Round the request up to the storage block size to improve pool reuse.
    uint32_t bufferSize = sufficient_block_size(requiredBytes32, info.fBlockSize);
    sk_sp<Buffer> buffer = this->findReusableSbo(bufferSize);
    if (!buffer) {
        buffer = fResourceProvider->findOrCreateBuffer(
                bufferSize, BufferType::kStorage, AccessPattern::kGpuOnly, "ScratchStorageBuffer");

        if (!buffer) {
            this->onFailedBuffer();
            return {};
        }
    }
    // The ScratchBuffer tracks the logical requested size, not the rounded-up allocated size.
    return {requiredBytes32, info.fStartAlignment, std::move(buffer), this};
}
316 | | |
// Puts the manager into the failed state after a buffer allocation or map failure: pending
// clears and pooled buffers are dropped, and every mapped buffer is unmapped and released so
// nothing partially-written is handed to a Recording. transferToRecording() observes the flag,
// reports the failure, and resets it.
void DrawBufferManager::onFailedBuffer() {
    fMappingFailed = true;

    // Clean up and unmap everything now
    fClearList.clear();
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, _] : fUsedBuffers) {
        if (buffer->isMapped()) {
            buffer->unmap();
        }
    }
    fUsedBuffers.clear();

    for (auto& info : fCurrentBuffers) {
        if (info.fBuffer && info.fBuffer->isMapped()) {
            info.fBuffer->unmap();
        }
        info.fBuffer = nullptr;
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}
340 | | |
// Hands all accumulated buffer state over to 'recording': pending clears become a
// ClearBuffersTask, pooled scratch buffers and fully-written draw buffers are attached as
// resource refs, and any buffer populated through a transfer buffer gets a
// CopyBufferToBufferTask appended. Returns false — and clears the error flag for the next
// Recording — if any allocation or mapping failed since the last transfer.
bool DrawBufferManager::transferToRecording(Recording* recording) {
    if (fMappingFailed) {
        // All state should have been reset by onFailedBuffer() except for this error flag.
        SkASSERT(fUsedBuffers.empty() &&
                 fClearList.empty() &&
                 fReusableScratchStorageBuffers.empty());
        fMappingFailed = false;
        return false;
    }

    if (!fClearList.empty()) {
        recording->priv().taskList()->add(ClearBuffersTask::Make(std::move(fClearList)));
    }

    // Transfer the buffers in the reuse pool to the recording.
    // TODO: Allow reuse across different Recordings?
    for (auto& buffer : fReusableScratchStorageBuffers) {
        recording->priv().addResourceRef(std::move(buffer));
    }
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, transferBuffer] : fUsedBuffers) {
        if (transferBuffer) {
            SkASSERT(buffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            size_t copySize = buffer->size();
            recording->priv().taskList()->add(
                    CopyBufferToBufferTask::Make(transferBuffer.fBuffer,
                                                 transferBuffer.fOffset,
                                                 std::move(buffer),
                                                 /*dstOffset=*/0,
                                                 copySize));
        } else {
            // Directly-mapped buffer: unmap before the GPU reads from it.
            if (buffer->isMapped()) {
                buffer->unmap();
            }
            recording->priv().addResourceRef(std::move(buffer));
        }
    }
    fUsedBuffers.clear();

    // The current draw buffers have not been added to fUsedBuffers,
    // so we need to handle them as well.
    for (auto& info : fCurrentBuffers) {
        if (!info.fBuffer) {
            continue;
        }
        if (info.fTransferBuffer) {
            // A transfer buffer should always be mapped at this stage
            SkASSERT(info.fBuffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            recording->priv().taskList()->add(
                    CopyBufferToBufferTask::Make(info.fTransferBuffer.fBuffer,
                                                 info.fTransferBuffer.fOffset,
                                                 info.fBuffer,
                                                 /*dstOffset=*/0,
                                                 info.fBuffer->size()));
        } else {
            if (info.fBuffer->isMapped()) {
                info.fBuffer->unmap();
            }
            recording->priv().addResourceRef(std::move(info.fBuffer));
        }
        // Reset per-type state for the next Recording.
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }

    return true;
}
414 | | |
// Suballocates 'requiredBytes' from 'info' with CPU-upload support and returns the CPU-writable
// pointer for the allocated range plus its binding. The pointer comes from the transfer buffer
// when one is in use, otherwise from mapping the draw buffer directly. Returns {nullptr, {}}
// (after flagging failure) if allocation or mapping fails.
std::pair<void*, BindBufferInfo> DrawBufferManager::prepareMappedBindBuffer(
        BufferInfo* info,
        uint32_t requiredBytes,
        std::string_view label) {
    BindBufferInfo bindInfo = this->prepareBindBuffer(info,
                                                      requiredBytes,
                                                      std::move(label),
                                                      /*supportCpuUpload=*/true);
    if (!bindInfo) {
        // prepareBindBuffer() already called onFailedBuffer()
        SkASSERT(fMappingFailed);
        return {nullptr, {}};
    }

    // If there's a transfer buffer, its mapped pointer should already have been validated
    SkASSERT(!info->fTransferBuffer || info->fTransferMapPtr);
    void* mapPtr = info->fTransferBuffer ? info->fTransferMapPtr : info->fBuffer->map();
    if (!mapPtr) {
        // Mapping a direct draw buffer failed
        this->onFailedBuffer();
        return {nullptr, {}};
    }

    // Advance the mapped base pointer to the start of this allocation's range.
    mapPtr = SkTAddOffset<void>(mapPtr, static_cast<ptrdiff_t>(bindInfo.fOffset));
    return {mapPtr, bindInfo};
}
441 | | |
// Core suballocation routine shared by all getters. Retires the current buffer to fUsedBuffers
// when the aligned request no longer fits, allocates a replacement sized in block multiples
// (plus a transfer buffer when CPU uploads are needed but the draw buffer cannot be mapped),
// and returns the binding for 'requiredBytes'. Optionally records the range in fClearList.
// Returns an empty binding — after calling onFailedBuffer() — when any allocation fails.
BindBufferInfo DrawBufferManager::prepareBindBuffer(BufferInfo* info,
                                                    uint32_t requiredBytes,
                                                    std::string_view label,
                                                    bool supportCpuUpload,
                                                    ClearBuffer cleared) {
    SkASSERT(info);
    SkASSERT(requiredBytes);

    if (fMappingFailed) {
        return {};
    }

    // A transfer buffer is not necessary if the caller does not intend to upload CPU data to it.
    bool useTransferBuffer = supportCpuUpload && !fCaps->drawBufferCanBeMapped();

    // Retire the current buffer (and its transfer buffer) when the request doesn't fit; it is
    // flushed to the Recording later by transferToRecording().
    if (info->fBuffer && !can_fit(requiredBytes, SkTo<uint32_t>(info->fBuffer->size()),
                                  info->fOffset, info->fStartAlignment)) {
        fUsedBuffers.emplace_back(std::move(info->fBuffer), info->fTransferBuffer);
        info->fTransferBuffer = {};
    }

    if (!info->fBuffer) {
        // This buffer can be GPU-only if
        // a) the caller does not intend to ever upload CPU data to the buffer; or
        // b) CPU data will get uploaded to fBuffer only via a transfer buffer
        AccessPattern accessPattern = (useTransferBuffer || !supportCpuUpload)
                                              ? AccessPattern::kGpuOnly
                                              : AccessPattern::kHostVisible;
        size_t bufferSize = sufficient_block_size(requiredBytes, info->fBlockSize);
        info->fBuffer = fResourceProvider->findOrCreateBuffer(bufferSize,
                                                              info->fType,
                                                              accessPattern,
                                                              std::move(label));
        info->fOffset = 0;
        if (!info->fBuffer) {
            this->onFailedBuffer();
            return {};
        }
    }

    if (useTransferBuffer && !info->fTransferBuffer) {
        // The transfer buffer mirrors the full draw buffer so offsets line up 1:1.
        std::tie(info->fTransferMapPtr, info->fTransferBuffer) =
                fUploadManager->makeBindInfo(info->fBuffer->size(),
                                             fCaps->requiredTransferBufferAlignment(),
                                             "TransferForDataBuffer");

        if (!info->fTransferBuffer) {
            this->onFailedBuffer();
            return {};
        }
        SkASSERT(info->fTransferMapPtr);
    }

    info->fOffset = SkAlignTo(info->fOffset, info->fStartAlignment);
    BindBufferInfo bindInfo{info->fBuffer.get(), info->fOffset, requiredBytes};
    info->fOffset += requiredBytes;

    if (cleared == ClearBuffer::kYes) {
        fClearList.push_back(bindInfo);
    }

    return bindInfo;
}
505 | | |
506 | 0 | sk_sp<Buffer> DrawBufferManager::findReusableSbo(size_t bufferSize) { |
507 | 0 | SkASSERT(bufferSize); |
508 | 0 | SkASSERT(!fMappingFailed); |
509 | |
|
510 | 0 | for (int i = 0; i < fReusableScratchStorageBuffers.size(); ++i) { |
511 | 0 | sk_sp<Buffer>* buffer = &fReusableScratchStorageBuffers[i]; |
512 | 0 | if ((*buffer)->size() >= bufferSize) { |
513 | 0 | auto found = std::move(*buffer); |
514 | | // Fill the hole left by the move (if necessary) and shrink the pool. |
515 | 0 | if (i < fReusableScratchStorageBuffers.size() - 1) { |
516 | 0 | *buffer = std::move(fReusableScratchStorageBuffers.back()); |
517 | 0 | } |
518 | 0 | fReusableScratchStorageBuffers.pop_back(); |
519 | 0 | return found; |
520 | 0 | } |
521 | 0 | } |
522 | 0 | return nullptr; |
523 | 0 | } Unexecuted instantiation: skgpu::graphite::DrawBufferManager::findReusableSbo(unsigned long) Unexecuted instantiation: skgpu::graphite::DrawBufferManager::findReusableSbo(unsigned long) |
524 | | |
525 | | // ------------------------------------------------------------------------------------------------ |
526 | | // StaticBufferManager |
527 | | |
// Accumulates static vertex/index data into transfer buffers; finalize() later copies the data
// into GPU-only static buffers that are handed to the GlobalCache.
StaticBufferManager::StaticBufferManager(ResourceProvider* resourceProvider,
                                         const Caps* caps)
        : fResourceProvider(resourceProvider)
        , fUploadManager(resourceProvider, caps)
        , fRequiredTransferAlignment(SkTo<uint32_t>(caps->requiredTransferBufferAlignment()))
        , fVertexBufferInfo(BufferType::kVertex, caps)
        , fIndexBufferInfo(BufferType::kIndex, caps) {}
StaticBufferManager::~StaticBufferManager() = default;
536 | | |
// Static data always flows through transfer buffers, so the start alignment folds the transfer
// alignment into the buffer type's own requirement (see starting_alignment()).
StaticBufferManager::BufferInfo::BufferInfo(BufferType type, const Caps* caps)
        : fBufferType(type)
        , fAlignment(starting_alignment(type, /*useTransferBuffers=*/true, caps))
        , fTotalRequiredBytes(0) {}
541 | | |
542 | 0 | VertexWriter StaticBufferManager::getVertexWriter(size_t size, BindBufferInfo* binding) { |
543 | 0 | void* data = this->prepareStaticData(&fVertexBufferInfo, size, binding); |
544 | 0 | return VertexWriter{data, size}; |
545 | 0 | } |
546 | | |
547 | 0 | VertexWriter StaticBufferManager::getIndexWriter(size_t size, BindBufferInfo* binding) { |
548 | 0 | void* data = this->prepareStaticData(&fIndexBufferInfo, size, binding); |
549 | 0 | return VertexWriter{data, size}; |
550 | 0 | } |
551 | | |
// Reserves aligned CPU-writable transfer-buffer space for 'size' bytes of static data destined
// for 'info', records the pending copy (so finalize() can patch '*target' with the final static
// buffer location), and returns the write pointer. Returns nullptr on size overflow, a prior
// failure, or transfer-buffer mapping failure (which also sets fMappingFailed).
void* StaticBufferManager::prepareStaticData(BufferInfo* info,
                                             size_t size,
                                             BindBufferInfo* target) {
    // Zero-out the target binding in the event of any failure in actually transferring data later.
    SkASSERT(target);
    *target = {nullptr, 0};
    uint32_t size32 = validate_size(size);
    if (!size32 || fMappingFailed) {
        return nullptr;
    }

    // Both the transfer buffer and static buffers are aligned to the max required alignment for
    // the pair of buffer types involved (transfer cpu->gpu and either index or vertex). Copies
    // must also copy an aligned amount of bytes.
    size32 = SkAlignTo(size32, info->fAlignment);

    auto [transferMapPtr, transferBindInfo] =
            fUploadManager.makeBindInfo(size32,
                                        fRequiredTransferAlignment,
                                        "TransferForStaticBuffer");
    if (!transferMapPtr) {
        SKGPU_LOG_E("Failed to create or map transfer buffer that initializes static GPU data.");
        fMappingFailed = true;
        return nullptr;
    }

    info->fData.push_back({transferBindInfo, target});
    info->fTotalRequiredBytes += size32;
    return transferMapPtr;
}
582 | | |
// Creates the single GPU-only static buffer for this BufferInfo, appends one copy task per
// staged CopyRange (patching each caller's BindBufferInfo to its final offset in the static
// buffer), and transfers buffer ownership to the GlobalCache. Returns true immediately when no
// data was staged; returns false on allocation or task-submission failure.
bool StaticBufferManager::BufferInfo::createAndUpdateBindings(
        ResourceProvider* resourceProvider,
        Context* context,
        QueueManager* queueManager,
        GlobalCache* globalCache,
        std::string_view label) const {
    if (!fTotalRequiredBytes) {
        return true; // No buffer needed
    }

    sk_sp<Buffer> staticBuffer = resourceProvider->findOrCreateBuffer(
            fTotalRequiredBytes, fBufferType, AccessPattern::kGpuOnly, std::move(label));
    if (!staticBuffer) {
        SKGPU_LOG_E("Failed to create static buffer for type %d of size %u bytes.\n",
                    (int) fBufferType, fTotalRequiredBytes);
        return false;
    }

    uint32_t offset = 0;
    for (const CopyRange& data : fData) {
        // Each copy range's size should be aligned to the max of the required buffer alignment and
        // the transfer alignment, so we can just increment the offset into the static buffer.
        SkASSERT(offset % fAlignment == 0);
        uint32_t size = data.fSource.fSize;
        // Patch the caller's binding to its final location in the static buffer.
        data.fTarget->fBuffer = staticBuffer.get();
        data.fTarget->fOffset = offset;
        data.fTarget->fSize = size;

        auto copyTask = CopyBufferToBufferTask::Make(
                data.fSource.fBuffer, data.fSource.fOffset,
                sk_ref_sp(data.fTarget->fBuffer), data.fTarget->fOffset,
                size);
        if (!queueManager->addTask(copyTask.get(), context)) {
            SKGPU_LOG_E("Failed to copy data to static buffer.\n");
            return false;
        }

        offset += size;
    }

    SkASSERT(offset == fTotalRequiredBytes);
    globalCache->addStaticResource(std::move(staticBuffer));
    return true;
}
627 | | |
// Resolves all staged static data: creates the static vertex and index buffers, queues the
// transfer-to-static copy tasks, and hands the transfer-buffer refs to the queue manager.
// Returns kNoWork when nothing was staged, kFailure if any prior mapping or any buffer/task
// creation failed, and kSuccess after resetting internal state for reuse.
StaticBufferManager::FinishResult StaticBufferManager::finalize(Context* context,
                                                                QueueManager* queueManager,
                                                                GlobalCache* globalCache) {
    if (fMappingFailed) {
        return FinishResult::kFailure;
    }

    const size_t totalRequiredBytes = fVertexBufferInfo.fTotalRequiredBytes +
                                      fIndexBufferInfo.fTotalRequiredBytes;
    // Keeps steady-state static memory under the documented cap (see kMaxStaticDataSize).
    SkASSERT(totalRequiredBytes <= kMaxStaticDataSize);
    if (!totalRequiredBytes) {
        return FinishResult::kNoWork;
    }

    if (!fVertexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                   context,
                                                   queueManager,
                                                   globalCache,
                                                   "StaticVertexBuffer")) {
        return FinishResult::kFailure;
    }
    if (!fIndexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                  context,
                                                  queueManager,
                                                  globalCache,
                                                  "StaticIndexBuffer")) {
        return FinishResult::kFailure;
    }
    queueManager->addUploadBufferManagerRefs(&fUploadManager);

    // Reset the static buffer manager since the Recording's copy tasks now manage ownership of
    // the transfer buffers and the GlobalCache owns the final static buffers.
    fVertexBufferInfo.reset();
    fIndexBufferInfo.reset();

    return FinishResult::kSuccess;
}
665 | | |
666 | | } // namespace skgpu::graphite |