/src/skia/src/gpu/graphite/compute/DispatchGroup.cpp

/*
 * Copyright 2023 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/compute/DispatchGroup.h"

#include "include/gpu/graphite/Recorder.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ComputePipeline.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/PipelineData.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/Texture.h"
#include "src/gpu/graphite/UniformManager.h"
#include "src/gpu/graphite/task/ClearBuffersTask.h"

namespace skgpu::graphite {

DispatchGroup::~DispatchGroup() = default;

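// Resolves the group's deferred descriptors into GPU resources: compute pipelines and
// samplers are created (or fetched from the cache) and every bound texture proxy is
// validated and instantiated. Any failure drops the whole group.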
bool DispatchGroup::prepareResources(ResourceProvider* resourceProvider) {
    fPipelines.reserve(fPipelines.size() + fPipelineDescs.size());
    for (const ComputePipelineDesc& desc : fPipelineDescs) {
        auto pipeline = resourceProvider->findOrCreateComputePipeline(desc);
        if (!pipeline) {
            SKGPU_LOG_W("Failed to create ComputePipeline for dispatch group. Dropping group!");
            return false;
        }
        fPipelines.push_back(std::move(pipeline));
    }

    for (int i = 0; i < fTextures.size(); ++i) {
        if (!fTextures[i]->textureInfo().isValid()) {
            SKGPU_LOG_W("Failed to validate bound texture. Dropping dispatch group!");
            return false;
        }
        if (!TextureProxy::InstantiateIfNotLazy(resourceProvider, fTextures[i].get())) {
            SKGPU_LOG_W("Failed to instantiate bound texture. Dropping dispatch group!");
            return false;
        }
    }

    for (const SamplerDesc& desc : fSamplerDescs) {
        sk_sp<Sampler> sampler = resourceProvider->findOrCreateCompatibleSampler(desc);
        if (!sampler) {
            SKGPU_LOG_W("Failed to create sampler. Dropping dispatch group!");
            return false;
        }
        fSamplers.push_back(std::move(sampler));
    }

    // The DispatchGroup may be long-lived on a Recording, and we no longer need the descriptors
    // once we've created the pipelines and samplers.
    fPipelineDescs.clear();
    fSamplerDescs.clear();

    return true;
}

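// Hands refs on every pipeline and texture over to the CommandBuffer so the resources stay
// alive for the duration of the recorded work.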
void DispatchGroup::addResourceRefs(CommandBuffer* commandBuffer) const {
    for (int i = 0; i < fPipelines.size(); ++i) {
        commandBuffer->trackResource(fPipelines[i]);
    }
    for (int i = 0; i < fTextures.size(); ++i) {
        commandBuffer->trackCommandBufferResource(fTextures[i]->refTexture());
    }
}

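// Snaps a child task that performs any buffer clears scheduled via assignSharedBuffer();
// returns nullptr if there is nothing to clear.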
sk_sp<Task> DispatchGroup::snapChildTask() {
    if (fClearList.empty()) {
        return nullptr;
    }
    return ClearBuffersTask::Make(std::move(fClearList));
}

const Texture* DispatchGroup::getTexture(size_t index) const {
    SkASSERT(index < SkToSizeT(fTextures.size()));
    SkASSERT(fTextures[index]);
    SkASSERT(fTextures[index]->texture());
    return fTextures[index]->texture();
}

const Sampler* DispatchGroup::getSampler(size_t index) const {
    SkASSERT(index < SkToSizeT(fSamplers.size()));
    SkASSERT(fSamplers[index]);
    return fSamplers[index].get();
}

using Builder = DispatchGroup::Builder;

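// A minimal usage sketch (the ComputeStep subclass `MyStep` is hypothetical; when no global
// size is given, appendStep() falls back to the step's calculateGlobalDispatchSize()):
//
//     DispatchGroup::Builder builder(recorder);
//     MyStep step;
//     if (!builder.appendStep(&step, /*globalSize=*/std::nullopt)) {
//         return;  // a resource for the step could not be allocated
//     }
//     std::unique_ptr<DispatchGroup> group = builder.finalize();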
Builder::Builder(Recorder* recorder) : fObj(new DispatchGroup()), fRecorder(recorder) {
    SkASSERT(fRecorder);
}

bool Builder::appendStep(const ComputeStep* step, std::optional<WorkgroupSize> globalSize) {
    return this->appendStepInternal(step,
                                    globalSize ? *globalSize
                                               : step->calculateGlobalDispatchSize());
}

bool Builder::appendStepIndirect(const ComputeStep* step, BufferView indirectBuffer) {
    return this->appendStepInternal(step, indirectBuffer);
}

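// Shared implementation for appendStep() and appendStepIndirect(): records a Dispatch for
// `step`, allocating or looking up each declared resource and assigning its backend binding
// index. Returns false if any resource allocation fails.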
bool Builder::appendStepInternal(
        const ComputeStep* step,
        const std::variant<WorkgroupSize, BufferView>& globalSizeOrIndirect) {
    SkASSERT(fObj);
    SkASSERT(step);

    Dispatch dispatch;

    // Process the step's resources.
    auto resources = step->resources();
    dispatch.fBindings.reserve(resources.size());

    // `nextIndex` matches the declaration order of resources as specified by the ComputeStep.
    int nextIndex = 0;

    // We assign buffer, texture, and sampler indices from separate ranges. This is compatible
    // with how Graphite assigns indices on Metal, as these map directly to the
    // buffer/texture/sampler index ranges. On Dawn/Vulkan, buffers and textures/samplers are
    // allocated from separate bind groups/descriptor sets, but texture and sampler indices must
    // not overlap.
    const auto& bindingReqs = fRecorder->priv().caps()->resourceBindingRequirements();
    bool distinctRanges = bindingReqs.fDistinctIndexRanges;
    bool separateSampler = bindingReqs.fSeparateTextureAndSamplerBinding;
    int bufferOrGlobalIndex = 0;
    int texIndex = 0;
    // NOTE: SkSL Metal codegen always assigns the same binding index to a texture and its
    // sampler.
    // TODO: This could cause sampler indices to not be tightly packed if the sampler2D
    // declaration comes after 1 or more storage texture declarations (which don't have
    // samplers).
    for (const ComputeStep::ResourceDesc& r : resources) {
        SkASSERT(r.fSlot == -1 || (r.fSlot >= 0 && r.fSlot < kMaxComputeDataFlowSlots));
        const int index = nextIndex++;

        DispatchResourceOptional maybeResource;

        using DataFlow = ComputeStep::DataFlow;
        using Type = ComputeStep::ResourceType;
        switch (r.fFlow) {
            case DataFlow::kPrivate:
                // A sampled or fetched-type readonly texture must either get assigned via
                // `assignSharedTexture()` or internally allocated as a storage texture of a
                // preceding step. Such a texture always has a data slot.
                SkASSERT(r.fType != Type::kReadOnlyTexture);
                SkASSERT(r.fType != Type::kSampledTexture);
                maybeResource = this->allocateResource(step, r, index);
                break;
            case DataFlow::kShared: {
                SkASSERT(r.fSlot >= 0);
                // Allocate a new resource only if the shared slot is empty (except for a
                // SampledTexture which needs its sampler to be allocated internally).
                DispatchResourceOptional* slot = &fOutputTable.fSharedSlots[r.fSlot];
                if (std::holds_alternative<std::monostate>(*slot)) {
                    SkASSERT(r.fType != Type::kReadOnlyTexture);
                    SkASSERT(r.fType != Type::kSampledTexture);
                    maybeResource = this->allocateResource(step, r, index);
                    *slot = maybeResource;
                } else {
                    SkASSERT(((r.fType == Type::kUniformBuffer ||
                               r.fType == Type::kStorageBuffer ||
                               r.fType == Type::kReadOnlyStorageBuffer ||
                               r.fType == Type::kIndirectBuffer) &&
                              std::holds_alternative<BufferView>(*slot)) ||
                             ((r.fType == Type::kReadOnlyTexture ||
                               r.fType == Type::kSampledTexture ||
                               r.fType == Type::kWriteOnlyStorageTexture) &&
                              std::holds_alternative<TextureIndex>(*slot)));
#ifdef SK_DEBUG
                    // Ensure that the texture has the right format if it was assigned via
                    // `assignSharedTexture()`.
                    const TextureIndex* texIdx = std::get_if<TextureIndex>(slot);
                    if (texIdx && r.fType == Type::kWriteOnlyStorageTexture) {
                        const TextureProxy* t = fObj->fTextures[texIdx->fValue].get();
                        SkASSERT(t);
                        auto [_, colorType] = step->calculateTextureParameters(index, r);
                        SkASSERT(t->textureInfo().isCompatible(
                                fRecorder->priv().caps()->getDefaultStorageTextureInfo(
                                        colorType)));
                    }
#endif  // SK_DEBUG

                    maybeResource = *slot;

                    if (r.fType == Type::kSampledTexture) {
                        // The shared slot holds the texture part of the sampled texture but we
                        // still need to allocate the sampler.
                        SkASSERT(std::holds_alternative<TextureIndex>(*slot));
                        auto samplerResource = this->allocateResource(step, r, index);
                        const SamplerIndex* samplerIdx =
                                std::get_if<SamplerIndex>(&samplerResource);
                        SkASSERT(samplerIdx);
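                        // Three cases: distinct index ranges reuse the texture's index (the
                        // Metal convention noted above); a separate sampler binding consumes
                        // its own global slot; otherwise the sampler shares the global index
                        // that its texture will claim below.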
                        int bindingIndex = distinctRanges    ? texIndex
                                           : separateSampler ? bufferOrGlobalIndex++
                                                             : bufferOrGlobalIndex;
                        dispatch.fBindings.push_back(
                                {static_cast<BindingIndex>(bindingIndex), *samplerIdx});
                    }
                }
                break;
            }
        }

        int bindingIndex = 0;
        DispatchResource dispatchResource;
        if (const BufferView* buffer = std::get_if<BufferView>(&maybeResource)) {
            dispatchResource = *buffer;
            bindingIndex = bufferOrGlobalIndex++;
        } else if (const TextureIndex* texIdx = std::get_if<TextureIndex>(&maybeResource)) {
            dispatchResource = *texIdx;
            bindingIndex = distinctRanges ? texIndex++ : bufferOrGlobalIndex++;
        } else {
            SKGPU_LOG_W("Failed to allocate resource for compute dispatch");
            return false;
        }
        dispatch.fBindings.push_back({static_cast<BindingIndex>(bindingIndex), dispatchResource});
    }

    auto wgBufferDescs = step->workgroupBuffers();
    if (!wgBufferDescs.empty()) {
        dispatch.fWorkgroupBuffers.push_back_n(wgBufferDescs.size(), wgBufferDescs.data());
    }

    // We need to switch pipelines if this step uses a different pipeline from the previous step.
    if (fObj->fPipelineDescs.empty() ||
        fObj->fPipelineDescs.back().uniqueID() != step->uniqueID()) {
        fObj->fPipelineDescs.push_back(ComputePipelineDesc(step));
    }

    dispatch.fPipelineIndex = fObj->fPipelineDescs.size() - 1;
    dispatch.fLocalSize = step->localDispatchSize();
    dispatch.fGlobalSizeOrIndirect = globalSizeOrIndirect;

    fObj->fDispatchList.push_back(std::move(dispatch));

    return true;
}

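// Binds `buffer` to the given data-flow slot so subsequent steps can reference it. If
// `cleared` is ClearBuffer::kYes, the range is added to the clear list that snapChildTask()
// turns into a ClearBuffersTask.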
void Builder::assignSharedBuffer(BufferView buffer, unsigned int slot, ClearBuffer cleared) {
    SkASSERT(fObj);
    SkASSERT(buffer.fInfo);
    SkASSERT(buffer.fSize);

    fOutputTable.fSharedSlots[slot] = buffer;
    if (cleared == ClearBuffer::kYes) {
        fObj->fClearList.push_back({buffer.fInfo.fBuffer, buffer.fInfo.fOffset, buffer.fSize});
    }
}

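// Binds an externally created texture proxy to the given data-flow slot. This is the only
// way to provide a kReadOnlyTexture resource (see allocateResource() below).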
void Builder::assignSharedTexture(sk_sp<TextureProxy> texture, unsigned int slot) {
    SkASSERT(fObj);
    SkASSERT(texture);

    fObj->fTextures.push_back(std::move(texture));
    fOutputTable.fSharedSlots[slot] = TextureIndex{fObj->fTextures.size() - 1u};
}

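// Releases the completed DispatchGroup and resets the shared-slot table. The Builder holds
// no group afterwards, so appending further steps would trip the SkASSERT(fObj) checks.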
std::unique_ptr<DispatchGroup> Builder::finalize() {
    auto obj = std::move(fObj);
    fOutputTable.reset();
    return obj;
}

#if defined(GRAPHITE_TEST_UTILS)
void Builder::reset() {
    fOutputTable.reset();
    fObj.reset(new DispatchGroup);
}
#endif

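// Returns the buffer bound to `slot`, or an empty BindBufferInfo if the slot is vacant or
// holds a texture.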
BindBufferInfo Builder::getSharedBufferResource(unsigned int slot) const {
    SkASSERT(fObj);

    BindBufferInfo info;
    if (const BufferView* slotValue = std::get_if<BufferView>(&fOutputTable.fSharedSlots[slot])) {
        info = slotValue->fInfo;
    }
    return info;
}

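// Returns the texture proxy bound to `slot`, or nullptr if the slot is vacant or holds a
// buffer.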
sk_sp<TextureProxy> Builder::getSharedTextureResource(unsigned int slot) const {
    SkASSERT(fObj);

    const TextureIndex* idx = std::get_if<TextureIndex>(&fOutputTable.fSharedSlots[slot]);
    if (!idx) {
        return nullptr;
    }

    SkASSERT(idx->fValue < SkToSizeT(fObj->fTextures.size()));
    return fObj->fTextures[idx->fValue];
}

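// Creates the backing resource for one declared ResourceDesc: storage/indirect/uniform
// buffers come from the DrawBufferManager, storage textures are allocated as TextureProxies,
// and sampled textures only allocate their SamplerDesc here. Returns std::monostate on
// allocation failure.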
DispatchResourceOptional Builder::allocateResource(const ComputeStep* step,
                                                   const ComputeStep::ResourceDesc& resource,
                                                   int resourceIdx) {
    SkASSERT(step);
    SkASSERT(fObj);
    using Type = ComputeStep::ResourceType;
    using ResourcePolicy = ComputeStep::ResourcePolicy;

    DrawBufferManager* bufferMgr = fRecorder->priv().drawBufferManager();
    DispatchResourceOptional result;
    switch (resource.fType) {
        case Type::kReadOnlyStorageBuffer:
        case Type::kStorageBuffer: {
            size_t bufferSize = step->calculateBufferSize(resourceIdx, resource);
            SkASSERT(bufferSize);
            if (resource.fPolicy == ResourcePolicy::kMapped) {
                auto [ptr, bufInfo] = bufferMgr->getStoragePointer(bufferSize);
                if (ptr) {
                    step->prepareStorageBuffer(resourceIdx, resource, ptr, bufferSize);
                    result = BufferView{bufInfo, bufferSize};
                }
            } else {
                auto bufInfo = bufferMgr->getStorage(bufferSize,
                                                     resource.fPolicy == ResourcePolicy::kClear
                                                             ? ClearBuffer::kYes
                                                             : ClearBuffer::kNo);
                if (bufInfo) {
                    result = BufferView{bufInfo, bufferSize};
                }
            }
            break;
        }
        case Type::kIndirectBuffer: {
            SkASSERT(resource.fPolicy != ResourcePolicy::kMapped);

            size_t bufferSize = step->calculateBufferSize(resourceIdx, resource);
            SkASSERT(bufferSize);
            auto bufInfo = bufferMgr->getIndirectStorage(bufferSize,
                                                         resource.fPolicy == ResourcePolicy::kClear
                                                                 ? ClearBuffer::kYes
                                                                 : ClearBuffer::kNo);
            if (bufInfo) {
                result = BufferView{bufInfo, bufferSize};
            }
            break;
        }
        case Type::kUniformBuffer: {
            SkASSERT(resource.fPolicy == ResourcePolicy::kMapped);

            const auto& resourceReqs = fRecorder->priv().caps()->resourceBindingRequirements();
            UniformManager uboMgr(resourceReqs.fUniformBufferLayout);
            step->prepareUniformBuffer(resourceIdx, resource, &uboMgr);

            auto dataBlock = uboMgr.finishUniformDataBlock();
            SkASSERT(dataBlock.size());

            auto [writer, bufInfo] = bufferMgr->getUniformWriter(dataBlock.size());
            if (bufInfo) {
                writer.write(dataBlock.data(), dataBlock.size());
                result = BufferView{bufInfo, dataBlock.size()};
            }
            break;
        }
        case Type::kWriteOnlyStorageTexture: {
            auto [size, colorType] = step->calculateTextureParameters(resourceIdx, resource);
            SkASSERT(!size.isEmpty());
            SkASSERT(colorType != kUnknown_SkColorType);

            auto textureInfo = fRecorder->priv().caps()->getDefaultStorageTextureInfo(colorType);
            sk_sp<TextureProxy> texture = TextureProxy::Make(
                    fRecorder->priv().caps(), fRecorder->priv().resourceProvider(),
                    size, textureInfo, "DispatchWriteOnlyStorageTexture", skgpu::Budgeted::kYes);
            if (texture) {
                fObj->fTextures.push_back(std::move(texture));
                result = TextureIndex{fObj->fTextures.size() - 1u};
            }
            break;
        }
        case Type::kReadOnlyTexture:
            // This resource type is meant to be populated externally (e.g. by an upload or a
            // render pass) and only read/sampled by a ComputeStep. It's not meaningful to
            // allocate an internal texture for a DispatchGroup if none of the ComputeSteps will
            // write to it.
            //
            // Instead of using internal allocation, this texture must be assigned explicitly to
            // a slot by calling the Builder::assignSharedTexture() method.
            //
            // Note: A ComputeStep is allowed to read/sample from a storage texture that a
            // previous ComputeStep has written to.
            SK_ABORT("a readonly texture must be externally assigned to a ComputeStep");
            break;
        case Type::kSampledTexture: {
            fObj->fSamplerDescs.push_back(step->calculateSamplerParameters(resourceIdx, resource));
            result = SamplerIndex{fObj->fSamplerDescs.size() - 1u};
            break;
        }
    }
    return result;
}

}  // namespace skgpu::graphite