/src/mozilla-central/gfx/layers/mlgpu/FrameBuilder.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "FrameBuilder.h" |
8 | | #include "ContainerLayerMLGPU.h" |
9 | | #include "GeckoProfiler.h" // for profiler_* |
10 | | #include "LayerMLGPU.h" |
11 | | #include "LayerManagerMLGPU.h" |
12 | | #include "MaskOperation.h" |
13 | | #include "MLGDevice.h" // for MLGSwapChain |
14 | | #include "RenderPassMLGPU.h" |
15 | | #include "RenderViewMLGPU.h" |
16 | | #include "mozilla/gfx/Logging.h" |
17 | | #include "mozilla/gfx/Polygon.h" |
18 | | #include "mozilla/layers/BSPTree.h" |
19 | | #include "mozilla/layers/LayersHelpers.h" |
20 | | |
21 | | namespace mozilla { |
22 | | namespace layers { |
23 | | |
24 | | using namespace mlg; |
25 | | |
// Captures the manager, its device, and the swap chain for this frame, and
// resolves the root layer up front so later traversal can start from it.
FrameBuilder::FrameBuilder(LayerManagerMLGPU* aManager, MLGSwapChain* aSwapChain)
 : mManager(aManager),
   mDevice(aManager->GetDevice()),
   mSwapChain(aSwapChain)
{
  // test_bug1124898.html has a root ColorLayer, so we don't assume the root is
  // a container.
  mRoot = mManager->GetRoot()->AsHostLayer()->AsLayerMLGPU();
}
35 | | |
36 | | FrameBuilder::~FrameBuilder() |
37 | 0 | { |
38 | 0 | } |
39 | | |
// Prepares everything needed to render the frame: acquires the back buffer,
// starts a new layer frame, builds the widget render view, assigns every
// layer in the tree to a view, and flushes the shared constant buffers.
// Returns false if the back buffer or the default mask constant buffer
// could not be acquired/allocated, in which case nothing should be drawn.
bool
FrameBuilder::Build()
{
  AUTO_PROFILER_LABEL("FrameBuilder::Build", GRAPHICS);

  // AcquireBackBuffer can fail, so we check the result here.
  RefPtr<MLGRenderTarget> target = mSwapChain->AcquireBackBuffer();
  if (!target) {
    return false;
  }

  // This updates the frame sequence number, so layers can quickly check if
  // they've already been prepared.
  LayerMLGPU::BeginFrame();

  // Note: we don't clip draw calls to the invalid region per se, but instead
  // the region bounds. Clipping all draw calls would incur a significant
  // CPU cost on large layer trees, and would greatly complicate how draw
  // rects are added in RenderPassMLGPU, since we would need to break
  // each call into additional items based on the intersection with the
  // invalid region.
  //
  // Instead we scissor to the invalid region bounds. As a result, all items
  // affecting the invalid bounds are redrawn, even if not all are in the
  // precise region.
  const nsIntRegion& region = mSwapChain->GetBackBufferInvalidRegion();

  mWidgetRenderView = new RenderViewMLGPU(this, target, region);

  // Traverse the layer tree and compute visible region for intermediate surfaces
  if (ContainerLayerMLGPU* root = mRoot->AsLayerMLGPU()->AsContainerLayerMLGPU()) {
    root->ComputeIntermediateSurfaceBounds();
  }

  // Traverse the layer tree and assign each layer to tiles. The clip starts
  // as the full back-buffer rect; children narrow it as we descend.
  {
    Maybe<gfx::Polygon> geometry;
    RenderTargetIntRect clip(0, 0, target->GetSize().width, target->GetSize().height);

    AssignLayer(mRoot->GetLayer(), mWidgetRenderView, clip, std::move(geometry));
  }

  // Build the default mask buffer (opacity 1.0, no mask texture), shared by
  // all items that have no mask of their own.
  {
    MaskInformation defaultMaskInfo(1.0f, false);
    if (!mDevice->GetSharedPSBuffer()->Allocate(&mDefaultMaskInfo, defaultMaskInfo)) {
      return false;
    }
  }

  // Build render passes and buffer information for each pass.
  mWidgetRenderView->FinishBuilding();
  mWidgetRenderView->Prepare();

  // Prepare masks that need to be combined.
  for (const auto& pair : mCombinedTextureMasks) {
    pair.second->PrepareForRendering();
  }

  // Flush any partially-filled constant buffers so their data is available
  // when rendering.
  FinishCurrentLayerBuffer();
  FinishCurrentMaskRectBuffer();
  return true;
}
103 | | |
104 | | void |
105 | | FrameBuilder::Render() |
106 | 0 | { |
107 | 0 | AUTO_PROFILER_LABEL("FrameBuilder::Render", GRAPHICS); |
108 | 0 |
|
109 | 0 | // Render combined masks into single mask textures. |
110 | 0 | for (const auto& pair : mCombinedTextureMasks) { |
111 | 0 | pair.second->Render(); |
112 | 0 | } |
113 | 0 |
|
114 | 0 | // Render to all targets, front-to-back. |
115 | 0 | mWidgetRenderView->Render(); |
116 | 0 | } |
117 | | |
// Assigns aLayer (and, for containers, possibly its subtree) to a render
// view. aClipRect is the scissor rect inherited from the parent; aGeometry
// carries split-polygon geometry when the layer is part of a 3D context.
void
FrameBuilder::AssignLayer(Layer* aLayer,
                          RenderViewMLGPU* aView,
                          const RenderTargetIntRect& aClipRect,
                          Maybe<gfx::Polygon>&& aGeometry)
{
  LayerMLGPU* layer = aLayer->AsHostLayer()->AsLayerMLGPU();

  if (ContainerLayer* container = aLayer->AsContainerLayer()) {
    // This returns false if we don't need to (or can't) process the layer any
    // further. This always returns false for non-leaf ContainerLayers.
    if (!ProcessContainerLayer(container, aView, aClipRect, aGeometry)) {
      return;
    }
  } else {
    // Set the precomputed clip and any textures/resources that are needed.
    if (!layer->PrepareToRender(this, aClipRect)) {
      return;
    }
  }

  // If we are dealing with a nested 3D context, we might need to transform
  // the geometry back to the coordinate space of the current layer.
  if (aGeometry) {
    TransformLayerGeometry(aLayer, aGeometry);
  }

  // Finally, assign the layer to a rendering batch in the current render
  // target.
  layer->AssignToView(this, aView, std::move(aGeometry));
}
149 | | |
// Handles a container encountered during tree traversal. Returns true when
// the container itself should also be assigned to a view (i.e. it renders
// through an intermediate surface); returns false when the caller should
// stop (pass-through containers whose children were processed here, invalid
// layer types, or PrepareToRender failure).
bool
FrameBuilder::ProcessContainerLayer(ContainerLayer* aContainer,
                                    RenderViewMLGPU* aView,
                                    const RenderTargetIntRect& aClipRect,
                                    Maybe<gfx::Polygon>& aGeometry)
{
  LayerMLGPU* layer = aContainer->AsHostLayer()->AsLayerMLGPU();

  // Diagnostic information for bug 1387467.
  if (!layer) {
    gfxDevCrash(gfx::LogReason::InvalidLayerType) <<
      "Layer type is invalid: " << aContainer->Name();
    return false;
  }

  // We don't want to traverse containers twice, so we only traverse them if
  // they haven't been prepared yet.
  bool isFirstVisit = !layer->IsPrepared();
  if (isFirstVisit && !layer->PrepareToRender(this, aClipRect)) {
    return false;
  }

  if (!aContainer->UseIntermediateSurface()) {
    // In case the layer previously required an intermediate surface, we
    // clear any intermediate render targets here.
    layer->ClearCachedResources();

    // This is a pass-through container, so we just process children and
    // instruct AssignLayer to early-return.
    ProcessChildList(aContainer, aView, aClipRect, aGeometry);
    return false;
  }

  // If this is the first visit of the container this frame, and the
  // container has an unpainted area, we traverse the container. Note that
  // RefLayers do not have intermediate surfaces so this is guaranteed
  // to be a full-fledged ContainerLayerMLGPU.
  ContainerLayerMLGPU* viewContainer = layer->AsContainerLayerMLGPU();
  if (!viewContainer) {
    gfxDevCrash(gfx::LogReason::InvalidLayerType) <<
      "Container layer type is invalid: " << aContainer->Name();
    return false;
  }

  if (isFirstVisit && !viewContainer->GetInvalidRect().IsEmpty()) {
    // The RenderView constructor automatically attaches itself to the parent.
    RefPtr<RenderViewMLGPU> view = new RenderViewMLGPU(this, viewContainer, aView);
    // Children of an intermediate surface start with fresh (empty) geometry.
    ProcessChildList(aContainer, view, aClipRect, Nothing());
    view->FinishBuilding();
  }
  return true;
}
202 | | |
// Visits aContainer's children in front-to-back order (reverse of the 3D
// z-sort), computing each child's scissor rect and combined split-polygon
// geometry, then recurses through AssignLayer. Hidden-backface, invisible,
// and fully-clipped children are skipped.
void
FrameBuilder::ProcessChildList(ContainerLayer* aContainer,
                               RenderViewMLGPU* aView,
                               const RenderTargetIntRect& aParentClipRect,
                               const Maybe<gfx::Polygon>& aParentGeometry)
{
  nsTArray<LayerPolygon> polygons =
    aContainer->SortChildrenBy3DZOrder(ContainerLayer::SortMode::WITH_GEOMETRY);

  // Visit layers in front-to-back order.
  for (auto iter = polygons.rbegin(); iter != polygons.rend(); iter++) {
    LayerPolygon& entry = *iter;
    Layer* child = entry.layer;
    if (child->IsBackfaceHidden() || !child->IsVisible()) {
      continue;
    }

    RenderTargetIntRect clip = child->CalculateScissorRect(aParentClipRect);
    if (clip.IsEmpty()) {
      continue;
    }

    // Merge the parent's split geometry with the child's, when both exist.
    Maybe<gfx::Polygon> geometry;
    if (aParentGeometry && entry.geometry) {
      // Both parent and child are split.
      geometry = Some(aParentGeometry->ClipPolygon(*entry.geometry));
    } else if (aParentGeometry) {
      geometry = aParentGeometry;
    } else if (entry.geometry) {
      geometry = std::move(entry.geometry);
    }

    AssignLayer(child, aView, clip, std::move(geometry));
  }
}
238 | | |
239 | | bool |
240 | | FrameBuilder::AddLayerToConstantBuffer(ItemInfo& aItem) |
241 | 0 | { |
242 | 0 | LayerMLGPU* layer = aItem.layer; |
243 | 0 |
|
244 | 0 | // If this layer could appear multiple times, cache it. |
245 | 0 | if (aItem.geometry) { |
246 | 0 | if (mLayerBufferMap.Get(layer, &aItem.layerIndex)) { |
247 | 0 | return true; |
248 | 0 | } |
249 | 0 | } |
250 | 0 | |
251 | 0 | LayerConstants* info = AllocateLayerInfo(aItem); |
252 | 0 | if (!info) { |
253 | 0 | return false; |
254 | 0 | } |
255 | 0 | |
256 | 0 | // Note we do not use GetEffectiveTransformForBuffer, since we calculate |
257 | 0 | // the correct scaling when we build texture coordinates. |
258 | 0 | Layer* baseLayer = layer->GetLayer(); |
259 | 0 | const gfx::Matrix4x4& transform = baseLayer->GetEffectiveTransform(); |
260 | 0 |
|
261 | 0 | memcpy(&info->transform, &transform._11, 64); |
262 | 0 | info->clipRect = gfx::Rect(layer->GetComputedClipRect().ToUnknownRect()); |
263 | 0 | info->maskIndex = 0; |
264 | 0 | if (MaskOperation* op = layer->GetMask()) { |
265 | 0 | // Note: we use 0 as an invalid index, and so indices are offset by 1. |
266 | 0 | gfx::Rect rect = op->ComputeMaskRect(baseLayer); |
267 | 0 | AddMaskRect(rect, &info->maskIndex); |
268 | 0 | } |
269 | 0 |
|
270 | 0 | if (aItem.geometry) { |
271 | 0 | mLayerBufferMap.Put(layer, aItem.layerIndex); |
272 | 0 | } |
273 | 0 | return true; |
274 | 0 | } |
275 | | |
// Returns the MaskOperation for aLayer's mask layer(s), or null if the mask
// texture is unavailable. Layers with more than one mask (own mask plus
// ancestor masks, or multiple ancestor masks) get a combined mask, cached
// per exact texture list in mCombinedTextureMasks; single-mask layers get a
// wrapper cached per texture in mSingleTextureMasks.
MaskOperation*
FrameBuilder::AddMaskOperation(LayerMLGPU* aLayer)
{
  Layer* layer = aLayer->GetLayer();
  MOZ_ASSERT(layer->HasMaskLayers());

  // Multiple masks are combined into a single mask.
  if ((layer->GetMaskLayer() && layer->GetAncestorMaskLayerCount()) ||
      layer->GetAncestorMaskLayerCount() > 1)
  {
    // Since each mask can be moved independently of the other, we must create
    // a separate combined mask for every new positioning we encounter.
    MaskTextureList textures;
    if (Layer* maskLayer = layer->GetMaskLayer()) {
      AppendToMaskTextureList(textures, maskLayer);
    }
    for (size_t i = 0; i < layer->GetAncestorMaskLayerCount(); i++) {
      AppendToMaskTextureList(textures, layer->GetAncestorMaskLayerAt(i));
    }

    // Reuse a previously-built combined mask for this exact texture list.
    auto iter = mCombinedTextureMasks.find(textures);
    if (iter != mCombinedTextureMasks.end()) {
      return iter->second;
    }

    RefPtr<MaskCombineOperation> op = new MaskCombineOperation(this);
    op->Init(textures);

    mCombinedTextureMasks[textures] = op;
    return op;
  }

  // Exactly one mask layer: either the layer's own, or a single ancestor's.
  Layer* maskLayer = layer->GetMaskLayer()
                     ? layer->GetMaskLayer()
                     : layer->GetAncestorMaskLayerAt(0);
  RefPtr<TextureSource> texture = GetMaskLayerTexture(maskLayer);
  if (!texture) {
    return nullptr;
  }

  RefPtr<MaskOperation> op;
  mSingleTextureMasks.Get(texture, getter_AddRefs(op));
  if (op) {
    return op;
  }

  RefPtr<MLGTexture> wrapped = mDevice->CreateTexture(texture);

  op = new MaskOperation(this, wrapped);
  mSingleTextureMasks.Put(texture, op);
  return op;
}
328 | | |
329 | | void |
330 | | FrameBuilder::RetainTemporaryLayer(LayerMLGPU* aLayer) |
331 | 0 | { |
332 | 0 | // This should only be used with temporary layers. Temporary layers do not |
333 | 0 | // have parents. |
334 | 0 | MOZ_ASSERT(!aLayer->GetLayer()->GetParent()); |
335 | 0 | mTemporaryLayers.push_back(aLayer->GetLayer()); |
336 | 0 | } |
337 | | |
// Reserves a LayerConstants slot in the current layer constant buffer. If
// one more entry would exceed the device's maximum constant-buffer bind
// size, the current buffer is flushed first and the layer->index cache is
// invalidated (cached indices refer to the old buffer). On success,
// aItem.layerIndex is the slot's index within the current buffer; returns
// null on fallible-append failure.
LayerConstants*
FrameBuilder::AllocateLayerInfo(ItemInfo& aItem)
{
  if (((mCurrentLayerBuffer.Length() + 1) * sizeof(LayerConstants)) >
      mDevice->GetMaxConstantBufferBindSize())
  {
    FinishCurrentLayerBuffer();
    mLayerBufferMap.Clear();
    mCurrentLayerBuffer.ClearAndRetainStorage();
  }

  LayerConstants* info = mCurrentLayerBuffer.AppendElement(mozilla::fallible);
  if (!info) {
    return nullptr;
  }

  aItem.layerIndex = mCurrentLayerBuffer.Length() - 1;
  return info;
}
357 | | |
358 | | void |
359 | | FrameBuilder::FinishCurrentLayerBuffer() |
360 | 0 | { |
361 | 0 | if (mCurrentLayerBuffer.IsEmpty()) { |
362 | 0 | return; |
363 | 0 | } |
364 | 0 | |
365 | 0 | // Note: we append the buffer even if we couldn't allocate one, since |
366 | 0 | // that keeps the indices sane. |
367 | 0 | ConstantBufferSection section; |
368 | 0 | mDevice->GetSharedVSBuffer()->Allocate( |
369 | 0 | §ion, |
370 | 0 | mCurrentLayerBuffer.Elements(), |
371 | 0 | mCurrentLayerBuffer.Length()); |
372 | 0 | mLayerBuffers.AppendElement(section); |
373 | 0 | } |
374 | | |
// Returns the index the in-progress layer constant buffer will have once
// flushed.
size_t
FrameBuilder::CurrentLayerBufferIndex() const
{
  // The layer buffer list doesn't contain the buffer currently being
  // built, so we don't subtract 1 here.
  return mLayerBuffers.Length();
}
382 | | |
383 | | ConstantBufferSection |
384 | | FrameBuilder::GetLayerBufferByIndex(size_t aIndex) const |
385 | 0 | { |
386 | 0 | if (aIndex >= mLayerBuffers.Length()) { |
387 | 0 | return ConstantBufferSection(); |
388 | 0 | } |
389 | 0 | return mLayerBuffers[aIndex]; |
390 | 0 | } |
391 | | |
// Appends a mask rect to the current mask-rect constant buffer, flushing
// first if one more rect would exceed the device's maximum bind size.
// *aOutIndex receives the rect's 1-based index (shaders treat 0 as
// "no mask"). Always returns true.
bool
FrameBuilder::AddMaskRect(const gfx::Rect& aRect, uint32_t* aOutIndex)
{
  if (((mCurrentMaskRectList.Length() + 1) * sizeof(gfx::Rect)) >
      mDevice->GetMaxConstantBufferBindSize())
  {
    FinishCurrentMaskRectBuffer();
    mCurrentMaskRectList.ClearAndRetainStorage();
  }

  mCurrentMaskRectList.AppendElement(aRect);

  // Mask indices start at 1 so the shader can use 0 as a no-mask indicator.
  *aOutIndex = mCurrentMaskRectList.Length();
  return true;
}
408 | | |
409 | | void |
410 | | FrameBuilder::FinishCurrentMaskRectBuffer() |
411 | 0 | { |
412 | 0 | if (mCurrentMaskRectList.IsEmpty()) { |
413 | 0 | return; |
414 | 0 | } |
415 | 0 | |
416 | 0 | // Note: we append the buffer even if we couldn't allocate one, since |
417 | 0 | // that keeps the indices sane. |
418 | 0 | ConstantBufferSection section; |
419 | 0 | mDevice->GetSharedVSBuffer()->Allocate( |
420 | 0 | §ion, |
421 | 0 | mCurrentMaskRectList.Elements(), |
422 | 0 | mCurrentMaskRectList.Length()); |
423 | 0 | mMaskRectBuffers.AppendElement(section); |
424 | 0 | } |
425 | | |
// Returns the index the in-progress mask-rect constant buffer will have
// once flushed.
size_t
FrameBuilder::CurrentMaskRectBufferIndex() const
{
  // The mask rect buffer list doesn't contain the buffer currently being
  // built, so we don't subtract 1 here.
  return mMaskRectBuffers.Length();
}
433 | | |
434 | | ConstantBufferSection |
435 | | FrameBuilder::GetMaskRectBufferByIndex(size_t aIndex) const |
436 | 0 | { |
437 | 0 | if (aIndex >= mMaskRectBuffers.Length()) { |
438 | 0 | return ConstantBufferSection(); |
439 | 0 | } |
440 | 0 | return mMaskRectBuffers[aIndex]; |
441 | 0 | } |
442 | | |
443 | | } // namespace layers |
444 | | } // namespace mozilla |