/src/mozilla-central/gfx/layers/mlgpu/RenderViewMLGPU.cpp
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "RenderViewMLGPU.h"
#include "ContainerLayerMLGPU.h"
#include "FrameBuilder.h"
#include "gfxPrefs.h"
#include "LayersHelpers.h"
#include "LayersLogging.h"
#include "MLGDevice.h"
#include "RenderPassMLGPU.h"
#include "ShaderDefinitionsMLGPU.h"
#include "Units.h"
#include "UnitTransforms.h"
#include "UtilityMLGPU.h"

namespace mozilla {
namespace layers {

using namespace gfx;
RenderViewMLGPU::RenderViewMLGPU(FrameBuilder* aBuilder,
                                 MLGRenderTarget* aTarget,
                                 const nsIntRegion& aInvalidRegion)
 : RenderViewMLGPU(aBuilder, nullptr)
{
  mTarget = aTarget;
  mInvalidBounds = aInvalidRegion.GetBounds();

  // The clear region on the layer manager is the area that must be clear after
  // we finish drawing.
  mPostClearRegion = aBuilder->GetManager()->GetRegionToClear();

  // Clamp the post-clear region to the invalid bounds, since clears don't go
  // through the scissor rect if using ClearView.
  mPostClearRegion.AndWith(mInvalidBounds);

  // Since the post-clear will occlude everything, we include it in the final
  // opaque area.
  mOccludedRegion.OrWith(ViewAs<LayerPixel>(
    mPostClearRegion,
    PixelCastJustification::RenderTargetIsParentLayerForRoot));

  AL_LOG("RenderView %p root with invalid area %s, clear area %s\n",
         this,
         Stringify(mInvalidBounds).c_str(),
         Stringify(mPostClearRegion).c_str());
}

RenderViewMLGPU::RenderViewMLGPU(FrameBuilder* aBuilder,
                                 ContainerLayerMLGPU* aContainer,
                                 RenderViewMLGPU* aParent)
 : RenderViewMLGPU(aBuilder, aParent)
{
  mContainer = aContainer;
  mTargetOffset = aContainer->GetTargetOffset();
  mInvalidBounds = aContainer->GetInvalidRect();
  MOZ_ASSERT(!mInvalidBounds.IsEmpty());

  AL_LOG("RenderView %p starting with container %p and invalid area %s\n",
         this,
         aContainer->GetLayer(),
         Stringify(mInvalidBounds).c_str());

  mContainer->SetRenderView(this);
}

RenderViewMLGPU::RenderViewMLGPU(FrameBuilder* aBuilder, RenderViewMLGPU* aParent)
 : mBuilder(aBuilder),
   mDevice(aBuilder->GetDevice()),
   mParent(aParent),
   mContainer(nullptr),
   mFinishedBuilding(false),
   mCurrentLayerBufferIndex(kInvalidResourceIndex),
   mCurrentMaskRectBufferIndex(kInvalidResourceIndex),
   mCurrentDepthMode(MLGDepthTestMode::Disabled),
   mNextSortIndex(1),
   mUseDepthBuffer(gfxPrefs::AdvancedLayersEnableDepthBuffer()),
   mDepthBufferNeedsClear(false)
{
  if (aParent) {
    aParent->AddChild(this);
  }
}

RenderViewMLGPU::~RenderViewMLGPU()
{
  for (const auto& child : mChildren) {
    child->mParent = nullptr;
  }
}

IntSize
RenderViewMLGPU::GetSize() const
{
  MOZ_ASSERT(mFinishedBuilding);
  return mTarget->GetSize();
}

MLGRenderTarget*
RenderViewMLGPU::GetRenderTarget() const
{
  MOZ_ASSERT(mFinishedBuilding);
  return mTarget;
}

void
RenderViewMLGPU::AddChild(RenderViewMLGPU* aChild)
{
  mChildren.push_back(aChild);
}

void
RenderViewMLGPU::Render()
{
  // We render views depth-first to minimize render target switching.
  for (const auto& child : mChildren) {
    child->Render();
  }

  // If the view requires a surface copy (of its backdrop), then we delay
  // rendering it until it is added to a batch.
  if (mContainer && mContainer->NeedsSurfaceCopy()) {
    return;
  }
  ExecuteRendering();
}

void
RenderViewMLGPU::RenderAfterBackdropCopy()
{
  MOZ_ASSERT(mContainer && mContainer->NeedsSurfaceCopy());

  // Update the invalid bounds based on the container's visible region. This
  // of course won't affect the prepared pipeline, but it will change the
  // scissor rect in SetDeviceState.
  mInvalidBounds = mContainer->GetRenderRegion().GetBounds().ToUnknownRect() -
                   GetTargetOffset();

  ExecuteRendering();
}

void
RenderViewMLGPU::FinishBuilding()
{
  MOZ_ASSERT(!mFinishedBuilding);
  mFinishedBuilding = true;

  if (mContainer) {
    MOZ_ASSERT(!mTarget);

    MLGRenderTargetFlags flags = MLGRenderTargetFlags::Default;
    if (mUseDepthBuffer) {
      flags |= MLGRenderTargetFlags::ZBuffer;
    }
    mTarget = mContainer->UpdateRenderTarget(mDevice, flags);
  }
}

void
RenderViewMLGPU::AddItem(LayerMLGPU* aItem,
                         const IntRect& aRect,
                         Maybe<Polygon>&& aGeometry)
{
  AL_LOG("RenderView %p analyzing layer %p\n", this, aItem->GetLayer());

  // If the item is not visible at all, skip it.
  if (aItem->GetComputedOpacity() == 0.0f) {
    AL_LOG("RenderView %p culling item %p with no opacity\n",
           this,
           aItem->GetLayer());
    return;
  }

  // When using the depth buffer, the z-index for items is important.
  //
  // Sort order starts at 1 and goes to positive infinity, with smaller values
  // being closer to the screen. Our viewport is the same, with anything
  // outside of [0.0, 1.0] being culled, and lower values occluding higher
  // values. To make this work our projection transform scales the z-axis.
  // Note that we do not use 0 as a sorting index (when depth-testing is
  // enabled) because this would result in a z-value of 1.0, which would be
  // culled.
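  //
  // For instance: with depth-testing enabled, the first item added here gets
  // sort index 1 and, all else being equal, occludes an item added later
  // with sort index 2, since lower indices map to smaller z-values and win
  // the depth test.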
  ItemInfo info(mBuilder, this, aItem, mNextSortIndex++, aRect,
                std::move(aGeometry));

  // If the item is not visible, or we can't add it to the layer constant
  // buffer for some reason, bail out.
  if (!UpdateVisibleRegion(info) || !mBuilder->AddLayerToConstantBuffer(info)) {
    AL_LOG("RenderView %p culled item %p!\n", this, aItem->GetLayer());
    return;
  }

  // We support all layer types now.
  MOZ_ASSERT(info.type != RenderPassType::Unknown);

  if (info.renderOrder == RenderOrder::FrontToBack) {
    AddItemFrontToBack(aItem, info);
  } else {
    AddItemBackToFront(aItem, info);
  }
}

bool
RenderViewMLGPU::UpdateVisibleRegion(ItemInfo& aItem)
{
  // If the item has some kind of complex transform, we perform a very
  // simple occlusion test and move on. When using a depth buffer we skip
  // CPU-based occlusion culling as well, since the GPU will do most of our
  // culling work for us.
  if (mUseDepthBuffer ||
      !aItem.translation ||
      !gfxPrefs::AdvancedLayersEnableCPUOcclusion())
  {
    // Update the render region even if we won't compute visibility, since some
    // layer types (like Canvas and Image) need to have the visible region
    // clamped.
    LayerIntRegion region = aItem.layer->GetShadowVisibleRegion();
    aItem.layer->SetRenderRegion(std::move(region));

    AL_LOG("RenderView %p simple occlusion test, bounds=%s, translation?=%d\n",
           this,
           Stringify(aItem.bounds).c_str(),
           aItem.translation ? 1 : 0);
    return mInvalidBounds.Intersects(aItem.bounds);
  }

  MOZ_ASSERT(aItem.rectilinear);

  AL_LOG("RenderView %p starting visibility tests:\n", this);
  AL_LOG("  occluded=%s\n", Stringify(mOccludedRegion).c_str());

  // Compute the translation into render target space.
  LayerIntPoint translation =
    LayerIntPoint::FromUnknownPoint(aItem.translation.value() - mTargetOffset);
  AL_LOG("  translation=%s\n", Stringify(translation).c_str());

  IntRect clip = aItem.layer->GetComputedClipRect().ToUnknownRect();
  AL_LOG("  clip=%s\n", Stringify(clip).c_str());

  LayerIntRegion region = aItem.layer->GetShadowVisibleRegion();
  region.MoveBy(translation);
  AL_LOG("  effective-visible=%s\n", Stringify(region).c_str());

  region.SubOut(mOccludedRegion);
  region.AndWith(LayerIntRect::FromUnknownRect(mInvalidBounds));
  region.AndWith(LayerIntRect::FromUnknownRect(clip));
  if (region.IsEmpty()) {
    return false;
  }

  // Move the visible region back into layer space.
  region.MoveBy(-translation);
  AL_LOG("  new-local-visible=%s\n", Stringify(region).c_str());

  aItem.layer->SetRenderRegion(std::move(region));

  // Apply the new occluded area. We do another dance with the translation to
  // avoid copying the region. We do this after the SetRenderRegion call to
  // accommodate the possibility of a layer changing its visible region.
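  // Illustrative note: the MoveBy/OrWith/MoveBy sequence below is equivalent
  // to mOccludedRegion |= (render region + translation), but avoids
  // materializing a translated copy of the render region.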
  if (aItem.opaque) {
    mOccludedRegion.MoveBy(-translation);
    mOccludedRegion.OrWith(aItem.layer->GetRenderRegion());
    mOccludedRegion.MoveBy(translation);
    AL_LOG("  new-occluded=%s\n", Stringify(mOccludedRegion).c_str());

    // If the occluded region gets too complicated, we reset it.
    if (mOccludedRegion.GetNumRects() >= 32) {
      mOccludedRegion.SetEmpty();
      AL_LOG("  clear-occluded, too many rects\n");
    }
  }
  return true;
}

void
RenderViewMLGPU::AddItemFrontToBack(LayerMLGPU* aLayer, ItemInfo& aItem)
{
  // We receive items in front-to-back order. Ideally we want to push items
  // as far back into batches as possible, to ensure the GPU can do a good
  // job at culling. However we also want to make sure we actually batch
  // items versus drawing one primitive per pass.
  //
  // As a compromise we search the 3 most recent batches and then give up.
  // This can be tweaked in the future.
  static const size_t kMaxSearch = 3;
  size_t iterations = 0;
  for (auto iter = mFrontToBack.rbegin(); iter != mFrontToBack.rend(); iter++) {
    RenderPassMLGPU* pass = (*iter);
    if (pass->IsCompatible(aItem) && pass->AcceptItem(aItem)) {
      AL_LOG("RenderView %p added layer %p to pass %p (%d)\n",
             this, aLayer->GetLayer(), pass, int(pass->GetType()));
      return;
    }
    if (++iterations > kMaxSearch) {
      break;
    }
  }

  RefPtr<RenderPassMLGPU> pass = RenderPassMLGPU::CreatePass(mBuilder, aItem);
  if (!pass || !pass->AcceptItem(aItem)) {
    MOZ_ASSERT_UNREACHABLE("Could not build a pass for item!");
    return;
  }
  AL_LOG("RenderView %p added layer %p to new pass %p (%d)\n",
         this, aLayer->GetLayer(), pass.get(), int(pass->GetType()));

  mFrontToBack.push_back(pass);
}

void
RenderViewMLGPU::AddItemBackToFront(LayerMLGPU* aLayer, ItemInfo& aItem)
{
  // We receive layers in front-to-back order, but there are two cases when we
  // actually draw back-to-front: when the depth buffer is disabled, or when
  // using the depth buffer and the item has transparent pixels (and therefore
  // requires blending). In these cases we will build vertex and constant
  // buffers in reverse, as well as execute batches in reverse, to ensure the
  // correct ordering.
  //
  // Note: We limit the number of batches we search through, since it's better
  // to add new draw calls than spend too much time finding compatible
  // batches further down.
  static const size_t kMaxSearch = 10;
  size_t iterations = 0;
  for (auto iter = mBackToFront.begin(); iter != mBackToFront.end(); iter++) {
    RenderPassMLGPU* pass = (*iter);
    if (pass->IsCompatible(aItem) && pass->AcceptItem(aItem)) {
      AL_LOG("RenderView %p added layer %p to pass %p (%d)\n",
             this, aLayer->GetLayer(), pass, int(pass->GetType()));
      return;
    }
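    // Illustrative note: we stop searching at the first pass that overlaps
    // the item. Since this item arrived later, it sits behind any
    // overlapping content already batched there, and passes further along
    // the list execute later, so merging it into one of them would draw it
    // on top of content it belongs beneath.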
    if (pass->Intersects(aItem)) {
      break;
    }
    if (++iterations > kMaxSearch) {
      break;
    }
  }

  RefPtr<RenderPassMLGPU> pass = RenderPassMLGPU::CreatePass(mBuilder, aItem);
  if (!pass || !pass->AcceptItem(aItem)) {
    MOZ_ASSERT_UNREACHABLE("Could not build a pass for item!");
    return;
  }
  AL_LOG("RenderView %p added layer %p to new pass %p (%d)\n",
         this, aLayer->GetLayer(), pass.get(), int(pass->GetType()));

  mBackToFront.push_front(pass);
}

void
RenderViewMLGPU::Prepare()
{
  if (!mTarget) {
    return;
  }

  // Prepare front-to-back passes. These are only present when using the depth
  // buffer, and they contain only opaque data.
  for (RefPtr<RenderPassMLGPU>& pass : mFrontToBack) {
    pass->PrepareForRendering();
  }

  // Prepare the clear buffer, which will fill the render target with
  // transparent pixels. This must happen before we set up world constants,
  // since it can create new z-indices.
  PrepareClears();

  // Prepare the world constant buffer. This must be called after we've
  // finished allocating all z-indices.
  {
    WorldConstants vsConstants;
    Matrix4x4 projection = Matrix4x4::Translation(-1.0, 1.0, 0.0);
    projection.PreScale(2.0 / float(mTarget->GetSize().width),
                        2.0 / float(mTarget->GetSize().height),
                        1.0f);
    projection.PreScale(1.0f, -1.0f, 1.0f);
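
    // Worked example (hypothetical 800x600 target): since each PreScale is
    // applied to points before the existing transform, a target-space point
    // (x, y) becomes (x, -y), then (2x/800, -2y/600), then, after the
    // translation, (2x/800 - 1, 1 - 2y/600). Pixel (0, 0) thus lands on
    // NDC (-1, +1) and pixel (800, 600) on (+1, -1), filling clip space
    // with y pointing up.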

    // Copy the 4x4 projection matrix: 16 floats == 64 bytes.
    memcpy(vsConstants.projection, &projection._11, 64);
    vsConstants.targetOffset = Point(mTargetOffset);
    vsConstants.sortIndexOffset = PrepareDepthBuffer();
    vsConstants.debugFrameNumber = mBuilder->GetManager()->GetDebugFrameNumber();

    SharedConstantBuffer* shared = mDevice->GetSharedVSBuffer();
    if (!shared->Allocate(&mWorldConstants, vsConstants)) {
      return;
    }
  }

  // Prepare back-to-front passes. In depth buffer mode, these contain draw
  // calls that might produce transparent pixels. When using CPU-based
  // occlusion culling, all draw calls are back-to-front.
  for (RefPtr<RenderPassMLGPU>& pass : mBackToFront) {
    pass->PrepareForRendering();
  }

  // Now, process children.
  for (const auto& iter : mChildren) {
    iter->Prepare();
  }
}

void
RenderViewMLGPU::ExecuteRendering()
{
  if (!mTarget) {
    return;
  }
  if (!mWorldConstants.IsValid()) {
    gfxWarning() << "Failed to allocate constant buffer for world transform";
    return;
  }

  SetDeviceState();

  // If using the depth buffer, clear it (if needed) and enable writes.
  if (mUseDepthBuffer) {
    if (mDepthBufferNeedsClear) {
      mDevice->ClearDepthBuffer(mTarget);
    }
    SetDepthTestMode(MLGDepthTestMode::Write);
  }

  // Opaque items, rendered front-to-back.
  for (auto iter = mFrontToBack.begin(); iter != mFrontToBack.end(); iter++) {
    ExecutePass(*iter);
  }

  if (mUseDepthBuffer) {
    // From now on we might be rendering transparent pixels, so we disable
    // writing to the z-buffer.
    SetDepthTestMode(MLGDepthTestMode::ReadOnly);
  }

  // Clear any pixels that are not occluded, and therefore might require
  // blending.
  mDevice->DrawClearRegion(mPreClear);

  // Render back-to-front passes.
  for (auto iter = mBackToFront.begin(); iter != mBackToFront.end(); iter++) {
    ExecutePass(*iter);
  }

  // Make sure the post-clear area has no pixels.
  if (!mPostClearRegion.IsEmpty()) {
    mDevice->DrawClearRegion(mPostClear);
  }

  // We repaint the entire invalid region, even if it is partially occluded.
  // Thus it's safe for us to clear the invalid area here. If we ever switch
  // to nsIntRegions, we will have to take the difference between the painted
  // area and the invalid area.
  if (mContainer) {
    mContainer->ClearInvalidRect();
  }
}

void
RenderViewMLGPU::ExecutePass(RenderPassMLGPU* aPass)
{
  if (!aPass->IsPrepared()) {
    return;
  }

  // Change the layer buffer if needed.
  if (aPass->GetLayerBufferIndex() != mCurrentLayerBufferIndex) {
    mCurrentLayerBufferIndex = aPass->GetLayerBufferIndex();

    ConstantBufferSection section =
      mBuilder->GetLayerBufferByIndex(mCurrentLayerBufferIndex);
    mDevice->SetVSConstantBuffer(kLayerBufferSlot, &section);
  }

  // Change the mask rect buffer if needed.
  if (aPass->GetMaskRectBufferIndex() &&
      aPass->GetMaskRectBufferIndex().value() != mCurrentMaskRectBufferIndex)
  {
    mCurrentMaskRectBufferIndex = aPass->GetMaskRectBufferIndex().value();

    ConstantBufferSection section =
      mBuilder->GetMaskRectBufferByIndex(mCurrentMaskRectBufferIndex);
    mDevice->SetVSConstantBuffer(kMaskBufferSlot, &section);
  }

  aPass->ExecuteRendering();
}

void
RenderViewMLGPU::SetDeviceState()
{
  // Note: we unbind slot 0 (which is where the render target could have been
  // bound on a previous frame). Otherwise we trigger
  // D3D11_DEVICE_PSSETSHADERRESOURCES_HAZARD.
  mDevice->UnsetPSTexture(0);
  mDevice->SetRenderTarget(mTarget);
  mDevice->SetViewport(IntRect(IntPoint(0, 0), mTarget->GetSize()));
  mDevice->SetScissorRect(Some(mInvalidBounds));
  mDevice->SetVSConstantBuffer(kWorldConstantBufferSlot, &mWorldConstants);
}

void
RenderViewMLGPU::SetDepthTestMode(MLGDepthTestMode aMode)
{
  mDevice->SetDepthTestMode(aMode);
  mCurrentDepthMode = aMode;
}

void
RenderViewMLGPU::RestoreDeviceState()
{
  SetDeviceState();
  mDevice->SetDepthTestMode(mCurrentDepthMode);
  mCurrentLayerBufferIndex = kInvalidResourceIndex;
  mCurrentMaskRectBufferIndex = kInvalidResourceIndex;
}

int32_t
RenderViewMLGPU::PrepareDepthBuffer()
{
  if (!mUseDepthBuffer) {
    return 0;
  }

  // Rather than clear the depth buffer every frame, we offset z-indices each
  // frame, starting with indices far away from the screen and moving toward
  // the user each successive frame. This ensures that frames can re-use the
  // depth buffer but never collide with previously written values.
  //
  // Once a frame runs out of sort indices, we finally clear the depth buffer
  // and start over again.

  // Note: the deepest possible sort index (kDepthLimit) is always occluded,
  // since it resolves to the clear value: kDepthLimit / kDepthLimit == 1.0.
  //
  // If we don't have any more indices to allocate, we need to clear the depth
  // buffer and start fresh.
  int32_t highestIndex = mTarget->GetLastDepthStart();
  if (highestIndex < mNextSortIndex) {
    mDepthBufferNeedsClear = true;
    highestIndex = kDepthLimit;
  }

  // We should not have more than kDepthLimit layers to draw. The last known
  // sort index might appear in the depth buffer and occlude something, so
  // we subtract 1. This ensures all our indices will compare less than all
  // old indices.
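  // Worked example with assumed numbers: if the previous frame stored
  // GetLastDepthStart() == 200000 and this frame handed out sort indices
  // 1..499 (mNextSortIndex == 500), then sortOffset == 200000 - 500 - 1
  // == 199499, and the shifted indices 199500..199998 all compare less than
  // every index used by earlier frames. Had mNextSortIndex exceeded 200000,
  // we would instead clear the buffer and restart from kDepthLimit.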
  int32_t sortOffset = highestIndex - mNextSortIndex - 1;
  MOZ_ASSERT(sortOffset >= 0);

  mTarget->SetLastDepthStart(sortOffset);
  return sortOffset;
}

void
RenderViewMLGPU::PrepareClears()
{
  // We don't do any clearing if we're copying from a source backdrop.
  if (mContainer && mContainer->NeedsSurfaceCopy()) {
    return;
  }

  // Get the list of rects to clear. If using the depth buffer, we don't
  // care if it's accurate since the GPU will do occlusion testing for us.
  // If not using the depth buffer, we subtract out the occluded region.
  LayerIntRegion region = LayerIntRect::FromUnknownRect(mInvalidBounds);
  if (!mUseDepthBuffer) {
    // Don't let the clear region become too complicated.
    region.SubOut(mOccludedRegion);
    region.SimplifyOutward(kMaxClearViewRects);
  }

  Maybe<int32_t> sortIndex;
  if (mUseDepthBuffer) {
    // Note that we use the lowest available sorting index, to ensure that when
    // using the z-buffer, we don't draw over already-drawn content.
    sortIndex = Some(mNextSortIndex++);
  }

  nsTArray<IntRect> rects = ToRectArray(region);
  mDevice->PrepareClearRegion(&mPreClear, std::move(rects), sortIndex);

  if (!mPostClearRegion.IsEmpty()) {
    // Prepare the final clear as well. Note that we always do this clear at
    // the very end, even when the depth buffer is enabled, so we don't bother
    // setting a useful sorting index. If and when we try to ship the depth
    // buffer, we would execute this clear earlier in the pipeline and give it
    // the closest possible z-ordering to the screen.
    nsTArray<IntRect> rects = ToRectArray(mPostClearRegion);
    mDevice->PrepareClearRegion(&mPostClear, std::move(rects), Nothing());
  }
}

} // namespace layers
} // namespace mozilla