/src/mozilla-central/gfx/layers/ipc/ISurfaceAllocator.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "ISurfaceAllocator.h" |
8 | | |
9 | | #include "gfxPrefs.h" |
10 | | #include "mozilla/layers/ImageBridgeParent.h" // for ImageBridgeParent |
11 | | #include "mozilla/layers/TextureHost.h" // for TextureHost |
12 | | #include "mozilla/layers/TextureForwarder.h" |
13 | | #include "mozilla/layers/CompositableForwarder.h" |
14 | | |
15 | | namespace mozilla { |
16 | | namespace layers { |
17 | | |
// nsISupports/QueryInterface boilerplate for the gfx image memory reporter.
NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter)

// Running total reported by GfxMemoryImageReporter; mozilla::Atomic makes
// updates safe from any thread.
mozilla::Atomic<ptrdiff_t> GfxMemoryImageReporter::sAmount(0);
21 | | |
22 | | /* static */ uint32_t |
23 | 0 | CompositableForwarder::GetMaxFileDescriptorsPerMessage() { |
24 | 0 | #if defined(OS_POSIX) |
25 | 0 | static const uint32_t kMaxFileDescriptors = FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE; |
26 | | #else |
27 | | // default number that works everywhere else |
28 | | static const uint32_t kMaxFileDescriptors = 250; |
29 | | #endif |
30 | | return kMaxFileDescriptors; |
31 | 0 | } |
32 | | |
33 | | mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType() |
34 | 0 | { |
35 | 0 | return ipc::SharedMemory::SharedMemoryType::TYPE_BASIC; |
36 | 0 | } |
37 | | |
38 | | void |
39 | | HostIPCAllocator::SendPendingAsyncMessages() |
40 | 0 | { |
41 | 0 | if (mPendingAsyncMessage.empty()) { |
42 | 0 | return; |
43 | 0 | } |
44 | 0 | |
45 | 0 | // Some type of AsyncParentMessageData message could have |
46 | 0 | // one file descriptor (e.g. OpDeliverFence). |
47 | 0 | // A number of file descriptors per gecko ipc message have a limitation |
48 | 0 | // on OS_POSIX (MACOSX or LINUX). |
49 | 0 | #if defined(OS_POSIX) |
50 | 0 | static const uint32_t kMaxMessageNumber = FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE; |
51 | | #else |
52 | | // default number that works everywhere else |
53 | | static const uint32_t kMaxMessageNumber = 250; |
54 | | #endif |
55 | |
|
56 | 0 | InfallibleTArray<AsyncParentMessageData> messages; |
57 | 0 | messages.SetCapacity(mPendingAsyncMessage.size()); |
58 | 0 | for (size_t i = 0; i < mPendingAsyncMessage.size(); i++) { |
59 | 0 | messages.AppendElement(mPendingAsyncMessage[i]); |
60 | 0 | // Limit maximum number of messages. |
61 | 0 | if (messages.Length() >= kMaxMessageNumber) { |
62 | 0 | SendAsyncMessage(messages); |
63 | 0 | // Initialize Messages. |
64 | 0 | messages.Clear(); |
65 | 0 | } |
66 | 0 | } |
67 | 0 |
|
68 | 0 | if (messages.Length() > 0) { |
69 | 0 | SendAsyncMessage(messages); |
70 | 0 | } |
71 | 0 | mPendingAsyncMessage.clear(); |
72 | 0 | } |
73 | | |
// XXX - We should actually figure out the minimum shmem allocation size on
// a certain platform and use that.
// Size of each shmem "page" handed out to FixedSizeSmallShmemSectionAllocator.
const uint32_t sShmemPageSize = 4096;

#ifdef DEBUG
// The allocator only supports fixed 4-byte sections; used by assertions only.
const uint32_t sSupportedBlockSize = 4;
#endif
81 | | |
// Construct an allocator that carves fixed-size sections out of shmem pages
// obtained from aShmProvider. The provider is dereferenced for the whole
// lifetime of this allocator (including destruction), so it must stay alive.
FixedSizeSmallShmemSectionAllocator::FixedSizeSmallShmemSectionAllocator(LayersIPCChannel* aShmProvider)
  : mShmProvider(aShmProvider)
{
  MOZ_ASSERT(mShmProvider);
}
87 | | |
FixedSizeSmallShmemSectionAllocator::~FixedSizeSmallShmemSectionAllocator()
{
  // Release (or, after IPC shutdown, just drop) any remaining shmem pages.
  ShrinkShmemSectionHeap();
}
92 | | |
93 | | bool |
94 | | FixedSizeSmallShmemSectionAllocator::IPCOpen() const |
95 | 0 | { |
96 | 0 | return mShmProvider->IPCOpen(); |
97 | 0 | } |
98 | | |
// Allocate one fixed-size (4-byte) section out of our pool of shmem pages,
// creating a new page when no existing page has room. Returns false after
// IPC shutdown or on shmem allocation failure; on success fills in
// *aShmemSection with the owning shmem, the payload size and its offset.
// NOTE(review): assumes the caller passes in a section whose shmem() is not
// yet writable (i.e. default-initialized) — the "need a new page" test below
// relies on that; confirm against callers.
bool
FixedSizeSmallShmemSectionAllocator::AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection)
{
  // For now we only support sizes of 4. If we want to support different sizes
  // some more complicated bookkeeping should be added.
  MOZ_ASSERT(aSize == sSupportedBlockSize);
  MOZ_ASSERT(aShmemSection);

  if (!IPCOpen()) {
    gfxCriticalError() << "Attempt to allocate a ShmemSection after shutdown.";
    return false;
  }

  // Each block is the payload plus its per-block allocation header.
  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

  // First, look for an existing page with room for one more block.
  for (size_t i = 0; i < mUsedShmems.size(); i++) {
    ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
    if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
      aShmemSection->shmem() = mUsedShmems[i];
      MOZ_ASSERT(mUsedShmems[i].IsWritable());
      break;
    }
  }

  // No page had room: allocate a fresh page and zero its heap header.
  if (!aShmemSection->shmem().IsWritable()) {
    ipc::Shmem tmp;
    if (!mShmProvider->AllocUnsafeShmem(sShmemPageSize, OptimalShmemType(), &tmp)) {
      return false;
    }

    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
    header->mTotalBlocks = 0;
    header->mAllocatedBlocks = 0;

    mUsedShmems.push_back(tmp);
    aShmemSection->shmem() = tmp;
  }

  MOZ_ASSERT(aShmemSection->shmem().IsWritable());

  // The page layout is: ShmemSectionHeapHeader, then mTotalBlocks blocks of
  // (ShmemSectionHeapAllocation + payload) each.
  ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
  uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

  ShmemSectionHeapAllocation* allocHeader = nullptr;

  if (header->mTotalBlocks > header->mAllocatedBlocks) {
    // At least one block has been freed: search for the first available block.
    for (size_t i = 0; i < header->mTotalBlocks; i++) {
      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

      if (allocHeader->mStatus == STATUS_FREED) {
        break;
      }
      heap += allocationSize;
    }
    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
  } else {
    // All existing blocks are live: append a brand-new block past the last one.
    heap += header->mTotalBlocks * allocationSize;

    header->mTotalBlocks++;
    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
    allocHeader->mSize = aSize;
  }

  MOZ_ASSERT(allocHeader);
  header->mAllocatedBlocks++;
  allocHeader->mStatus = STATUS_ALLOCATED;

  // The section's offset points at the payload, just past the block header.
  aShmemSection->size() = aSize;
  aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
  ShrinkShmemSectionHeap();
  return true;
}
173 | | |
174 | | void |
175 | | FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection) |
176 | 0 | { |
177 | 0 | MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize); |
178 | 0 | MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize); |
179 | 0 |
|
180 | 0 | if (!aShmemSection.shmem().IsWritable()) { |
181 | 0 | return; |
182 | 0 | } |
183 | 0 | |
184 | 0 | ShmemSectionHeapAllocation* allocHeader = |
185 | 0 | reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() + |
186 | 0 | aShmemSection.offset() - |
187 | 0 | sizeof(ShmemSectionHeapAllocation)); |
188 | 0 |
|
189 | 0 | MOZ_ASSERT(allocHeader->mSize == aShmemSection.size()); |
190 | 0 |
|
191 | 0 | DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED); |
192 | 0 | // If this fails something really weird is going on. |
193 | 0 | MOZ_ASSERT(success); |
194 | 0 |
|
195 | 0 | ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>(); |
196 | 0 | header->mAllocatedBlocks--; |
197 | 0 | } |
198 | | |
199 | | void |
200 | | FixedSizeSmallShmemSectionAllocator::DeallocShmemSection(mozilla::layers::ShmemSection& aShmemSection) |
201 | 0 | { |
202 | 0 | if (!IPCOpen()) { |
203 | 0 | gfxCriticalNote << "Attempt to dealloc a ShmemSections after shutdown."; |
204 | 0 | return; |
205 | 0 | } |
206 | 0 |
|
207 | 0 | FreeShmemSection(aShmemSection); |
208 | 0 | ShrinkShmemSectionHeap(); |
209 | 0 | } |
210 | | |
211 | | |
212 | | void |
213 | | FixedSizeSmallShmemSectionAllocator::ShrinkShmemSectionHeap() |
214 | 0 | { |
215 | 0 | if (!IPCOpen()) { |
216 | 0 | mUsedShmems.clear(); |
217 | 0 | return; |
218 | 0 | } |
219 | 0 | |
220 | 0 | // The loop will terminate as we either increase i, or decrease size |
221 | 0 | // every time through. |
222 | 0 | size_t i = 0; |
223 | 0 | while (i < mUsedShmems.size()) { |
224 | 0 | ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>(); |
225 | 0 | if (header->mAllocatedBlocks == 0) { |
226 | 0 | mShmProvider->DeallocShmem(mUsedShmems[i]); |
227 | 0 | // We don't particularly care about order, move the last one in the array |
228 | 0 | // to this position. |
229 | 0 | if (i < mUsedShmems.size() - 1) { |
230 | 0 | mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1]; |
231 | 0 | } |
232 | 0 | mUsedShmems.pop_back(); |
233 | 0 | } else { |
234 | 0 | i++; |
235 | 0 | } |
236 | 0 | } |
237 | 0 | } |
238 | | |
239 | | } // namespace layers |
240 | | } // namespace mozilla |