// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARKING_H_
#define V8_HEAP_MARKING_H_

#include "src/base/atomic-utils.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class MarkBit {
 public:
  using CellType = uint32_t;
  STATIC_ASSERT(sizeof(CellType) == sizeof(base::Atomic32));

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
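  // Returns the mark bit immediately following this one, advancing to the
  // first bit of the next cell when the mask overflows.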
  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  // Returns true if the bit was successfully transitioned from 0 to 1.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Set();

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Get();

  // Returns true if the bit was successfully transitioned from 1 to 0.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  inline bool Clear();

  CellType* cell_;
  CellType mask_;

  friend class IncrementalMarking;
  friend class ConcurrentMarkingMarkbits;
  friend class Marking;
};
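
// Conceptual usage sketch (illustrative only; Set/Get/Clear are private and
// are reachable only through the friend classes such as Marking):
//
//   MarkBit::CellType cell = 0;
//   MarkBit bit(&cell, 1u << 3);
//   bit.Set<AccessMode::ATOMIC>();  // Returns true: performed the 0 -> 1 transition.
//   bit.Set<AccessMode::ATOMIC>();  // Returns false: the bit was already set.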

template <>
inline bool MarkBit::Set<AccessMode::NON_ATOMIC>() {
  CellType old_value = *cell_;
  *cell_ = old_value | mask_;
  return (old_value & mask_) == 0;
}

template <>
inline bool MarkBit::Set<AccessMode::ATOMIC>() {
  return base::AsAtomic32::SetBits(cell_, mask_, mask_);
}

template <>
inline bool MarkBit::Get<AccessMode::NON_ATOMIC>() {
  return (*cell_ & mask_) != 0;
}

template <>
inline bool MarkBit::Get<AccessMode::ATOMIC>() {
  return (base::AsAtomic32::Acquire_Load(cell_) & mask_) != 0;
}

template <>
inline bool MarkBit::Clear<AccessMode::NON_ATOMIC>() {
  CellType old_value = *cell_;
  *cell_ = old_value & ~mask_;
  return (old_value & mask_) == mask_;
}

template <>
inline bool MarkBit::Clear<AccessMode::ATOMIC>() {
  return base::AsAtomic32::SetBits(cell_, 0u, mask_);
}

// Bitmap is a sequence of cells, each containing a fixed number of bits.
class V8_EXPORT_PRIVATE Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

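  // kLength is the number of mark bits on a page (one bit per tagged word);
  // kSize is the size of this bitmap in bytes.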
  static const size_t kLength = (1 << kPageSizeBits) >> (kTaggedSizeLog2);

  static const size_t kSize = (1 << kPageSizeBits) >>
                              (kTaggedSizeLog2 + kBitsPerByteLog2);

  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  V8_INLINE static uint32_t IndexToCell(uint32_t index) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  // Retrieves the cell containing the provided markbit index.
  V8_INLINE static uint32_t CellAlignIndex(uint32_t index) {
    return index & ~kBitIndexMask;
  }

  V8_INLINE MarkBit::CellType* cells() {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  V8_INLINE static Bitmap* FromAddress(Address addr) {
    return reinterpret_cast<Bitmap*>(addr);
  }

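  // Returns the MarkBit (cell pointer plus mask) for the given bit index.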
  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
};
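
// Example of the index -> (cell, mask) mapping (illustrative only): bit index
// 37 lives in cell 37 >> 5 == 1 and is selected by mask 1u << (37 & 31),
// i.e. 1u << 5.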

template <AccessMode mode>
class ConcurrentBitmap : public Bitmap {
 public:
  void Clear();

  void MarkAllBits();

  // Clears bits in the given cell. The mask specifies the bits to clear: if a
  // bit is set in the mask then the corresponding bit is cleared in the cell.
  void ClearBitsInCell(uint32_t cell_index, uint32_t mask);

  // Sets bits in the given cell. The mask specifies the bits to set: if a
  // bit is set in the mask then the corresponding bit is set in the cell.
  void SetBitsInCell(uint32_t cell_index, uint32_t mask);

  // Sets all bits in the range [start_index, end_index). If the access is
  // atomic, the cells at the boundary of the range are updated with an atomic
  // compare-and-swap operation; the inner cells are updated with relaxed
  // writes.
  void SetRange(uint32_t start_index, uint32_t end_index);

  // Clears all bits in the range [start_index, end_index). If the access is
  // atomic, the cells at the boundary of the range are updated with an atomic
  // compare-and-swap operation; the inner cells are updated with relaxed
  // writes.
  void ClearRange(uint32_t start_index, uint32_t end_index);

  // The following methods are *not* safe to use in a concurrent context, so
  // they are not implemented for `AccessMode::ATOMIC`.

  // Returns true if all bits in the range [start_index, end_index) are set.
  bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index);

  // Returns true if all bits in the range [start_index, end_index) are
  // cleared.
  bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index);

  void Print();

  bool IsClean();

 private:
  // Clears all bits in the cell range [start_cell_index, end_cell_index).
  // Even if the access is atomic, a relaxed memory ordering is used.
  void ClearCellRangeRelaxed(uint32_t start_cell_index,
                             uint32_t end_cell_index);

  // Sets all bits in the cell range [start_cell_index, end_cell_index).
  // Even if the access is atomic, a relaxed memory ordering is used.
  void SetCellRangeRelaxed(uint32_t start_cell_index, uint32_t end_cell_index);
};
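
// Illustrative sketch (hypothetical indices): marking the bits of a range
// that straddles a cell boundary touches two boundary cells, which the
// atomic variant updates with compare-and-swap:
//
//   // bitmap points at the page's ConcurrentBitmap<AccessMode::ATOMIC>.
//   bitmap->SetRange(30, 33);  // Sets bits 30 and 31 of cell 0, bit 0 of cell 1.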

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    base::Relaxed_Store(cell_base + i, 0);
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    cells()[i] = 0;
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    base::Relaxed_Store(cell_base + i, 0xffffffff);
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetCellRangeRelaxed(
    uint32_t start_cell_index, uint32_t end_cell_index) {
  for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
    cells()[i] = 0xffffffff;
  }
}

template <AccessMode mode>
inline void ConcurrentBitmap<mode>::Clear() {
  ClearCellRangeRelaxed(0, CellsCount());
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // clearing stores.
    base::SeqCst_MemoryFence();
  }
}

template <AccessMode mode>
inline void ConcurrentBitmap<mode>::MarkAllBits() {
  SetCellRangeRelaxed(0, CellsCount());
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // setting stores.
    base::SeqCst_MemoryFence();
  }
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  cells()[cell_index] |= mask;
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::SetBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  base::AsAtomic32::SetBits(cells() + cell_index, mask, mask);
}

template <>
inline void ConcurrentBitmap<AccessMode::NON_ATOMIC>::ClearBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  cells()[cell_index] &= ~mask;
}

template <>
inline void ConcurrentBitmap<AccessMode::ATOMIC>::ClearBitsInCell(
    uint32_t cell_index, uint32_t mask) {
  base::AsAtomic32::SetBits(cells() + cell_index, 0u, mask);
}

template <AccessMode mode>
void ConcurrentBitmap<mode>::SetRange(uint32_t start_index,
                                      uint32_t end_index) {
  if (start_index >= end_index) return;
  end_index--;

  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);

  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);

  if (start_cell_index != end_cell_index) {
    // First, fill all bits from the start index to the end of the first cell
    // with 1s.
    SetBitsInCell(start_cell_index, ~(start_index_mask - 1));
    // Then fill all in-between cells with 1s.
    SetCellRangeRelaxed(start_cell_index + 1, end_cell_index);
    // Finally, fill all bits up to the end index in the last cell with 1s.
    SetBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
  } else {
    SetBitsInCell(start_cell_index,
                  end_index_mask | (end_index_mask - start_index_mask));
  }
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // setting stores.
    base::SeqCst_MemoryFence();
  }
}

template <AccessMode mode>
void ConcurrentBitmap<mode>::ClearRange(uint32_t start_index,
                                        uint32_t end_index) {
  if (start_index >= end_index) return;
  end_index--;

  unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);

  unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);

  if (start_cell_index != end_cell_index) {
    // First, clear all bits from the start index to the end of the first
    // cell.
    ClearBitsInCell(start_cell_index, ~(start_index_mask - 1));
    // Then clear all in-between cells.
    ClearCellRangeRelaxed(start_cell_index + 1, end_cell_index);
    // Finally, clear all bits up to the end index in the last cell.
    ClearBitsInCell(end_cell_index, end_index_mask | (end_index_mask - 1));
  } else {
    ClearBitsInCell(start_cell_index,
                    end_index_mask | (end_index_mask - start_index_mask));
  }
  if (mode == AccessMode::ATOMIC) {
    // This fence prevents re-ordering of publishing stores with the mark-bit
    // clearing stores.
    base::SeqCst_MemoryFence();
  }
}

template <>
V8_EXPORT_PRIVATE bool
ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsSetInRange(
    uint32_t start_index, uint32_t end_index);

template <>
V8_EXPORT_PRIVATE bool
ConcurrentBitmap<AccessMode::NON_ATOMIC>::AllBitsClearInRange(
    uint32_t start_index, uint32_t end_index);

template <>
void ConcurrentBitmap<AccessMode::NON_ATOMIC>::Print();

template <>
V8_EXPORT_PRIVATE bool ConcurrentBitmap<AccessMode::NON_ATOMIC>::IsClean();

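// Marking encodes the color of an object in two consecutive mark bits:
// white = 00, grey = 10, black = 11; 01 is an impossible pattern.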
class Marking : public AllStatic {
 public:
  // TODO(hpayer): The current mark bit operations use NON_ATOMIC as the
  // default access mode. We should remove the default value or switch it to
  // ATOMIC as soon as we add concurrency.

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsImpossible(MarkBit mark_bit) {
    if (mode == AccessMode::NON_ATOMIC) {
      return !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
    }
    // In concurrent mode we can only tell that an object has the impossible
    // bit pattern if we read the first bit again after reading the first and
    // the second bit. If the first bit is still zero and the second bit is
    // one, then the object has the impossible bit pattern.
    bool is_impossible = !mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
    if (is_impossible) {
      return !mark_bit.Get<mode>();
    }
    return false;
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsBlack(MarkBit mark_bit) {
    return mark_bit.Get<mode>() && mark_bit.Next().Get<mode>();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsWhite(MarkBit mark_bit) {
    DCHECK(!IsImpossible<mode>(mark_bit));
    return !mark_bit.Get<mode>();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsGrey(MarkBit mark_bit) {
    return mark_bit.Get<mode>() && !mark_bit.Next().Get<mode>();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool IsBlackOrGrey(MarkBit mark_bit) {
    return mark_bit.Get<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static void MarkWhite(MarkBit markbit) {
    STATIC_ASSERT(mode == AccessMode::NON_ATOMIC);
    markbit.Clear<mode>();
    markbit.Next().Clear<mode>();
  }

  // Warning: this method is not safe in general in concurrent scenarios.
  // Use it only if you know that nobody else will change the bits at the
  // given location.
  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static void MarkBlack(MarkBit markbit) {
    markbit.Set<mode>();
    markbit.Next().Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool WhiteToGrey(MarkBit markbit) {
    return markbit.Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool WhiteToBlack(MarkBit markbit) {
    return markbit.Set<mode>() && markbit.Next().Set<mode>();
  }

  template <AccessMode mode = AccessMode::NON_ATOMIC>
  V8_INLINE static bool GreyToBlack(MarkBit markbit) {
    return markbit.Get<mode>() && markbit.Next().Set<mode>();
  }

  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
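
// Illustrative sketch of the intended color transitions (hypothetical
// `bitmap` and `index`; the mark bit would normally come from the page's
// bitmap):
//
//   MarkBit bit = bitmap->MarkBitFromIndex(index);  // White: 00.
//   Marking::WhiteToGrey<AccessMode::ATOMIC>(bit);  // Grey:  10.
//   Marking::GreyToBlack<AccessMode::ATOMIC>(bit);  // Black: 11.
//   DCHECK(Marking::IsBlack<AccessMode::ATOMIC>(bit));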

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARKING_H_