// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap other than src/heap/heap.h and its
// write barrier here!
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"

#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
#include "src/feedback-vector.h"

// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
#include "src/isolate-data.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
#include "src/string-hasher.h"
#include "src/zone/zone-list-inl.h"

namespace v8 {
namespace internal {

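// Explanatory note (not from the original source): an AllocationResult either
// wraps the freshly allocated HeapObject or, on failure ("retry"), a Smi
// encoding the AllocationSpace that should be garbage-collected before the
// allocation is retried.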
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::ToInt(object_));
}

HeapObject AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

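// Explanatory note (not from the original source): Heap is a member embedded
// directly in Isolate, so the owning Isolate is recovered by subtracting the
// offset of that member from this Heap's address. The offset is measured by
// asking a dummy Isolate placed at address 16 for its heap(); adding 16 back
// cancels the dummy base address.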
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}

int64_t Heap::external_memory() {
  return isolate()->isolate_data()->external_memory_;
}

void Heap::update_external_memory(int64_t delta) {
  isolate()->isolate_data()->external_memory_ += delta;
}

void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
  external_memory_concurrently_freed_ += freed;
}

void Heap::account_external_memory_concurrently_freed() {
  isolate()->isolate_data()->external_memory_ -=
      external_memory_concurrently_freed_;
  external_memory_concurrently_freed_ = 0;
}

RootsTable& Heap::roots_table() { return isolate()->roots_table(); }

#define ROOT_ACCESSOR(Type, name, CamelName)                           \
  Type Heap::name() {                                                  \
    return Type::cast(Object(roots_table()[RootIndex::k##CamelName])); \
  }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define ROOT_ACCESSOR(type, name, CamelName)                                  \
  void Heap::set_##name(type value) {                                         \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted. */     \
    DCHECK_IMPLIES(deserialization_complete(),                                \
                   !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
    DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName),  \
                   IsImmovable(HeapObject::cast(value)));                     \
    roots_table()[RootIndex::k##CamelName] = value->ptr();                    \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

void Heap::SetRootMaterializedObjects(FixedArray objects) {
  roots_table()[RootIndex::kMaterializedObjects] = objects->ptr();
}

void Heap::SetRootScriptList(Object value) {
  roots_table()[RootIndex::kScriptList] = value->ptr();
}

void Heap::SetRootStringTable(StringTable value) {
  roots_table()[RootIndex::kStringTable] = value->ptr();
}

void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
  roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value->ptr();
}

void Heap::SetMessageListeners(TemplateList value) {
  roots_table()[RootIndex::kMessageListeners] = value->ptr();
}

void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
  DCHECK(hash_table->IsObjectHashTable() || hash_table->IsUndefined(isolate()));
  roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table->ptr();
}

PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  DCHECK_NE(idx, CODE_LO_SPACE);
  DCHECK_NE(idx, NEW_LO_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(gc_state_ == NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
    if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
      return AllocationResult::Retry();
    }
  }
#endif
#ifdef DEBUG
  IncrementObjectCounters();
#endif

  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;

  HeapObject object;
  AllocationResult allocation;

  if (AllocationType::kYoung == type) {
    if (large_object) {
      if (FLAG_young_generation_large_objects) {
        allocation = new_lo_space_->AllocateRaw(size_in_bytes);
      } else {
        // If young generation large objects are disabled, we have to tenure
        // the allocation and violate the given allocation type. This could be
        // dangerous. We may want to remove
        // FLAG_young_generation_large_objects and avoid patching.
        allocation = lo_space_->AllocateRaw(size_in_bytes);
      }
    } else {
      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (AllocationType::kOld == type) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (AllocationType::kCode == type) {
    if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = code_lo_space_->AllocateRaw(size_in_bytes);
    }
  } else if (AllocationType::kMap == type) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else if (AllocationType::kReadOnly == type) {
#ifdef V8_USE_SNAPSHOT
    DCHECK(isolate_->serializer_enabled());
#endif
    DCHECK(!large_object);
    DCHECK(CanAllocateInReadOnlySpace());
    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
  } else {
    UNREACHABLE();
  }

  if (allocation.To(&object)) {
    if (AllocationType::kCode == type) {
      // Unprotect the memory chunk of the object if it was not unprotected
      // already.
      UnprotectAndRegisterMemoryChunk(object);
      ZapCodeObject(object->address(), size_in_bytes);
    }
    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}
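// Illustrative caller pattern for AllocateRaw (a sketch, not code from this
// file): callers check the result and, on retry, collect the indicated space
// before allocating again, e.g.:
//
//   HeapObject object;
//   AllocationResult result =
//       heap->AllocateRaw(size_in_bytes, AllocationType::kOld);
//   if (!result.To(&object)) {
//     // result.RetrySpace() names the space to garbage-collect before
//     // retrying the allocation.
//   }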

void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
  for (auto& tracker : allocation_trackers_) {
    tracker->AllocationEvent(object->address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAllocationsHash();
    }
  } else if (FLAG_fuzzer_gc_analysis) {
    ++allocations_count_;
  } else if (FLAG_trace_allocation_stack_interval > 0) {
    ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}

bool Heap::CanAllocateInReadOnlySpace() {
  return !deserialization_complete_ &&
         (isolate()->serializer_enabled() ||
          !isolate()->initialized_from_snapshot());
}

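// Explanatory note (not from the original source): the two overloads below
// fold every allocation into a running hash for --verify-predictable runs.
// Each object is reduced to its space identity plus its offset within the
// page, a value that is stable across deterministic runs, and mixed into the
// hash 16 bits at a time.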
void Heap::UpdateAllocationsHash(HeapObject object) {
  Address object_address = object->address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner()->identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}

void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}

void Heap::RegisterExternalString(String string) {
  DCHECK(string->IsExternalString());
  DCHECK(!string->IsThinString());
  external_string_table_.AddString(string);
}

void Heap::FinalizeExternalString(String string) {
  DCHECK(string->IsExternalString());
  Page* page = Page::FromHeapObject(string);
  ExternalString ext_string = ExternalString::cast(string);

  page->DecrementExternalBackingStoreBytes(
      ExternalBackingStoreType::kExternalString,
      ext_string->ExternalPayloadSize());

  ext_string->DisposeResource();
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

bool Heap::InYoungGeneration(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}

// static
bool Heap::InYoungGeneration(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InYoungGeneration(heap_object);
}

// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
  bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
  // If in the young generation, then check that we're either not in the
  // middle of a GC or that the object is in to-space.
  if (result) {
    // If the object is in the young generation, then it's not in RO_SPACE so
    // this is safe.
    Heap* heap = Heap::FromWritableHeapObject(heap_object);
    DCHECK_IMPLIES(heap->gc_state_ == NOT_IN_GC, InToPage(heap_object));
  }
#endif
  return result;
}

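// Explanatory note (not from the original source): during a scavenge the
// new-space pages are partitioned into from-pages (objects still to be
// evacuated) and to-pages (survivors); the predicates below test which side
// of that partition a page is on.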
// static
bool Heap::InFromPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InFromPage(HeapObject::cast(object));
}

// static
bool Heap::InFromPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InFromPage(heap_object);
}

// static
bool Heap::InFromPage(HeapObject heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}

// static
bool Heap::InToPage(Object object) {
  DCHECK(!HasWeakHeapObjectTag(object));
  return object->IsHeapObject() && InToPage(HeapObject::cast(object));
}

// static
bool Heap::InToPage(MaybeObject object) {
  HeapObject heap_object;
  return object->GetHeapObject(&heap_object) && InToPage(heap_object);
}

// static
bool Heap::InToPage(HeapObject heap_object) {
  return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
}

bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }

// static
Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
  // find a heap. The exception is when the ReadOnlySpace is writeable, during
  // bootstrapping, so explicitly allow this case.
  SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
              static_cast<ReadOnlySpace*>(chunk->owner())->writable());
  Heap* heap = chunk->heap();
  SLOW_DCHECK(heap != nullptr);
  return heap;
}

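// Explanatory note (not from the original source): an object is promoted if
// it sits below the new-space age mark, i.e. it was already live at the end
// of the previous scavenge and has therefore survived one copy.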
bool Heap::ShouldBePromoted(Address old_address) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  DCHECK(IsAligned(byte_size, kTaggedSize));
  CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}

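// Illustrative memory layout assumed by the search below: an
// AllocationMemento, when present, is placed directly behind its object,
//
//   | object (object->SizeFromMap(map) bytes) | memento map word | ... |
//   ^ object_address                          ^ memento_address
//
// so the candidate memento is found at object_address plus the object size.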
template <Heap::FindMementoMode mode>
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
  Address object_address = object->address();
  Address memento_address = object_address + object->SizeFromMap(map);
  Address last_memento_word_address = memento_address + kTaggedSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return AllocationMemento();
  }
  HeapObject candidate = HeapObject::FromAddress(memento_address);
  MapWordSlot candidate_map_slot = candidate->map_slot();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
  if (!candidate_map_slot.contains_value(
          ReadOnlyRoots(this).allocation_memento_map().ptr())) {
    return AllocationMemento();
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return AllocationMemento();
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return AllocationMemento();
    }
  }

  AllocationMemento memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate.is_null()) return AllocationMemento();
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate->IsValid()) {
        return memento_candidate;
      }
      return AllocationMemento();
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
}

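// Explanatory note (not from the original source): UpdateAllocationSite
// counts, per allocation site, how many of its objects survived a scavenge.
// The feedback goes into a scratch map (one per worker in the parallel case)
// and is only merged into global_pretenuring_feedback_ later, which is why no
// allocation site is dereferenced here.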
void Heap::UpdateAllocationSite(Map map, HeapObject object,
                                PretenuringFeedbackMap* pretenuring_feedback) {
  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  DCHECK_IMPLIES(chunk->IsToPage(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
  DCHECK_IMPLIES(!chunk->InYoungGeneration(),
                 chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(map->instance_type())) {
    return;
  }
  AllocationMemento memento_candidate =
      FindAllocationMemento<kForGC>(map, object);
  if (memento_candidate.is_null()) return;

  // Entering cached feedback is used in the parallel case. We are not allowed
  // to dereference the allocation site and rather have to postpone all checks
  // till actually merging the data.
  Address key = memento_candidate->GetAllocationSiteUnchecked();
  (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}

void Heap::ExternalStringTable::AddString(String string) {
  DCHECK(string->IsExternalString());
  DCHECK(!Contains(string));

  if (InYoungGeneration(string)) {
    young_strings_.push_back(string);
  } else {
    old_strings_.push_back(string);
  }
}

Oddball Heap::ToBoolean(bool condition) {
  ReadOnlyRoots roots(this);
  return condition ? roots.true_value() : roots.false_value();
}

int Heap::NextScriptId() {
  int last_id = last_script_id()->value();
  if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
  last_id++;
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::NextDebuggingId() {
  int last_id = last_debugging_id()->value();
  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
    last_id = DebugInfo::kNoDebuggingId;
  }
  last_id++;
  set_last_debugging_id(Smi::FromInt(last_id));
  return last_id;
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number()->value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

int Heap::MaxNumberToStringCacheSize() const {
  // Compute the size of the number string cache based on the max newspace
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  size_t number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size =
      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
          Min<size_t>(0x4000u, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the
  // number of entries.
  return static_cast<int>(number_string_cache_size * 2);
}
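// Worked example (illustrative, assuming kInitialNumberStringCacheSize * 2
// stays below the 0x4000 cap): with an 8 MB max semi-space,
// 8 MB / 512 = 16384 = 0x4000 entries, so the returned length is 0x8000,
// since each entry occupies two slots (a number and its string).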

void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedIncrement(&backing_store_bytes_, amount);
  // TODO(mlippautz): Implement interrupt for global memory allocations that
  // can trigger garbage collections.
}

void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                              size_t amount) {
  base::CheckedDecrement(&backing_store_bytes_, amount);
}

AlwaysAllocateScope::AlwaysAllocateScope(Heap* heap) : heap_(heap) {
  heap_->always_allocate_scope_count_++;
}

AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : AlwaysAllocateScope(isolate->heap()) {}

AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_--;
}

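// Illustrative usage of the modification scopes below (a sketch, not code
// from this file): stack-allocate a scope around code mutation so that write
// protection is re-established on scope exit, e.g.:
//
//   {
//     CodeSpaceMemoryModificationScope modification_scope(heap);
//     // ... write to code objects ...
//   }  // code pages regain their default (non-writable) permissions here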
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory()) {
    heap_->increment_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetReadAndWritable();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetReadAndWritable();
      page = page->next_page();
    }
  }
}

CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
  if (heap_->write_protect_code_memory()) {
    heap_->decrement_code_space_memory_modification_scope_depth();
    heap_->code_space()->SetDefaultCodePermissions();
    LargePage* page = heap_->code_lo_space()->first_page();
    while (page != nullptr) {
      DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
      CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
      page->SetDefaultCodePermissions();
      page = page->next_page();
    }
  }
}

CodePageCollectionMemoryModificationScope::
    CodePageCollectionMemoryModificationScope(Heap* heap)
    : heap_(heap) {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->EnableUnprotectedMemoryChunksRegistry();
  }
}

CodePageCollectionMemoryModificationScope::
    ~CodePageCollectionMemoryModificationScope() {
  if (heap_->write_protect_code_memory() &&
      !heap_->code_space_memory_modification_scope_depth()) {
    heap_->ProtectUnprotectedMemoryChunks();
    heap_->DisableUnprotectedMemoryChunksRegistry();
  }
}

CodePageMemoryModificationScope::CodePageMemoryModificationScope(
    MemoryChunk* chunk)
    : chunk_(chunk),
      scope_active_(chunk_->heap()->write_protect_code_memory() &&
                    chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
  if (scope_active_) {
    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
           (chunk_->owner()->identity() == CODE_LO_SPACE));
    chunk_->SetReadAndWritable();
  }
}

CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
  if (scope_active_) {
    chunk_->SetDefaultCodePermissions();
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_
|