// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

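// The store buffer records, on the write-barrier slow path, pointer writes
// that may need to end up in the OLD_TO_NEW remembered set, plus deletions
// of such slots. Two buffers are used so that one can be drained while the
// other one fills up.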
StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
  task_running_ = false;
  insertion_callback = &InsertDuringRuntime;
  deletion_callback = &DeleteDuringRuntime;
}

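// Reserves one contiguous, kStoreBufferSize-aligned region and splits it
// into the two buffers. Since kStoreBufferSize is a power of two and each
// limit_[i] is aligned to it, the end of a buffer can be detected with a
// single bit test of the top pointer (see the kStoreBufferMask check
// below).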
void StoreBuffer::SetUp() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // Round up the requested size in order to fulfill the VirtualMemory's
  // requirements on the requested size alignment. This may cause a bit of
  // memory wastage if the actual CommitPageSize() is bigger than the
  // kMinExpectedOSPageSize value, but this is a trade-off for keeping the
  // store buffer overflow check in write barriers cheap.
  const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
                                        page_allocator->CommitPageSize());
  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
  // use a bit test to detect the ends of the buffers.
  STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
  const size_t alignment =
      std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
  VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
  if (!reservation.IsReserved()) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  Address start = reservation.address();
  const size_t allocated_size = reservation.size();

  start_[0] = reinterpret_cast<Address*>(start);
  limit_[0] = start_[0] + (kStoreBufferSize / kSystemPointerSize);
  start_[1] = limit_[0];
  limit_[1] = start_[1] + (kStoreBufferSize / kSystemPointerSize);

  // Sanity check the buffers.
  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
  USE(vm_limit);
  for (int i = 0; i < kStoreBuffers; i++) {
    DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
    DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
    DCHECK(start_[i] <= vm_limit);
    DCHECK(limit_[i] <= vm_limit);
    DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
  }

  // Set RW permissions only on the pages we use.
  const size_t used_size = RoundUp(requested_size, CommitPageSize());
  if (!reservation.SetPermissions(start, used_size,
                                  PageAllocator::kReadWrite)) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  current_ = 0;
  top_ = start_[current_];
  virtual_memory_.TakeControl(&reservation);
}

void StoreBuffer::TearDown() {
  if (virtual_memory_.IsReserved()) virtual_memory_.Free();
  top_ = nullptr;
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
}

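// Runtime callbacks: while no GC is in progress, insertions and deletions
// are merely recorded in the store buffer; they are applied to the
// remembered set later, in MoveEntriesToRememberedSet.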
void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
                                      Address end) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
}

void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertIntoStoreBuffer(slot);
}

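// GC callbacks: during GC the store buffer must stay empty, so deletions
// and insertions bypass it and update the OLD_TO_NEW remembered set
// directly.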
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address start, Address end) {
  // During GC the store buffer has to be empty at all times.
  DCHECK(store_buffer->Empty());
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  Page* page = Page::FromAddress(start);
  if (end) {
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
  } else {
    RememberedSet<OLD_TO_NEW>::Remove(page, start);
  }
}

void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address slot) {
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
}

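// Called when a GC starts or ends; swapping in the matching callback pair
// spares the write-barrier call sites from checking mode_ themselves.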
void StoreBuffer::SetMode(StoreBufferMode mode) {
  mode_ = mode;
  if (mode == NOT_IN_GC) {
    insertion_callback = &InsertDuringRuntime;
    deletion_callback = &DeleteDuringRuntime;
  } else {
    insertion_callback = &InsertDuringGarbageCollection;
    deletion_callback = &DeleteDuringGarbageCollection;
  }
}

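// Slow path of the write barrier, reached from generated code when top_
// hits a buffer limit.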
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->FlipStoreBuffers();
  isolate->counters()->store_buffer_overflows()->Increment();
  // Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
  return 0;
}

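// Makes the other buffer the active one and marks the full buffer as
// lazy, i.e. still to be drained. If enabled, draining happens on a
// concurrent task; otherwise the entries are picked up by the next call
// that drains this buffer.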
void StoreBuffer::FlipStoreBuffers() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  current_ = other;
  top_ = start_[current_];

  if (!task_running_ && FLAG_concurrent_store_buffer) {
    task_running_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<Task>(heap_->isolate(), this));
  }
}

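// Drains one lazy buffer into the remembered set. An entry is normally a
// slot address to insert; a deletion is encoded as a pair of consecutive
// words, the first of which carries a tag bit (IsDeletionAddress):
//
//   slot                        -> insert slot
//   tagged start, end           -> remove all slots in [start, end)
//   tagged start, kNullAddress  -> remove the single slot start
//
// Consecutive duplicate insertions are skipped via last_inserted_addr,
// and the containing MemoryChunk is cached because neighboring entries
// usually point into the same chunk.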
void StoreBuffer::MoveEntriesToRememberedSet(int index) {
  if (!lazy_top_[index]) return;
  DCHECK_GE(index, 0);
  DCHECK_LT(index, kStoreBuffers);
  Address last_inserted_addr = kNullAddress;
  MemoryChunk* chunk = nullptr;

  for (Address* current = start_[index]; current < lazy_top_[index];
       current++) {
    Address addr = *current;
    if (chunk == nullptr ||
        MemoryChunk::BaseAddress(addr) != chunk->address()) {
      chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    if (IsDeletionAddress(addr)) {
      last_inserted_addr = kNullAddress;
      current++;
      Address end = *current;
      DCHECK(!IsDeletionAddress(end));
      addr = UnmarkDeletionAddress(addr);
      if (end) {
        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
                                               SlotSet::PREFREE_EMPTY_BUCKETS);
      } else {
        RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
      }
    } else {
      DCHECK(!IsDeletionAddress(addr));
      if (addr != last_inserted_addr) {
        RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
        last_inserted_addr = addr;
      }
    }
  }
  lazy_top_[index] = nullptr;
}

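// Drains both buffers, including the one currently being filled, for the
// situations where the remembered set has to be complete, e.g. because
// the store buffer must be empty during GC.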
void StoreBuffer::MoveAllEntriesToRememberedSet() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  MoveEntriesToRememberedSet(current_);
  top_ = start_[current_];
}

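// Run by the concurrent task posted in FlipStoreBuffers: drains the
// inactive buffer under the mutex and allows a new task to be posted.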
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  task_running_ = false;
}

}  // namespace internal
}  // namespace v8