Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/snapshot/deserializer-allocator.h"
6 :
7 : #include "src/heap/heap-inl.h" // crbug.com/v8/8499
8 : #include "src/snapshot/deserializer.h"
9 : #include "src/snapshot/startup-deserializer.h"
10 :
11 : namespace v8 {
12 : namespace internal {
13 :
// Binds this allocator to the deserializer whose space reservations and
// back-references it will manage.
DeserializerAllocator::DeserializerAllocator(Deserializer* deserializer)
    : deserializer_(deserializer) {}
16 :
17 : // We know the space requirements before deserialization and can
18 : // pre-allocate that reserved space. During deserialization, all we need
19 : // to do is to bump up the pointer for each space in the reserved
20 : // space. This is also used for fixing back references.
21 : // We may have to split up the pre-allocation into several chunks
22 : // because it would not fit onto a single page. We do not have to keep
23 : // track of when to move to the next chunk. An opcode will signal this.
24 : // Since multiple large objects cannot be folded into one large object
25 : // space allocation, we have to do an actual allocation when deserializing
26 : // each large object. Instead of tracking offset for back references, we
27 : // reference large objects by index.
28 430575385 : Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
29 430575385 : if (space == LO_SPACE) {
30 : AlwaysAllocateScope scope(isolate());
31 : // Note that we currently do not support deserialization of large code
32 : // objects.
33 45 : LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
34 45 : AllocationResult result = lo_space->AllocateRaw(size);
35 45 : HeapObject obj = result.ToObjectChecked();
36 45 : deserialized_large_objects_.push_back(obj);
37 : return obj->address();
38 430575340 : } else if (space == MAP_SPACE) {
39 : DCHECK_EQ(Map::kSize, size);
40 44915546 : return allocated_maps_[next_map_index_++];
41 : } else {
42 : DCHECK_LT(space, kNumberOfPreallocatedSpaces);
43 408117567 : Address address = high_water_[space];
44 : DCHECK_NE(address, kNullAddress);
45 408117567 : high_water_[space] += size;
46 : #ifdef DEBUG
47 : // Assert that the current reserved chunk is still big enough.
48 : const Heap::Reservation& reservation = reservations_[space];
49 : int chunk_index = current_chunk_[space];
50 : DCHECK_LE(high_water_[space], reservation[chunk_index].end);
51 : #endif
52 408117567 : if (space == CODE_SPACE) SkipList::Update(address, size);
53 408119739 : return address;
54 : }
55 : }
56 :
57 430572374 : Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
58 : Address address;
59 : HeapObject obj;
60 :
61 430572374 : if (next_alignment_ != kWordAligned) {
62 0 : const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
63 0 : address = AllocateRaw(space, reserved);
64 : obj = HeapObject::FromAddress(address);
65 : // If one of the following assertions fails, then we are deserializing an
66 : // aligned object when the filler maps have not been deserialized yet.
67 : // We require filler maps as padding to align the object.
68 0 : Heap* heap = isolate()->heap();
69 : DCHECK(ReadOnlyRoots(heap).free_space_map()->IsMap());
70 : DCHECK(ReadOnlyRoots(heap).one_pointer_filler_map()->IsMap());
71 : DCHECK(ReadOnlyRoots(heap).two_pointer_filler_map()->IsMap());
72 0 : obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
73 : address = obj->address();
74 0 : next_alignment_ = kWordAligned;
75 0 : return address;
76 : } else {
77 430572374 : return AllocateRaw(space, size);
78 : }
79 : }
80 :
81 2246281 : void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
82 : DCHECK_LT(space, kNumberOfPreallocatedSpaces);
83 2246281 : uint32_t chunk_index = current_chunk_[space];
84 6738843 : const Heap::Reservation& reservation = reservations_[space];
85 : // Make sure the current chunk is indeed exhausted.
86 4492562 : CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
87 : // Move to next reserved chunk.
88 2246281 : chunk_index = ++current_chunk_[space];
89 2246281 : CHECK_LT(chunk_index, reservation.size());
90 2246281 : high_water_[space] = reservation[chunk_index].start;
91 2246281 : }
92 :
// Resolves a back-reference to the |index|-th pre-allocated map.
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}
97 :
// Resolves a back-reference to the |index|-th deserialized large object
// (large objects are referenced by index, not by offset; see AllocateRaw).
HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}
102 :
103 173055377 : HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
104 : uint32_t chunk_index,
105 : uint32_t chunk_offset) {
106 : DCHECK_LT(space, kNumberOfPreallocatedSpaces);
107 : DCHECK_LE(chunk_index, current_chunk_[space]);
108 346110754 : Address address = reservations_[space][chunk_index].start + chunk_offset;
109 173055377 : if (next_alignment_ != kWordAligned) {
110 0 : int padding = Heap::GetFillToAlign(address, next_alignment_);
111 0 : next_alignment_ = kWordAligned;
112 : DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
113 0 : address += padding;
114 : }
115 173055377 : return HeapObject::FromAddress(address);
116 : }
117 :
118 211895 : void DeserializerAllocator::DecodeReservation(
119 : const std::vector<SerializedData::Reservation>& res) {
120 : DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
121 : int current_space = FIRST_SPACE;
122 3941443 : for (auto& r : res) {
123 : reservations_[current_space].push_back(
124 7035305 : {r.chunk_size(), kNullAddress, kNullAddress});
125 3517653 : if (r.is_last()) current_space++;
126 : }
127 : DCHECK_EQ(kNumberOfSpaces, current_space);
128 847584 : for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
129 211896 : }
130 :
131 211896 : bool DeserializerAllocator::ReserveSpace() {
132 : #ifdef DEBUG
133 : for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
134 : DCHECK_GT(reservations_[i].size(), 0);
135 : }
136 : #endif // DEBUG
137 : DCHECK(allocated_maps_.empty());
138 423792 : if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
139 : return false;
140 : }
141 847584 : for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
142 847584 : high_water_[i] = reservations_[i][0].start;
143 : }
144 : return true;
145 : }
146 :
147 0 : bool DeserializerAllocator::ReservationsAreFullyUsed() const {
148 0 : for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
149 0 : const uint32_t chunk_index = current_chunk_[space];
150 0 : if (reservations_[space].size() != chunk_index + 1) {
151 : return false;
152 : }
153 0 : if (reservations_[space][chunk_index].end != high_water_[space]) {
154 : return false;
155 : }
156 : }
157 0 : return (allocated_maps_.size() == next_map_index_);
158 : }
159 :
// Forwards all deserialized regions (reserved chunks, large objects, and
// pre-allocated maps) to the heap for black-allocation bookkeeping.
void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
  isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}
164 :
// Convenience accessor: the isolate owned by the attached deserializer.
Isolate* DeserializerAllocator::isolate() const {
  return deserializer_->isolate();
}
168 :
169 : } // namespace internal
170 178779 : } // namespace v8
|