/src/mozilla-central/js/src/vm/ArrayBufferObject.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
2 | | * vim: set ts=8 sts=4 et sw=4 tw=99: |
3 | | * This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "vm/ArrayBufferObject-inl.h" |
8 | | #include "vm/ArrayBufferObject.h" |
9 | | |
10 | | #include "mozilla/Alignment.h" |
11 | | #include "mozilla/CheckedInt.h" |
12 | | #include "mozilla/FloatingPoint.h" |
13 | | #include "mozilla/Maybe.h" |
14 | | #include "mozilla/PodOperations.h" |
15 | | #include "mozilla/TaggedAnonymousMemory.h" |
16 | | |
17 | | #include <string.h> |
18 | | #ifndef XP_WIN |
19 | | # include <sys/mman.h> |
20 | | #endif |
21 | | #ifdef MOZ_VALGRIND |
22 | | # include <valgrind/memcheck.h> |
23 | | #endif |
24 | | |
25 | | #include "jsapi.h" |
26 | | #include "jsfriendapi.h" |
27 | | #include "jsnum.h" |
28 | | #include "jstypes.h" |
29 | | #include "jsutil.h" |
30 | | |
31 | | #include "builtin/Array.h" |
32 | | #include "builtin/DataViewObject.h" |
33 | | #include "gc/Barrier.h" |
34 | | #include "gc/FreeOp.h" |
35 | | #include "gc/Memory.h" |
36 | | #include "js/Conversions.h" |
37 | | #include "js/MemoryMetrics.h" |
38 | | #include "js/Wrapper.h" |
39 | | #include "util/Windows.h" |
40 | | #include "vm/GlobalObject.h" |
41 | | #include "vm/Interpreter.h" |
42 | | #include "vm/JSContext.h" |
43 | | #include "vm/JSObject.h" |
44 | | #include "vm/SharedArrayObject.h" |
45 | | #include "vm/WrapperObject.h" |
46 | | #include "wasm/WasmSignalHandlers.h" |
47 | | #include "wasm/WasmTypes.h" |
48 | | |
49 | | #include "gc/Marking-inl.h" |
50 | | #include "gc/Nursery-inl.h" |
51 | | #include "vm/JSAtom-inl.h" |
52 | | #include "vm/NativeObject-inl.h" |
53 | | #include "vm/Shape-inl.h" |
54 | | |
55 | | using JS::ToInt32; |
56 | | |
57 | | using mozilla::Atomic; |
58 | | using mozilla::CheckedInt; |
59 | | using mozilla::Some; |
60 | | using mozilla::Maybe; |
61 | | using mozilla::Nothing; |
62 | | using mozilla::Unused; |
63 | | |
64 | | using namespace js; |
65 | | |
66 | | /* |
67 | | * Convert |v| to an array index for an array of length |length| per |
68 | | * the Typed Array Specification section 7.0, |subarray|. If successful, |
69 | | * the output value is in the range [0, length]. |
70 | | */ |
71 | | bool |
72 | | js::ToClampedIndex(JSContext* cx, HandleValue v, uint32_t length, uint32_t* out) |
73 | 0 | { |
74 | 0 | int32_t result; |
75 | 0 | if (!ToInt32(cx, v, &result)) { |
76 | 0 | return false; |
77 | 0 | } |
78 | 0 | if (result < 0) { |
79 | 0 | result += length; |
80 | 0 | if (result < 0) { |
81 | 0 | result = 0; |
82 | 0 | } |
83 | 0 | } else if (uint32_t(result) > length) { |
84 | 0 | result = length; |
85 | 0 | } |
86 | 0 | *out = uint32_t(result); |
87 | 0 | return true; |
88 | 0 | } |
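A hedged, standalone sketch of the clamping rule the helper above implements (illustrative name; the real function also performs the ToInt32 conversion, which can fail). In practice |length| is at most INT32_MAX.

    // Illustrative only: the clamp applied once |v| has been converted to int32.
    #include <cstdint>

    static uint32_t ClampIndex(int32_t result, uint32_t length) {
        if (result < 0) {
            result += length;                  // negative indices count back from the end
            if (result < 0) {
                result = 0;                    // still negative: clamp to 0
            }
        } else if (uint32_t(result) > length) {
            result = length;                   // past the end: clamp to length
        }
        return uint32_t(result);
    }

    // ClampIndex(-2, 10) == 8, ClampIndex(-20, 10) == 0, ClampIndex(15, 10) == 10.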
89 | | |
90 | | // If there are too many 4GB buffers live we run up against system resource |
91 | | // exhaustion (address space or number of memory map descriptors), see |
92 | | // bug 1068684, bug 1073934 for details. The limiting case seems to be |
93 | | // Windows Vista Home 64-bit, where the per-process address space is limited |
94 | | // to 8TB. Thus we track the number of live objects, and set a limit of |
95 | | // 1000 live objects per process and we throw an OOM error if the per-process |
96 | | // limit is exceeded. |
97 | | // |
98 | | // Since the MaximumLiveMappedBuffers limit is not generally accounted for by |
99 | | // any existing GC-trigger heuristics, we need an extra heuristic for triggering |
100 | | // GCs when the caller is allocating memories rapidly without other garbage. |
101 | | // Thus, once the live buffer count crosses a certain threshold, we start |
102 | | // triggering GCs every N allocations. As we get close to the limit, we |
103 | | // perform expensive non-incremental full GCs as a last-ditch effort to avoid |
104 | | // unnecessary failure. The sanitizer builds (ASan/TSan) use a ton of vmem for |
105 | | // bookkeeping, leaving a lot less for the program, so they get a lower limit. |
106 | | |
107 | | #if defined(MOZ_TSAN) || defined(MOZ_ASAN) |
108 | | static const int32_t MaximumLiveMappedBuffers = 500; |
109 | | #else |
110 | | static const int32_t MaximumLiveMappedBuffers = 1000; |
111 | | #endif |
112 | | static const int32_t StartTriggeringAtLiveBufferCount = 100; |
113 | | static const int32_t StartSyncFullGCAtLiveBufferCount = MaximumLiveMappedBuffers - 100; |
114 | | static const int32_t AllocatedBuffersPerTrigger = 100; |
115 | | |
116 | | static Atomic<int32_t, mozilla::ReleaseAcquire> liveBufferCount(0); |
117 | | static Atomic<int32_t, mozilla::ReleaseAcquire> allocatedSinceLastTrigger(0); |
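With the release values above, the extra heuristic engages once more than 100 mapped buffers are live (triggering a GC roughly every 100 allocations), escalates to a synchronous full GC above 900 live buffers (MaximumLiveMappedBuffers - 100), and new mappings are refused outright once 1000 are live (500 under ASan/TSan).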
118 | | |
119 | | int32_t |
120 | | js::LiveMappedBufferCount() |
121 | 0 | { |
122 | 0 | return liveBufferCount; |
123 | 0 | } |
124 | | |
125 | | void* |
126 | | js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) |
127 | 0 | { |
128 | 0 | MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); |
129 | 0 | MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0); |
130 | 0 | MOZ_ASSERT(initialCommittedSize <= mappedSize); |
131 | 0 |
132 | 0 | // Test >= to guard against the case where multiple extant runtimes |
133 | 0 | // race to allocate. |
134 | 0 | if (++liveBufferCount >= MaximumLiveMappedBuffers) { |
135 | 0 | if (OnLargeAllocationFailure) { |
136 | 0 | OnLargeAllocationFailure(); |
137 | 0 | } |
138 | 0 | if (liveBufferCount >= MaximumLiveMappedBuffers) { |
139 | 0 | liveBufferCount--; |
140 | 0 | return nullptr; |
141 | 0 | } |
142 | 0 | } |
143 | 0 | |
144 | | #ifdef XP_WIN |
145 | | void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS); |
146 | | if (!data) { |
147 | | liveBufferCount--; |
148 | | return nullptr; |
149 | | } |
150 | | |
151 | | if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) { |
152 | | VirtualFree(data, 0, MEM_RELEASE); |
153 | | liveBufferCount--; |
154 | | return nullptr; |
155 | | } |
156 | | #else // XP_WIN |
157 | 0 | void* data = MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE, |
158 | 0 | MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved"); |
159 | 0 | if (data == MAP_FAILED) { |
160 | 0 | liveBufferCount--; |
161 | 0 | return nullptr; |
162 | 0 | } |
163 | 0 | |
164 | 0 | // Note we will waste a page on zero-sized memories here |
165 | 0 | if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) { |
166 | 0 | munmap(data, mappedSize); |
167 | 0 | liveBufferCount--; |
168 | 0 | return nullptr; |
169 | 0 | } |
170 | 0 | #endif // !XP_WIN |
171 | 0 | |
172 | | #if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) |
173 | | VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + initialCommittedSize, |
174 | | mappedSize - initialCommittedSize); |
175 | | #endif |
176 | | |
177 | 0 | return data; |
178 | 0 | } |
179 | | |
180 | | bool |
181 | | js::CommitBufferMemory(void* dataEnd, uint32_t delta) |
182 | 0 | { |
183 | 0 | MOZ_ASSERT(delta); |
184 | 0 | MOZ_ASSERT(delta % gc::SystemPageSize() == 0); |
185 | 0 |
186 | | #ifdef XP_WIN |
187 | | if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) { |
188 | | return false; |
189 | | } |
190 | | #else // XP_WIN |
191 | 0 | if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) { |
192 | 0 | return false; |
193 | 0 | } |
194 | 0 | #endif // !XP_WIN |
195 | 0 | |
196 | | #if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) |
197 | | VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta); |
198 | | #endif |
199 | | |
200 | 0 | return true; |
201 | 0 | } |
202 | | |
203 | | #ifndef WASM_HUGE_MEMORY |
204 | | bool |
205 | | js::ExtendBufferMapping(void* dataPointer, size_t mappedSize, size_t newMappedSize) |
206 | | { |
207 | | MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); |
208 | | MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0); |
209 | | MOZ_ASSERT(newMappedSize >= mappedSize); |
210 | | |
211 | | #ifdef XP_WIN |
212 | | void* mappedEnd = (char*)dataPointer + mappedSize; |
213 | | uint32_t delta = newMappedSize - mappedSize; |
214 | | if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) { |
215 | | return false; |
216 | | } |
217 | | return true; |
218 | | #elif defined(XP_LINUX) |
219 | | // Note this will not move memory (no MREMAP_MAYMOVE specified) |
220 | | if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) { |
221 | | return false; |
222 | | } |
223 | | return true; |
224 | | #else |
225 | | // There is no mechanism for remapping on macOS and other Unices. Luckily we |
226 | | // shouldn't need it here, as most of these platforms are 64-bit. |
227 | | return false; |
228 | | #endif |
229 | | } |
230 | | #endif |
231 | | |
232 | | void |
233 | | js::UnmapBufferMemory(void* base, size_t mappedSize) |
234 | 0 | { |
235 | 0 | MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); |
236 | 0 |
237 | | #ifdef XP_WIN |
238 | | VirtualFree(base, 0, MEM_RELEASE); |
239 | | #else // XP_WIN |
240 | | munmap(base, mappedSize); |
241 | 0 | #endif // !XP_WIN |
242 | 0 |
243 | | #if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE) |
244 | | VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base, mappedSize); |
245 | | #endif |
246 | |
247 | 0 | // Decrement the buffer counter at the end -- otherwise, a race condition |
248 | 0 | // could enable the creation of unlimited buffers. |
249 | 0 | liveBufferCount--; |
250 | 0 | } |
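Taken together, MapBufferMemory(), CommitBufferMemory(), and UnmapBufferMemory() form a reserve/commit/release lifecycle. A hedged usage sketch with illustrative sizes (a hypothetical caller, not from this file; both sizes must be multiples of gc::SystemPageSize()):

    size_t pageSize = js::gc::SystemPageSize();
    size_t mappedSize = 64 * 1024 * 1024;       // reserve 64 MiB of address space
    size_t initialCommitted = 16 * pageSize;    // commit only the first 16 pages

    void* base = js::MapBufferMemory(mappedSize, initialCommitted);
    if (base) {
        // Grow the committed region by 16 more pages, starting at its current end.
        uint8_t* dataEnd = static_cast<uint8_t*>(base) + initialCommitted;
        if (!js::CommitBufferMemory(dataEnd, uint32_t(16 * pageSize))) {
            // Commit failed; the pages committed so far remain usable.
        }
        js::UnmapBufferMemory(base, mappedSize);  // also decrements liveBufferCount
    }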
251 | | |
252 | | /* |
253 | | * ArrayBufferObject |
254 | | * |
255 | | * This class holds the underlying raw buffer that the TypedArrayObject classes |
256 | | * access. It can be created explicitly and passed to a TypedArrayObject, or |
257 | | * can be created implicitly by constructing a TypedArrayObject with a size. |
258 | | */ |
259 | | |
260 | | /* |
261 | | * ArrayBufferObject (base) |
262 | | */ |
263 | | |
264 | | static JSObject* |
265 | | CreateArrayBufferPrototype(JSContext* cx, JSProtoKey key) |
266 | 0 | { |
267 | 0 | return GlobalObject::createBlankPrototype(cx, cx->global(), &ArrayBufferObject::protoClass_); |
268 | 0 | } |
269 | | |
270 | | static const ClassOps ArrayBufferObjectClassOps = { |
271 | | nullptr, /* addProperty */ |
272 | | nullptr, /* delProperty */ |
273 | | nullptr, /* enumerate */ |
274 | | nullptr, /* newEnumerate */ |
275 | | nullptr, /* resolve */ |
276 | | nullptr, /* mayResolve */ |
277 | | ArrayBufferObject::finalize, |
278 | | nullptr, /* call */ |
279 | | nullptr, /* hasInstance */ |
280 | | nullptr, /* construct */ |
281 | | ArrayBufferObject::trace, |
282 | | }; |
283 | | |
284 | | static const JSFunctionSpec arraybuffer_functions[] = { |
285 | | JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0), |
286 | | JS_FS_END |
287 | | }; |
288 | | |
289 | | static const JSPropertySpec arraybuffer_properties[] = { |
290 | | JS_SELF_HOSTED_SYM_GET(species, "ArrayBufferSpecies", 0), |
291 | | JS_PS_END |
292 | | }; |
293 | | |
294 | | |
295 | | static const JSFunctionSpec arraybuffer_proto_functions[] = { |
296 | | JS_SELF_HOSTED_FN("slice", "ArrayBufferSlice", 2, 0), |
297 | | JS_FS_END |
298 | | }; |
299 | | |
300 | | static const JSPropertySpec arraybuffer_proto_properties[] = { |
301 | | JS_PSG("byteLength", ArrayBufferObject::byteLengthGetter, 0), |
302 | | JS_STRING_SYM_PS(toStringTag, "ArrayBuffer", JSPROP_READONLY), |
303 | | JS_PS_END |
304 | | }; |
305 | | |
306 | | static const ClassSpec ArrayBufferObjectClassSpec = { |
307 | | GenericCreateConstructor<ArrayBufferObject::class_constructor, 1, gc::AllocKind::FUNCTION>, |
308 | | CreateArrayBufferPrototype, |
309 | | arraybuffer_functions, |
310 | | arraybuffer_properties, |
311 | | arraybuffer_proto_functions, |
312 | | arraybuffer_proto_properties |
313 | | }; |
314 | | |
315 | | static const ClassExtension ArrayBufferObjectClassExtension = { |
316 | | nullptr, /* weakmapKeyDelegateOp */ |
317 | | ArrayBufferObject::objectMoved |
318 | | }; |
319 | | |
320 | | const Class ArrayBufferObject::class_ = { |
321 | | "ArrayBuffer", |
322 | | JSCLASS_DELAY_METADATA_BUILDER | |
323 | | JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) | |
324 | | JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) | |
325 | | JSCLASS_BACKGROUND_FINALIZE, |
326 | | &ArrayBufferObjectClassOps, |
327 | | &ArrayBufferObjectClassSpec, |
328 | | &ArrayBufferObjectClassExtension |
329 | | }; |
330 | | |
331 | | const Class ArrayBufferObject::protoClass_ = { |
332 | | "ArrayBufferPrototype", |
333 | | JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer), |
334 | | JS_NULL_CLASS_OPS, |
335 | | &ArrayBufferObjectClassSpec |
336 | | }; |
337 | | |
338 | | bool |
339 | | js::IsArrayBuffer(HandleValue v) |
340 | 0 | { |
341 | 0 | return v.isObject() && v.toObject().is<ArrayBufferObject>(); |
342 | 0 | } |
343 | | |
344 | | bool |
345 | | js::IsArrayBuffer(HandleObject obj) |
346 | 0 | { |
347 | 0 | return obj->is<ArrayBufferObject>(); |
348 | 0 | } |
349 | | |
350 | | bool |
351 | | js::IsArrayBuffer(JSObject* obj) |
352 | 0 | { |
353 | 0 | return obj->is<ArrayBufferObject>(); |
354 | 0 | } |
355 | | |
356 | | ArrayBufferObject& |
357 | | js::AsArrayBuffer(HandleObject obj) |
358 | 0 | { |
359 | 0 | MOZ_ASSERT(IsArrayBuffer(obj)); |
360 | 0 | return obj->as<ArrayBufferObject>(); |
361 | 0 | } |
362 | | |
363 | | ArrayBufferObject& |
364 | | js::AsArrayBuffer(JSObject* obj) |
365 | 0 | { |
366 | 0 | MOZ_ASSERT(IsArrayBuffer(obj)); |
367 | 0 | return obj->as<ArrayBufferObject>(); |
368 | 0 | } |
369 | | |
370 | | bool |
371 | | js::IsArrayBufferMaybeShared(HandleValue v) |
372 | 0 | { |
373 | 0 | return v.isObject() && v.toObject().is<ArrayBufferObjectMaybeShared>(); |
374 | 0 | } |
375 | | |
376 | | bool |
377 | | js::IsArrayBufferMaybeShared(HandleObject obj) |
378 | 0 | { |
379 | 0 | return obj->is<ArrayBufferObjectMaybeShared>(); |
380 | 0 | } |
381 | | |
382 | | bool |
383 | | js::IsArrayBufferMaybeShared(JSObject* obj) |
384 | 0 | { |
385 | 0 | return obj->is<ArrayBufferObjectMaybeShared>(); |
386 | 0 | } |
387 | | |
388 | | ArrayBufferObjectMaybeShared& |
389 | | js::AsArrayBufferMaybeShared(HandleObject obj) |
390 | 0 | { |
391 | 0 | MOZ_ASSERT(IsArrayBufferMaybeShared(obj)); |
392 | 0 | return obj->as<ArrayBufferObjectMaybeShared>(); |
393 | 0 | } |
394 | | |
395 | | ArrayBufferObjectMaybeShared& |
396 | | js::AsArrayBufferMaybeShared(JSObject* obj) |
397 | 0 | { |
398 | 0 | MOZ_ASSERT(IsArrayBufferMaybeShared(obj)); |
399 | 0 | return obj->as<ArrayBufferObjectMaybeShared>(); |
400 | 0 | } |
401 | | |
402 | | MOZ_ALWAYS_INLINE bool |
403 | | ArrayBufferObject::byteLengthGetterImpl(JSContext* cx, const CallArgs& args) |
404 | 0 | { |
405 | 0 | MOZ_ASSERT(IsArrayBuffer(args.thisv())); |
406 | 0 | args.rval().setInt32(args.thisv().toObject().as<ArrayBufferObject>().byteLength()); |
407 | 0 | return true; |
408 | 0 | } |
409 | | |
410 | | bool |
411 | | ArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc, Value* vp) |
412 | 0 | { |
413 | 0 | CallArgs args = CallArgsFromVp(argc, vp); |
414 | 0 | return CallNonGenericMethod<IsArrayBuffer, byteLengthGetterImpl>(cx, args); |
415 | 0 | } |
416 | | |
417 | | /* |
418 | | * ArrayBuffer.isView(obj); ES6 (Dec 2013 draft) 24.1.3.1 |
419 | | */ |
420 | | bool |
421 | | ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp) |
422 | 0 | { |
423 | 0 | CallArgs args = CallArgsFromVp(argc, vp); |
424 | 0 | args.rval().setBoolean(args.get(0).isObject() && |
425 | 0 | JS_IsArrayBufferViewObject(&args.get(0).toObject())); |
426 | 0 | return true; |
427 | 0 | } |
428 | | |
429 | | // ES2017 draft 24.1.2.1 |
430 | | bool |
431 | | ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc, Value* vp) |
432 | 0 | { |
433 | 0 | CallArgs args = CallArgsFromVp(argc, vp); |
434 | 0 |
435 | 0 | // Step 1. |
436 | 0 | if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer")) { |
437 | 0 | return false; |
438 | 0 | } |
439 | 0 | |
440 | 0 | // Step 2. |
441 | 0 | uint64_t byteLength; |
442 | 0 | if (!ToIndex(cx, args.get(0), &byteLength)) { |
443 | 0 | return false; |
444 | 0 | } |
445 | 0 | |
446 | 0 | // Step 3 (Inlined 24.1.1.1 AllocateArrayBuffer). |
447 | 0 | // 24.1.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor). |
448 | 0 | RootedObject proto(cx); |
449 | 0 | if (!GetPrototypeFromBuiltinConstructor(cx, args, &proto)) { |
450 | 0 | return false; |
451 | 0 | } |
452 | 0 | |
453 | 0 | // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). |
454 | 0 | // Refuse to allocate too large buffers, currently limited to ~2 GiB. |
455 | 0 | if (byteLength > INT32_MAX) { |
456 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_ARRAY_LENGTH); |
457 | 0 | return false; |
458 | 0 | } |
459 | 0 | |
460 | 0 | // 24.1.1.1, steps 1 and 4-6. |
461 | 0 | JSObject* bufobj = create(cx, uint32_t(byteLength), proto); |
462 | 0 | if (!bufobj) { |
463 | 0 | return false; |
464 | 0 | } |
465 | 0 | args.rval().setObject(*bufobj); |
466 | 0 | return true; |
467 | 0 | } |
468 | | |
469 | | static ArrayBufferObject::BufferContents |
470 | | AllocateArrayBufferContents(JSContext* cx, uint32_t nbytes) |
471 | 0 | { |
472 | 0 | uint8_t* p = cx->pod_callocCanGC<uint8_t>(nbytes, |
473 | 0 | js::ArrayBufferContentsArena); |
474 | 0 | return ArrayBufferObject::BufferContents::create<ArrayBufferObject::PLAIN>(p); |
475 | 0 | } |
476 | | |
477 | | static void |
478 | | NoteViewBufferWasDetached(ArrayBufferViewObject* view, |
479 | | ArrayBufferObject::BufferContents newContents, |
480 | | JSContext* cx) |
481 | 0 | { |
482 | 0 | view->notifyBufferDetached(cx, newContents.data()); |
483 | 0 |
484 | 0 | // Notify compiled jit code that the base pointer has moved. |
485 | 0 | MarkObjectStateChange(cx, view); |
486 | 0 | } |
487 | | |
488 | | /* static */ void |
489 | | ArrayBufferObject::detach(JSContext* cx, Handle<ArrayBufferObject*> buffer, |
490 | | BufferContents newContents) |
491 | 0 | { |
492 | 0 | cx->check(buffer); |
493 | 0 | MOZ_ASSERT(!buffer->isPreparedForAsmJS()); |
494 | 0 |
495 | 0 | // When detaching buffers where we don't know all views, the new data must |
496 | 0 | // match the old data. All missing views are typed objects, which do not |
497 | 0 | // expect their data to ever change. |
498 | 0 | MOZ_ASSERT_IF(buffer->forInlineTypedObject(), |
499 | 0 | newContents.data() == buffer->dataPointer()); |
500 | 0 |
501 | 0 | // When detaching a buffer with typed object views, any jitcode accessing |
502 | 0 | // such views must be deoptimized so that detachment checks are performed. |
503 | 0 | // This is done by setting a zone-wide flag indicating that buffers with |
504 | 0 | // typed object views have been detached. |
505 | 0 | if (buffer->hasTypedObjectViews()) { |
506 | 0 | // Make sure the global object's group has been instantiated, so the |
507 | 0 | // flag change will be observed. |
508 | 0 | AutoEnterOOMUnsafeRegion oomUnsafe; |
509 | 0 | if (!JSObject::getGroup(cx, cx->global())) { |
510 | 0 | oomUnsafe.crash("ArrayBufferObject::detach"); |
511 | 0 | } |
512 | 0 | MarkObjectGroupFlags(cx, cx->global(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER); |
513 | 0 | cx->zone()->detachedTypedObjects = 1; |
514 | 0 | } |
515 | 0 |
516 | 0 | // Update all views of the buffer to account for the buffer having been |
517 | 0 | // detached, and clear the buffer's data and list of views. |
518 | 0 |
519 | 0 | auto& innerViews = ObjectRealm::get(buffer).innerViews.get(); |
520 | 0 | if (InnerViewTable::ViewVector* views = innerViews.maybeViewsUnbarriered(buffer)) { |
521 | 0 | for (size_t i = 0; i < views->length(); i++) { |
522 | 0 | NoteViewBufferWasDetached((*views)[i], newContents, cx); |
523 | 0 | } |
524 | 0 | innerViews.removeViews(buffer); |
525 | 0 | } |
526 | 0 | if (buffer->firstView()) { |
527 | 0 | if (buffer->forInlineTypedObject()) { |
528 | 0 | // The buffer points to inline data in its first view, so to keep |
529 | 0 | // this pointer alive we don't clear out the first view. |
530 | 0 | MOZ_ASSERT(buffer->firstView()->is<InlineTransparentTypedObject>()); |
531 | 0 | } else { |
532 | 0 | NoteViewBufferWasDetached(buffer->firstView(), newContents, cx); |
533 | 0 | buffer->setFirstView(nullptr); |
534 | 0 | } |
535 | 0 | } |
536 | 0 |
537 | 0 | if (newContents.data() != buffer->dataPointer()) { |
538 | 0 | buffer->setNewData(cx->runtime()->defaultFreeOp(), newContents, OwnsData); |
539 | 0 | } |
540 | 0 |
541 | 0 | buffer->setByteLength(0); |
542 | 0 | buffer->setIsDetached(); |
543 | 0 | } |
544 | | |
545 | | void |
546 | | ArrayBufferObject::setNewData(FreeOp* fop, BufferContents newContents, OwnsState ownsState) |
547 | 0 | { |
548 | 0 | if (ownsData()) { |
549 | 0 | MOZ_ASSERT(newContents.data() != dataPointer()); |
550 | 0 | releaseData(fop); |
551 | 0 | } |
552 | 0 |
553 | 0 | setDataPointer(newContents, ownsState); |
554 | 0 | } |
555 | | |
556 | | // This is called *only* from changeContents(), below. |
557 | | // By construction, every view parameter will be mapping unshared memory (an ArrayBuffer). |
558 | | // Hence no reason to worry about shared memory here. |
559 | | |
560 | | void |
561 | | ArrayBufferObject::changeViewContents(JSContext* cx, ArrayBufferViewObject* view, |
562 | | uint8_t* oldDataPointer, BufferContents newContents) |
563 | 0 | { |
564 | 0 | MOZ_ASSERT(!view->isSharedMemory()); |
565 | 0 |
566 | 0 | // Watch out for NULL data pointers in views. This means that the view |
567 | 0 | // is not fully initialized (in which case it'll be initialized later |
568 | 0 | // with the correct pointer). |
569 | 0 | JS::AutoCheckCannotGC nogc; |
570 | 0 | uint8_t* viewDataPointer = view->dataPointerUnshared(nogc); |
571 | 0 | if (viewDataPointer) { |
572 | 0 | MOZ_ASSERT(newContents); |
573 | 0 | ptrdiff_t offset = viewDataPointer - oldDataPointer; |
574 | 0 | viewDataPointer = static_cast<uint8_t*>(newContents.data()) + offset; |
575 | 0 | view->setDataPointerUnshared(viewDataPointer); |
576 | 0 | } |
577 | 0 |
578 | 0 | // Notify compiled jit code that the base pointer has moved. |
579 | 0 | MarkObjectStateChange(cx, view); |
580 | 0 | } |
581 | | |
582 | | // BufferContents is specific to ArrayBuffer, hence it will not represent shared memory. |
583 | | |
584 | | void |
585 | | ArrayBufferObject::changeContents(JSContext* cx, BufferContents newContents, |
586 | | OwnsState ownsState) |
587 | 0 | { |
588 | 0 | MOZ_RELEASE_ASSERT(!isWasm()); |
589 | 0 | MOZ_ASSERT(!forInlineTypedObject()); |
590 | 0 |
591 | 0 | // Change buffer contents. |
592 | 0 | uint8_t* oldDataPointer = dataPointer(); |
593 | 0 | setNewData(cx->runtime()->defaultFreeOp(), newContents, ownsState); |
594 | 0 |
595 | 0 | // Update all views. |
596 | 0 | auto& innerViews = ObjectRealm::get(this).innerViews.get(); |
597 | 0 | if (InnerViewTable::ViewVector* views = innerViews.maybeViewsUnbarriered(this)) { |
598 | 0 | for (size_t i = 0; i < views->length(); i++) { |
599 | 0 | changeViewContents(cx, (*views)[i], oldDataPointer, newContents); |
600 | 0 | } |
601 | 0 | } |
602 | 0 | if (firstView()) { |
603 | 0 | changeViewContents(cx, firstView(), oldDataPointer, newContents); |
604 | 0 | } |
605 | 0 | } |
606 | | |
607 | | /* |
608 | | * [SMDOC] WASM Linear Memory structure |
609 | | * |
610 | | * Wasm Raw Buf Linear Memory Structure |
611 | | * |
612 | | * The linear heap in Wasm is an mmapped array buffer. Several |
613 | | * constants manage its lifetime: |
614 | | * |
615 | | * - length - the wasm-visible current length of the buffer. Accesses in the |
616 | | * range [0, length] succeed. May only increase. |
617 | | * |
618 | | * - boundsCheckLimit - when !WASM_HUGE_MEMORY, the size against which we |
619 | | * perform bounds checks. It is always a constant offset smaller than |
620 | | * mappedSize. Currently that constant offset is 64k (wasm::GuardSize). |
621 | | * |
622 | | * - maxSize - the optional declared limit on how much length can grow. |
623 | | * |
624 | | * - mappedSize - the actual mmapped size. Access in the range |
625 | | * [0, mappedSize] will either succeed, or be handled by the wasm signal |
626 | | * handlers. |
627 | | * |
628 | | * The below diagram shows the layout of the wasm heap. The wasm-visible |
629 | | * portion of the heap starts at 0. There is one extra page prior to the |
630 | | * start of the wasm heap which contains the WasmArrayRawBuffer struct at |
631 | | * its end (i.e. right before the start of the WASM heap). |
632 | | * |
633 | | * WasmArrayRawBuffer |
634 | | * \ ArrayBufferObject::dataPointer() |
635 | | * \ / |
636 | | * \ | |
637 | | * ______|_|____________________________________________________________ |
638 | | * |______|_|______________|___________________|____________|____________| |
639 | | * 0 length maxSize boundsCheckLimit mappedSize |
640 | | * |
641 | | * \_______________________/ |
642 | | * COMMITTED |
643 | | * \____________________________________________/ |
644 | | * SLOP |
645 | | * \_____________________________________________________________________/ |
646 | | * MAPPED |
647 | | * |
648 | | * Invariants: |
649 | | * - length only increases |
650 | | * - 0 <= length <= maxSize (if present) <= boundsCheckLimit <= mappedSize |
651 | | * - on ARM boundsCheckLimit must be a valid ARM immediate. |
652 | | * - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They are |
653 | | * otherwise constant. |
654 | | * |
655 | | * NOTE: For asm.js on non-x64 we guarantee that |
656 | | * |
657 | | * length == maxSize == boundsCheckLimit == mappedSize |
658 | | * |
659 | | * That is, signal handlers will not be invoked, since they cannot emulate |
660 | | * asm.js accesses on non-x64 architectures. |
661 | | * |
662 | | * The region between length and mappedSize is the SLOP - an area where we use |
663 | | * signal handlers to catch things that slip by bounds checks. Logically it has |
664 | | * two parts: |
665 | | * |
666 | | * - from length to boundsCheckLimit - this part of the SLOP serves to catch |
667 | | * accesses to memory we have reserved but not yet grown into. This allows us |
668 | | * to grow memory up to max (when present) without having to patch/update the |
669 | | * bounds checks. |
670 | | * |
671 | | * - from boundsCheckLimit to mappedSize - this part of the SLOP allows us to |
672 | | * bounds check against base pointers and fold some constant offsets inside |
673 | | * loads. This enables better Bounds Check Elimination. |
674 | | * |
675 | | */ |
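A hedged restatement of the invariants listed above as code (illustrative helper; the real checks are spread across the WasmArrayRawBuffer methods below):

    // Illustrative only: the ordering invariant from the comment above.
    static void CheckWasmHeapInvariants(uint32_t length, const mozilla::Maybe<uint32_t>& maxSize,
                                        uint32_t boundsCheckLimit, size_t mappedSize) {
        MOZ_ASSERT(length <= boundsCheckLimit);
        if (maxSize) {
            MOZ_ASSERT(length <= maxSize.value());
            MOZ_ASSERT(maxSize.value() <= boundsCheckLimit);
        }
        MOZ_ASSERT(boundsCheckLimit <= mappedSize);
        // When !WASM_HUGE_MEMORY, boundsCheckLimit is mappedSize - wasm::GuardSize.
    }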
676 | | |
677 | | class js::WasmArrayRawBuffer |
678 | | { |
679 | | Maybe<uint32_t> maxSize_; |
680 | | size_t mappedSize_; // Not including the header page |
681 | | |
682 | | protected: |
683 | | WasmArrayRawBuffer(uint8_t* buffer, const Maybe<uint32_t>& maxSize, size_t mappedSize) |
684 | | : maxSize_(maxSize), mappedSize_(mappedSize) |
685 | 0 | { |
686 | 0 | MOZ_ASSERT(buffer == dataPointer()); |
687 | 0 | } |
688 | | |
689 | | public: |
690 | | static WasmArrayRawBuffer* Allocate(uint32_t numBytes, const Maybe<uint32_t>& maxSize); |
691 | | static void Release(void* mem); |
692 | | |
693 | 0 | uint8_t* dataPointer() { |
694 | 0 | uint8_t* ptr = reinterpret_cast<uint8_t*>(this); |
695 | 0 | return ptr + sizeof(WasmArrayRawBuffer); |
696 | 0 | } |
697 | | |
698 | 0 | uint8_t* basePointer() { |
699 | 0 | return dataPointer() - gc::SystemPageSize(); |
700 | 0 | } |
701 | | |
702 | 0 | size_t mappedSize() const { |
703 | 0 | return mappedSize_; |
704 | 0 | } |
705 | | |
706 | 0 | Maybe<uint32_t> maxSize() const { |
707 | 0 | return maxSize_; |
708 | 0 | } |
709 | | |
710 | | #ifndef WASM_HUGE_MEMORY |
711 | | uint32_t boundsCheckLimit() const { |
712 | | MOZ_ASSERT(mappedSize_ <= UINT32_MAX); |
713 | | MOZ_ASSERT(mappedSize_ >= wasm::GuardSize); |
714 | | MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize_ - wasm::GuardSize)); |
715 | | return mappedSize_ - wasm::GuardSize; |
716 | | } |
717 | | #endif |
718 | | |
719 | 0 | MOZ_MUST_USE bool growToSizeInPlace(uint32_t oldSize, uint32_t newSize) { |
720 | 0 | MOZ_ASSERT(newSize >= oldSize); |
721 | 0 | MOZ_ASSERT_IF(maxSize(), newSize <= maxSize().value()); |
722 | 0 | MOZ_ASSERT(newSize <= mappedSize()); |
723 | 0 |
724 | 0 | uint32_t delta = newSize - oldSize; |
725 | 0 | MOZ_ASSERT(delta % wasm::PageSize == 0); |
726 | 0 |
727 | 0 | uint8_t* dataEnd = dataPointer() + oldSize; |
728 | 0 | MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0); |
729 | 0 |
730 | 0 | if (delta && !CommitBufferMemory(dataEnd, delta)) { |
731 | 0 | return false; |
732 | 0 | } |
733 | 0 | |
734 | 0 | return true; |
735 | 0 | } |
736 | | |
737 | | #ifndef WASM_HUGE_MEMORY |
738 | | bool extendMappedSize(uint32_t maxSize) { |
739 | | size_t newMappedSize = wasm::ComputeMappedSize(maxSize); |
740 | | MOZ_ASSERT(mappedSize_ <= newMappedSize); |
741 | | if (mappedSize_ == newMappedSize) { |
742 | | return true; |
743 | | } |
744 | | |
745 | | if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) { |
746 | | return false; |
747 | | } |
748 | | |
749 | | mappedSize_ = newMappedSize; |
750 | | return true; |
751 | | } |
752 | | |
753 | | // Try to grow the mapped region of memory. Does not change the current size. |
754 | | // Does not move memory if there is no space to grow. |
755 | | void tryGrowMaxSizeInPlace(uint32_t deltaMaxSize) { |
756 | | CheckedInt<uint32_t> newMaxSize = maxSize_.value(); |
757 | | newMaxSize += deltaMaxSize; |
758 | | MOZ_ASSERT(newMaxSize.isValid()); |
759 | | MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0); |
760 | | |
761 | | if (!extendMappedSize(newMaxSize.value())) { |
762 | | return; |
763 | | } |
764 | | |
765 | | maxSize_ = Some(newMaxSize.value()); |
766 | | } |
767 | | #endif // WASM_HUGE_MEMORY |
768 | | }; |
769 | | |
770 | | /* static */ WasmArrayRawBuffer* |
771 | | WasmArrayRawBuffer::Allocate(uint32_t numBytes, const Maybe<uint32_t>& maxSize) |
772 | 0 | { |
773 | 0 | MOZ_RELEASE_ASSERT(numBytes <= ArrayBufferObject::MaxBufferByteLength); |
774 | 0 |
775 | 0 | size_t mappedSize; |
776 | 0 | #ifdef WASM_HUGE_MEMORY |
777 | 0 | mappedSize = wasm::HugeMappedSize; |
778 | | #else |
779 | | mappedSize = wasm::ComputeMappedSize(maxSize.valueOr(numBytes)); |
780 | | #endif |
781 | |
782 | 0 | MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize()); |
783 | 0 | MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX)); |
784 | 0 | MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0); |
785 | 0 | MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); |
786 | 0 |
|
787 | 0 | uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize(); |
788 | 0 | uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize(); |
789 | 0 |
|
790 | 0 | void* data = MapBufferMemory((size_t) mappedSizeWithHeader, (size_t) numBytesWithHeader); |
791 | 0 | if (!data) { |
792 | 0 | return nullptr; |
793 | 0 | } |
794 | 0 | |
795 | 0 | uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize(); |
796 | 0 | uint8_t* header = base - sizeof(WasmArrayRawBuffer); |
797 | 0 |
798 | 0 | auto rawBuf = new (header) WasmArrayRawBuffer(base, maxSize, mappedSize); |
799 | 0 | return rawBuf; |
800 | 0 | } |
801 | | |
802 | | /* static */ void |
803 | | WasmArrayRawBuffer::Release(void* mem) |
804 | 0 | { |
805 | 0 | WasmArrayRawBuffer* header = (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer)); |
806 | 0 |
807 | 0 | MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize()); |
808 | 0 | size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize(); |
809 | 0 |
810 | 0 | UnmapBufferMemory(header->basePointer(), mappedSizeWithHeader); |
811 | 0 | } |
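To make the layout used by Allocate() and Release() concrete, a hedged sketch of the pointer arithmetic (hypothetical helpers expressing the same relationships):

    // Illustrative only. |mapping| is what MapBufferMemory() returned; scripts see
    // the memory one system page in, and the header struct sits at the end of
    // that first page.
    static uint8_t* WasmDataFromMapping(void* mapping) {
        return reinterpret_cast<uint8_t*>(mapping) + js::gc::SystemPageSize();
    }
    static js::WasmArrayRawBuffer* HeaderFromData(uint8_t* data) {
        return reinterpret_cast<js::WasmArrayRawBuffer*>(data - sizeof(js::WasmArrayRawBuffer));
    }
    static size_t TotalMappedSize(size_t mappedSize) {
        return mappedSize + js::gc::SystemPageSize();  // Release() unmaps the header page too
    }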
812 | | |
813 | | WasmArrayRawBuffer* |
814 | | ArrayBufferObject::BufferContents::wasmBuffer() const |
815 | 0 | { |
816 | 0 | MOZ_RELEASE_ASSERT(kind_ == WASM); |
817 | 0 | return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer)); |
818 | 0 | } |
819 | | |
820 | | template<typename ObjT, typename RawbufT> |
821 | | static bool |
822 | | CreateBuffer(JSContext* cx, uint32_t initialSize, const Maybe<uint32_t>& maxSize, |
823 | | MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) |
824 | 0 | { |
825 | 0 | #define ROUND_UP(v, a) ((v) % (a) == 0 ? (v) : v + a - ((v) % (a))) |
826 | 0 |
827 | 0 | RawbufT* buffer = RawbufT::Allocate(initialSize, maxSize); |
828 | 0 | if (!buffer) { |
829 | 0 | #ifdef WASM_HUGE_MEMORY |
830 | 0 | ReportOutOfMemory(cx); |
831 | 0 | return false; |
832 | | #else |
833 | | // If we fail, and have a maxSize, try to reserve the biggest chunk in |
834 | | // the range [initialSize, maxSize) using log backoff. |
835 | | if (!maxSize) { |
836 | | ReportOutOfMemory(cx); |
837 | | return false; |
838 | | } |
839 | | |
840 | | uint32_t cur = maxSize.value() / 2; |
841 | | |
842 | | for (; cur > initialSize; cur /= 2) { |
843 | | buffer = RawbufT::Allocate(initialSize, mozilla::Some(ROUND_UP(cur, wasm::PageSize))); |
844 | | if (buffer) { |
845 | | break; |
846 | | } |
847 | | } |
848 | | |
849 | | if (!buffer) { |
850 | | ReportOutOfMemory(cx); |
851 | | return false; |
852 | | } |
853 | | |
854 | | // Try to grow our chunk as much as possible. |
855 | | for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) { |
856 | | buffer->tryGrowMaxSizeInPlace(ROUND_UP(d, wasm::PageSize)); |
857 | | } |
858 | | #endif |
859 | | } |
860 | 0 |
861 | 0 | #undef ROUND_UP |
862 | 0 |
863 | 0 | // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case |
864 | 0 | // of failure. |
865 | 0 | ObjT* object = ObjT::createFromNewRawBuffer(cx, buffer, initialSize); |
866 | 0 | if (!object) { |
867 | 0 | return false; |
868 | 0 | } |
869 | 0 | |
870 | 0 | maybeSharedObject.set(object); |
871 | 0 |
872 | 0 | // See MaximumLiveMappedBuffers comment above. |
873 | 0 | if (liveBufferCount > StartSyncFullGCAtLiveBufferCount) { |
874 | 0 | JS::PrepareForFullGC(cx); |
875 | 0 | JS::NonIncrementalGC(cx, GC_NORMAL, JS::gcreason::TOO_MUCH_WASM_MEMORY); |
876 | 0 | allocatedSinceLastTrigger = 0; |
877 | 0 | } else if (liveBufferCount > StartTriggeringAtLiveBufferCount) { |
878 | 0 | allocatedSinceLastTrigger++; |
879 | 0 | if (allocatedSinceLastTrigger > AllocatedBuffersPerTrigger) { |
880 | 0 | Unused << cx->runtime()->gc.triggerGC(JS::gcreason::TOO_MUCH_WASM_MEMORY); |
881 | 0 | allocatedSinceLastTrigger = 0; |
882 | 0 | } |
883 | 0 | } else { |
884 | 0 | allocatedSinceLastTrigger = 0; |
885 | 0 | } |
886 | 0 |
887 | 0 | return true; |
888 | 0 | } |
Unexecuted instantiation: Unified_cpp_js_src31.cpp:bool CreateBuffer<js::SharedArrayBufferObject, js::SharedArrayRawBuffer>(JSContext*, unsigned int, mozilla::Maybe<unsigned int> const&, JS::MutableHandle<js::ArrayBufferObjectMaybeShared*>)
Unexecuted instantiation: Unified_cpp_js_src31.cpp:bool CreateBuffer<js::ArrayBufferObject, js::WasmArrayRawBuffer>(JSContext*, unsigned int, mozilla::Maybe<unsigned int> const&, JS::MutableHandle<js::ArrayBufferObjectMaybeShared*>)
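A hedged, standalone sketch of the log-backoff reservation used above (hypothetical helper; the real code also rounds each candidate up to wasm::PageSize and afterwards greedily grows the reservation back toward the declared maximum):

    // Illustrative only: halve the requested maximum until a reservation succeeds,
    // never going below the required initial size.
    template <typename RawbufT>
    static RawbufT* ReserveWithBackoff(uint32_t initialSize, uint32_t maxSize) {
        for (uint32_t cur = maxSize / 2; cur > initialSize; cur /= 2) {
            if (RawbufT* buffer = RawbufT::Allocate(initialSize, mozilla::Some(cur))) {
                return buffer;        // largest reservation that succeeded
            }
        }
        return nullptr;               // caller reports OOM
    }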
889 | | |
890 | | bool |
891 | | js::CreateWasmBuffer(JSContext* cx, const wasm::Limits& memory, |
892 | | MutableHandleArrayBufferObjectMaybeShared buffer) |
893 | 0 | { |
894 | 0 | MOZ_ASSERT(memory.initial % wasm::PageSize == 0); |
895 | 0 | MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers()); |
896 | 0 | MOZ_RELEASE_ASSERT((memory.initial / wasm::PageSize) <= wasm::MaxMemoryInitialPages); |
897 | 0 |
898 | 0 | // Prevent applications specifying a large max (like UINT32_MAX) from |
899 | 0 | // unintentionally OOMing the browser on 32-bit: they just want "a lot of |
900 | 0 | // memory". Maintain the invariant that initialSize <= maxSize. |
901 | 0 |
902 | 0 | Maybe<uint32_t> maxSize = memory.maximum; |
903 | 0 | if (sizeof(void*) == 4 && maxSize) { |
904 | 0 | static const uint32_t OneGiB = 1 << 30; |
905 | 0 | uint32_t clamp = Max(OneGiB, memory.initial); |
906 | 0 | maxSize = Some(Min(clamp, *maxSize)); |
907 | 0 | } |
908 | 0 |
909 | | #ifndef WASM_HUGE_MEMORY |
910 | | if (sizeof(void*) == 8 && maxSize && maxSize.value() >= (UINT32_MAX - wasm::PageSize)) { |
911 | | // On 64-bit platforms that don't define WASM_HUGE_MEMORY, |
912 | | // clamp maxSize to a smaller value that satisfies the 32-bit invariants |
913 | | // maxSize + wasm::PageSize < UINT32_MAX and maxSize % wasm::PageSize == 0. |
914 | | uint32_t clamp = (wasm::MaxMemoryMaximumPages - 2) * wasm::PageSize; |
915 | | MOZ_ASSERT(clamp < UINT32_MAX); |
916 | | MOZ_ASSERT(memory.initial <= clamp); |
917 | | maxSize = Some(clamp); |
918 | | } |
919 | | #endif |
920 | |
921 | 0 | if (memory.shared == wasm::Shareable::True) { |
922 | 0 | if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) { |
923 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_NO_SHMEM_LINK); |
924 | 0 | return false; |
925 | 0 | } |
926 | 0 | return CreateBuffer<SharedArrayBufferObject, SharedArrayRawBuffer>(cx, memory.initial, |
927 | 0 | maxSize, buffer); |
928 | 0 | } |
929 | 0 | return CreateBuffer<ArrayBufferObject, WasmArrayRawBuffer>(cx, memory.initial, maxSize, |
930 | 0 | buffer); |
931 | 0 | } |
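For concreteness (illustrative numbers): on a 32-bit build, a module declaring initial = 64 KiB and maximum = UINT32_MAX ends up with maxSize = Min(Max(1 GiB, 64 KiB), UINT32_MAX) = 1 GiB, so at most ~1 GiB of address space is reserved rather than ~4 GiB.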
932 | | |
933 | | // Note this function can return false with or without an exception pending. The |
934 | | // asm.js caller checks cx->isExceptionPending before propagating failure. |
935 | | // Returning false without throwing means that asm.js linking will fail, |
936 | | // which causes the code to be recompiled as plain (non-asm.js) JavaScript. |
937 | | /* static */ bool |
938 | | ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool needGuard) |
939 | 0 | { |
940 | 0 | #ifdef WASM_HUGE_MEMORY |
941 | 0 | MOZ_ASSERT(needGuard); |
942 | 0 | #endif |
943 | 0 | MOZ_ASSERT(buffer->byteLength() % wasm::PageSize == 0); |
944 | 0 | MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers()); |
945 | 0 |
946 | 0 | if (buffer->forInlineTypedObject()) { |
947 | 0 | return false; |
948 | 0 | } |
949 | 0 | |
950 | 0 | if (needGuard) { |
951 | 0 | if (buffer->isWasm() && buffer->isPreparedForAsmJS()) { |
952 | 0 | return true; |
953 | 0 | } |
954 | 0 | |
955 | 0 | // Non-prepared-for-asm.js wasm buffers can be detached at any time. |
956 | 0 | // This error can only be triggered for Atomics on !WASM_HUGE_MEMORY |
957 | 0 | // so this error is only visible in testing. |
958 | 0 | if (buffer->isWasm() || buffer->isPreparedForAsmJS()) { |
959 | 0 | return false; |
960 | 0 | } |
961 | 0 | |
962 | 0 | uint32_t length = buffer->byteLength(); |
963 | 0 | WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length)); |
964 | 0 | if (!wasmBuf) { |
965 | 0 | ReportOutOfMemory(cx); |
966 | 0 | return false; |
967 | 0 | } |
968 | 0 | |
969 | 0 | void* data = wasmBuf->dataPointer(); |
970 | 0 | memcpy(data, buffer->dataPointer(), length); |
971 | 0 |
972 | 0 | // Swap the new elements into the ArrayBufferObject. Mark the |
973 | 0 | // ArrayBufferObject so we don't do this again. |
974 | 0 | buffer->changeContents(cx, BufferContents::create<WASM>(data), OwnsData); |
975 | 0 | buffer->setIsPreparedForAsmJS(); |
976 | 0 | MOZ_ASSERT(data == buffer->dataPointer()); |
977 | 0 | cx->updateMallocCounter(wasmBuf->mappedSize()); |
978 | 0 | return true; |
979 | 0 | } |
980 | 0 |
981 | 0 | if (!buffer->isWasm() && buffer->isPreparedForAsmJS()) { |
982 | 0 | return true; |
983 | 0 | } |
984 | 0 | |
985 | 0 | // Non-prepared-for-asm.js wasm buffers can be detached at any time. |
986 | 0 | if (buffer->isWasm()) { |
987 | 0 | return false; |
988 | 0 | } |
989 | 0 | |
990 | 0 | if (!buffer->ownsData()) { |
991 | 0 | BufferContents contents = AllocateArrayBufferContents(cx, buffer->byteLength()); |
992 | 0 | if (!contents) { |
993 | 0 | return false; |
994 | 0 | } |
995 | 0 | memcpy(contents.data(), buffer->dataPointer(), buffer->byteLength()); |
996 | 0 | buffer->changeContents(cx, contents, OwnsData); |
997 | 0 | } |
998 | 0 |
999 | 0 | buffer->setIsPreparedForAsmJS(); |
1000 | 0 | return true; |
1001 | 0 | } |
1002 | | |
1003 | | ArrayBufferObject::BufferContents |
1004 | | ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length) |
1005 | 0 | { |
1006 | 0 | void* data = gc::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT); |
1007 | 0 | return BufferContents::create<MAPPED>(data); |
1008 | 0 | } |
1009 | | |
1010 | | uint8_t* |
1011 | | ArrayBufferObject::inlineDataPointer() const |
1012 | 0 | { |
1013 | 0 | return static_cast<uint8_t*>(fixedData(JSCLASS_RESERVED_SLOTS(&class_))); |
1014 | 0 | } |
1015 | | |
1016 | | uint8_t* |
1017 | | ArrayBufferObject::dataPointer() const |
1018 | 0 | { |
1019 | 0 | return static_cast<uint8_t*>(getFixedSlot(DATA_SLOT).toPrivate()); |
1020 | 0 | } |
1021 | | |
1022 | | SharedMem<uint8_t*> |
1023 | | ArrayBufferObject::dataPointerShared() const |
1024 | 0 | { |
1025 | 0 | return SharedMem<uint8_t*>::unshared(getFixedSlot(DATA_SLOT).toPrivate()); |
1026 | 0 | } |
1027 | | |
1028 | | ArrayBufferObject::FreeInfo* |
1029 | | ArrayBufferObject::freeInfo() const |
1030 | 0 | { |
1031 | 0 | MOZ_ASSERT(isExternal()); |
1032 | 0 | return reinterpret_cast<FreeInfo*>(inlineDataPointer()); |
1033 | 0 | } |
1034 | | |
1035 | | void |
1036 | | ArrayBufferObject::releaseData(FreeOp* fop) |
1037 | 0 | { |
1038 | 0 | MOZ_ASSERT(ownsData()); |
1039 | 0 |
1040 | 0 | switch (bufferKind()) { |
1041 | 0 | case PLAIN: |
1042 | 0 | fop->free_(dataPointer()); |
1043 | 0 | break; |
1044 | 0 | case MAPPED: |
1045 | 0 | gc::DeallocateMappedContent(dataPointer(), byteLength()); |
1046 | 0 | break; |
1047 | 0 | case WASM: |
1048 | 0 | WasmArrayRawBuffer::Release(dataPointer()); |
1049 | 0 | break; |
1050 | 0 | case EXTERNAL: |
1051 | 0 | if (freeInfo()->freeFunc) { |
1052 | 0 | // The analyzer can't know for sure whether the embedder-supplied |
1053 | 0 | // free function will GC. We give the analyzer a hint here. |
1054 | 0 | // (Doing a GC in the free function is considered a programmer |
1055 | 0 | // error.) |
1056 | 0 | JS::AutoSuppressGCAnalysis nogc; |
1057 | 0 | freeInfo()->freeFunc(dataPointer(), freeInfo()->freeUserData); |
1058 | 0 | } |
1059 | 0 | break; |
1060 | 0 | } |
1061 | 0 | } |
1062 | | |
1063 | | void |
1064 | | ArrayBufferObject::setDataPointer(BufferContents contents, OwnsState ownsData) |
1065 | 0 | { |
1066 | 0 | setFixedSlot(DATA_SLOT, PrivateValue(contents.data())); |
1067 | 0 | setOwnsData(ownsData); |
1068 | 0 | setFlags((flags() & ~KIND_MASK) | contents.kind()); |
1069 | 0 |
1070 | 0 | if (isExternal()) { |
1071 | 0 | auto info = freeInfo(); |
1072 | 0 | info->freeFunc = contents.freeFunc(); |
1073 | 0 | info->freeUserData = contents.freeUserData(); |
1074 | 0 | } |
1075 | 0 | } |
1076 | | |
1077 | | uint32_t |
1078 | | ArrayBufferObject::byteLength() const |
1079 | 0 | { |
1080 | 0 | return getFixedSlot(BYTE_LENGTH_SLOT).toInt32(); |
1081 | 0 | } |
1082 | | |
1083 | | void |
1084 | | ArrayBufferObject::setByteLength(uint32_t length) |
1085 | 0 | { |
1086 | 0 | MOZ_ASSERT(length <= INT32_MAX); |
1087 | 0 | setFixedSlot(BYTE_LENGTH_SLOT, Int32Value(length)); |
1088 | 0 | } |
1089 | | |
1090 | | size_t |
1091 | | ArrayBufferObject::wasmMappedSize() const |
1092 | 0 | { |
1093 | 0 | if (isWasm()) { |
1094 | 0 | return contents().wasmBuffer()->mappedSize(); |
1095 | 0 | } |
1096 | 0 | return byteLength(); |
1097 | 0 | } |
1098 | | |
1099 | | size_t |
1100 | | js::WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) |
1101 | 0 | { |
1102 | 0 | if (buf->is<ArrayBufferObject>()) { |
1103 | 0 | return buf->as<ArrayBufferObject>().wasmMappedSize(); |
1104 | 0 | } |
1105 | 0 | return buf->as<SharedArrayBufferObject>().wasmMappedSize(); |
1106 | 0 | } |
1107 | | |
1108 | | Maybe<uint32_t> |
1109 | | ArrayBufferObject::wasmMaxSize() const |
1110 | 0 | { |
1111 | 0 | if (isWasm()) { |
1112 | 0 | return contents().wasmBuffer()->maxSize(); |
1113 | 0 | } else { |
1114 | 0 | return Some<uint32_t>(byteLength()); |
1115 | 0 | } |
1116 | 0 | } |
1117 | | |
1118 | | Maybe<uint32_t> |
1119 | | js::WasmArrayBufferMaxSize(const ArrayBufferObjectMaybeShared* buf) |
1120 | 0 | { |
1121 | 0 | if (buf->is<ArrayBufferObject>()) { |
1122 | 0 | return buf->as<ArrayBufferObject>().wasmMaxSize(); |
1123 | 0 | } |
1124 | 0 | return buf->as<SharedArrayBufferObject>().wasmMaxSize(); |
1125 | 0 | } |
1126 | | |
1127 | | /* static */ bool |
1128 | | ArrayBufferObject::wasmGrowToSizeInPlace(uint32_t newSize, |
1129 | | HandleArrayBufferObject oldBuf, |
1130 | | MutableHandleArrayBufferObject newBuf, |
1131 | | JSContext* cx) |
1132 | 0 | { |
1133 | 0 | // On failure, do not throw and ensure that the original buffer is |
1134 | 0 | // unmodified and valid. After WasmArrayRawBuffer::growToSizeInPlace(), the |
1135 | 0 | // wasm-visible length of the buffer has been increased so it must be the |
1136 | 0 | // last fallible operation. |
1137 | 0 |
1138 | 0 | if (newSize > ArrayBufferObject::MaxBufferByteLength) { |
1139 | 0 | return false; |
1140 | 0 | } |
1141 | 0 | |
1142 | 0 | newBuf.set(ArrayBufferObject::createEmpty(cx)); |
1143 | 0 | if (!newBuf) { |
1144 | 0 | cx->clearPendingException(); |
1145 | 0 | return false; |
1146 | 0 | } |
1147 | 0 | |
1148 | 0 | if (!oldBuf->contents().wasmBuffer()->growToSizeInPlace(oldBuf->byteLength(), newSize)) { |
1149 | 0 | return false; |
1150 | 0 | } |
1151 | 0 | |
1152 | 0 | bool hasStealableContents = true; |
1153 | 0 | BufferContents contents = ArrayBufferObject::stealContents(cx, oldBuf, hasStealableContents); |
1154 | 0 | MOZ_ASSERT(contents); |
1155 | 0 | newBuf->initialize(newSize, contents, OwnsData); |
1156 | 0 | return true; |
1157 | 0 | } |
1158 | | |
1159 | | #ifndef WASM_HUGE_MEMORY |
1160 | | /* static */ bool |
1161 | | ArrayBufferObject::wasmMovingGrowToSize(uint32_t newSize, |
1162 | | HandleArrayBufferObject oldBuf, |
1163 | | MutableHandleArrayBufferObject newBuf, |
1164 | | JSContext* cx) |
1165 | | { |
1166 | | // On failure, do not throw and ensure that the original buffer is |
1167 | | // unmodified and valid. |
1168 | | |
1169 | | if (newSize > ArrayBufferObject::MaxBufferByteLength) { |
1170 | | return false; |
1171 | | } |
1172 | | |
1173 | | if (newSize <= oldBuf->wasmBoundsCheckLimit() || |
1174 | | oldBuf->contents().wasmBuffer()->extendMappedSize(newSize)) |
1175 | | { |
1176 | | return wasmGrowToSizeInPlace(newSize, oldBuf, newBuf, cx); |
1177 | | } |
1178 | | |
1179 | | newBuf.set(ArrayBufferObject::createEmpty(cx)); |
1180 | | if (!newBuf) { |
1181 | | cx->clearPendingException(); |
1182 | | return false; |
1183 | | } |
1184 | | |
1185 | | WasmArrayRawBuffer* newRawBuf = WasmArrayRawBuffer::Allocate(newSize, Nothing()); |
1186 | | if (!newRawBuf) { |
1187 | | return false; |
1188 | | } |
1189 | | BufferContents contents = BufferContents::create<WASM>(newRawBuf->dataPointer()); |
1190 | | newBuf->initialize(newSize, contents, OwnsData); |
1191 | | |
1192 | | memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), oldBuf->byteLength()); |
1193 | | ArrayBufferObject::detach(cx, oldBuf, BufferContents::createPlain(nullptr)); |
1194 | | return true; |
1195 | | } |
1196 | | |
1197 | | uint32_t |
1198 | | ArrayBufferObject::wasmBoundsCheckLimit() const |
1199 | | { |
1200 | | if (isWasm()) { |
1201 | | return contents().wasmBuffer()->boundsCheckLimit(); |
1202 | | } |
1203 | | return byteLength(); |
1204 | | } |
1205 | | |
1206 | | uint32_t |
1207 | | ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const |
1208 | | { |
1209 | | if (is<ArrayBufferObject>()) { |
1210 | | return as<ArrayBufferObject>().wasmBoundsCheckLimit(); |
1211 | | } |
1212 | | return as<SharedArrayBufferObject>().wasmBoundsCheckLimit(); |
1213 | | } |
1214 | | #endif |
1215 | | |
1216 | | uint32_t |
1217 | | ArrayBufferObject::flags() const |
1218 | 0 | { |
1219 | 0 | return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32()); |
1220 | 0 | } |
1221 | | |
1222 | | void |
1223 | | ArrayBufferObject::setFlags(uint32_t flags) |
1224 | 0 | { |
1225 | 0 | setFixedSlot(FLAGS_SLOT, Int32Value(flags)); |
1226 | 0 | } |
1227 | | |
1228 | | ArrayBufferObject* |
1229 | | ArrayBufferObject::create(JSContext* cx, uint32_t nbytes, BufferContents contents, |
1230 | | OwnsState ownsState /* = OwnsData */, |
1231 | | HandleObject proto /* = nullptr */, |
1232 | | NewObjectKind newKind /* = GenericObject */) |
1233 | 0 | { |
1234 | 0 | MOZ_ASSERT_IF(contents.kind() == MAPPED, contents); |
1235 | 0 |
1236 | 0 | // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). |
1237 | 0 | // Refuse to allocate too large buffers, currently limited to ~2 GiB. |
1238 | 0 | if (nbytes > INT32_MAX) { |
1239 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_ARRAY_LENGTH); |
1240 | 0 | return nullptr; |
1241 | 0 | } |
1242 | 0 | |
1243 | 0 | // If we need to allocate data, try to use a larger object size class so |
1244 | 0 | // that the array buffer's data can be allocated inline with the object. |
1245 | 0 | // The extra space will be left unused by the object's fixed slots and |
1246 | 0 | // available for the buffer's data, see NewObject(). |
1247 | 0 | size_t reservedSlots = JSCLASS_RESERVED_SLOTS(&class_); |
1248 | 0 |
1249 | 0 | size_t nslots = reservedSlots; |
1250 | 0 | bool allocated = false; |
1251 | 0 | if (contents) { |
1252 | 0 | if (ownsState == OwnsData) { |
1253 | 0 | if (contents.kind() == EXTERNAL) { |
1254 | 0 | // Store the FreeInfo in the inline data slots so that we |
1255 | 0 | // don't use up slots for it in non-refcounted array buffers. |
1256 | 0 | size_t freeInfoSlots = JS_HOWMANY(sizeof(FreeInfo), sizeof(Value)); |
1257 | 0 | MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS, |
1258 | 0 | "FreeInfo must fit in inline slots"); |
1259 | 0 | nslots += freeInfoSlots; |
1260 | 0 | } else { |
1261 | 0 | // The ABO is taking ownership, so account the bytes against |
1262 | 0 | // the zone. |
1263 | 0 | size_t nAllocated = nbytes; |
1264 | 0 | if (contents.kind() == MAPPED) { |
1265 | 0 | nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize()); |
1266 | 0 | } |
1267 | 0 | cx->updateMallocCounter(nAllocated); |
1268 | 0 | } |
1269 | 0 | } |
1270 | 0 | } else { |
1271 | 0 | MOZ_ASSERT(ownsState == OwnsData); |
1272 | 0 | size_t usableSlots = NativeObject::MAX_FIXED_SLOTS - reservedSlots; |
1273 | 0 | if (nbytes <= usableSlots * sizeof(Value)) { |
1274 | 0 | int newSlots = JS_HOWMANY(nbytes, sizeof(Value)); |
1275 | 0 | MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value))); |
1276 | 0 | nslots = reservedSlots + newSlots; |
1277 | 0 | contents = BufferContents::createPlain(nullptr); |
1278 | 0 | } else { |
1279 | 0 | contents = AllocateArrayBufferContents(cx, nbytes); |
1280 | 0 | if (!contents) { |
1281 | 0 | return nullptr; |
1282 | 0 | } |
1283 | 0 | allocated = true; |
1284 | 0 | } |
1285 | 0 | } |
1286 | 0 |
1287 | 0 | MOZ_ASSERT(!(class_.flags & JSCLASS_HAS_PRIVATE)); |
1288 | 0 | gc::AllocKind allocKind = gc::GetGCObjectKind(nslots); |
1289 | 0 |
1290 | 0 | AutoSetNewObjectMetadata metadata(cx); |
1291 | 0 | Rooted<ArrayBufferObject*> obj(cx, |
1292 | 0 | NewObjectWithClassProto<ArrayBufferObject>(cx, proto, allocKind, newKind)); |
1293 | 0 | if (!obj) { |
1294 | 0 | if (allocated) { |
1295 | 0 | js_free(contents.data()); |
1296 | 0 | } |
1297 | 0 | return nullptr; |
1298 | 0 | } |
1299 | 0 |
1300 | 0 | MOZ_ASSERT(obj->getClass() == &class_); |
1301 | 0 | MOZ_ASSERT(!gc::IsInsideNursery(obj)); |
1302 | 0 |
1303 | 0 | if (!contents) { |
1304 | 0 | void* data = obj->inlineDataPointer(); |
1305 | 0 | memset(data, 0, nbytes); |
1306 | 0 | obj->initialize(nbytes, BufferContents::createPlain(data), DoesntOwnData); |
1307 | 0 | } else { |
1308 | 0 | obj->initialize(nbytes, contents, ownsState); |
1309 | 0 | } |
1310 | 0 |
1311 | 0 | return obj; |
1312 | 0 | } |
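A hedged sketch of the inline-data sizing performed above (hypothetical helper; same JS_HOWMANY arithmetic as the code):

    // Illustrative only: how many fixed slots a small, contents-less allocation
    // requests so the buffer's bytes can live inline in the object.
    static size_t InlineSlotCount(size_t reservedSlots, uint32_t nbytes) {
        size_t usableSlots = js::NativeObject::MAX_FIXED_SLOTS - reservedSlots;
        if (nbytes <= usableSlots * sizeof(JS::Value)) {
            return reservedSlots + JS_HOWMANY(nbytes, sizeof(JS::Value));  // data fits inline
        }
        return reservedSlots;  // data will be heap-allocated instead
    }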
1313 | | |
1314 | | ArrayBufferObject* |
1315 | | ArrayBufferObject::create(JSContext* cx, uint32_t nbytes, |
1316 | | HandleObject proto /* = nullptr */) |
1317 | 0 | { |
1318 | 0 | return create(cx, nbytes, BufferContents::createPlain(nullptr), |
1319 | 0 | OwnsState::OwnsData, proto); |
1320 | 0 | } |
1321 | | |
1322 | | ArrayBufferObject* |
1323 | | ArrayBufferObject::createEmpty(JSContext* cx) |
1324 | 0 | { |
1325 | 0 | AutoSetNewObjectMetadata metadata(cx); |
1326 | 0 | ArrayBufferObject* obj = NewBuiltinClassInstance<ArrayBufferObject>(cx); |
1327 | 0 | if (!obj) { |
1328 | 0 | return nullptr; |
1329 | 0 | } |
1330 | 0 | |
1331 | 0 | obj->setByteLength(0); |
1332 | 0 | obj->setFlags(0); |
1333 | 0 | obj->setFirstView(nullptr); |
1334 | 0 | obj->setDataPointer(BufferContents::createPlain(nullptr), DoesntOwnData); |
1335 | 0 |
1336 | 0 | return obj; |
1337 | 0 | } |
1338 | | |
1339 | | ArrayBufferObject* |
1340 | | ArrayBufferObject::createFromNewRawBuffer(JSContext* cx, WasmArrayRawBuffer* buffer, |
1341 | | uint32_t initialSize) |
1342 | 0 | { |
1343 | 0 | AutoSetNewObjectMetadata metadata(cx); |
1344 | 0 | ArrayBufferObject* obj = NewBuiltinClassInstance<ArrayBufferObject>(cx); |
1345 | 0 | if (!obj) { |
1346 | 0 | WasmArrayRawBuffer::Release(buffer->dataPointer()); |
1347 | 0 | return nullptr; |
1348 | 0 | } |
1349 | 0 | |
1350 | 0 | obj->setByteLength(initialSize); |
1351 | 0 | obj->setFlags(0); |
1352 | 0 | obj->setFirstView(nullptr); |
1353 | 0 |
1354 | 0 | auto contents = BufferContents::create<WASM>(buffer->dataPointer()); |
1355 | 0 | obj->setDataPointer(contents, OwnsData); |
1356 | 0 |
1357 | 0 | cx->updateMallocCounter(initialSize); |
1358 | 0 |
1359 | 0 | return obj; |
1360 | 0 | } |
1361 | | |
1362 | | /* static */ ArrayBufferObject::BufferContents |
1363 | | ArrayBufferObject::externalizeContents(JSContext* cx, Handle<ArrayBufferObject*> buffer, |
1364 | | bool hasStealableContents) |
1365 | 0 | { |
1366 | 0 | MOZ_ASSERT(buffer->isPlain(), "Only support doing this on plain ABOs"); |
1367 | 0 | MOZ_ASSERT(!buffer->isDetached(), "must have contents to externalize"); |
1368 | 0 | MOZ_ASSERT_IF(hasStealableContents, buffer->hasStealableContents()); |
1369 | 0 |
1370 | 0 | BufferContents contents = buffer->contents(); |
1371 | 0 |
1372 | 0 | if (hasStealableContents) { |
1373 | 0 | buffer->setOwnsData(DoesntOwnData); |
1374 | 0 | return contents; |
1375 | 0 | } |
1376 | 0 | |
1377 | 0 | // Create a new chunk of memory to return since we cannot steal the |
1378 | 0 | // existing contents away from the buffer. |
1379 | 0 | BufferContents newContents = AllocateArrayBufferContents(cx, buffer->byteLength()); |
1380 | 0 | if (!newContents) { |
1381 | 0 | return BufferContents::createPlain(nullptr); |
1382 | 0 | } |
1383 | 0 | memcpy(newContents.data(), contents.data(), buffer->byteLength()); |
1384 | 0 | buffer->changeContents(cx, newContents, DoesntOwnData); |
1385 | 0 |
1386 | 0 | return newContents; |
1387 | 0 | } |
1388 | | |
1389 | | /* static */ ArrayBufferObject::BufferContents |
1390 | | ArrayBufferObject::stealContents(JSContext* cx, Handle<ArrayBufferObject*> buffer, |
1391 | | bool hasStealableContents) |
1392 | 0 | { |
1393 | 0 | // While wasm buffers cannot generally be transferred by content, |
1394 | 0 | // stealContents() is used internally by the implementation of memory growth. |
1395 | 0 | MOZ_ASSERT_IF(hasStealableContents, buffer->hasStealableContents() || |
1396 | 0 | (buffer->isWasm() && !buffer->isPreparedForAsmJS())); |
1397 | 0 | cx->check(buffer); |
1398 | 0 |
1399 | 0 | BufferContents oldContents = buffer->contents(); |
1400 | 0 |
1401 | 0 | if (hasStealableContents) { |
1402 | 0 | // Return the old contents and reset the detached buffer's data |
1403 | 0 | // pointer. This pointer should never be accessed. |
1404 | 0 | auto newContents = BufferContents::createPlain(nullptr); |
1405 | 0 | buffer->setOwnsData(DoesntOwnData); // Do not free the stolen data. |
1406 | 0 | ArrayBufferObject::detach(cx, buffer, newContents); |
1407 | 0 | buffer->setOwnsData(DoesntOwnData); // Do not free the nullptr. |
1408 | 0 | return oldContents; |
1409 | 0 | } |
1410 | 0 | |
1411 | 0 | // Create a new chunk of memory to return since we cannot steal the |
1412 | 0 | // existing contents away from the buffer. |
1413 | 0 | BufferContents contentsCopy = AllocateArrayBufferContents(cx, buffer->byteLength()); |
1414 | 0 | if (!contentsCopy) { |
1415 | 0 | return BufferContents::createPlain(nullptr); |
1416 | 0 | } |
1417 | 0 | |
1418 | 0 | if (buffer->byteLength() > 0) { |
1419 | 0 | memcpy(contentsCopy.data(), oldContents.data(), buffer->byteLength()); |
1420 | 0 | } |
1421 | 0 | ArrayBufferObject::detach(cx, buffer, oldContents); |
1422 | 0 | return contentsCopy; |
1423 | 0 | } |
1424 | | |
1425 | | /* static */ void |
1426 | | ArrayBufferObject::addSizeOfExcludingThis(JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, |
1427 | | JS::ClassInfo* info) |
1428 | 0 | { |
1429 | 0 | ArrayBufferObject& buffer = AsArrayBuffer(obj); |
1430 | 0 |
1431 | 0 | if (!buffer.ownsData()) { |
1432 | 0 | return; |
1433 | 0 | } |
1434 | 0 | |
1435 | 0 | switch (buffer.bufferKind()) { |
1436 | 0 | case PLAIN: |
1437 | 0 | if (buffer.isPreparedForAsmJS()) { |
1438 | 0 | info->objectsMallocHeapElementsAsmJS += mallocSizeOf(buffer.dataPointer()); |
1439 | 0 | } else { |
1440 | 0 | info->objectsMallocHeapElementsNormal += mallocSizeOf(buffer.dataPointer()); |
1441 | 0 | } |
1442 | 0 | break; |
1443 | 0 | case MAPPED: |
1444 | 0 | info->objectsNonHeapElementsNormal += buffer.byteLength(); |
1445 | 0 | break; |
1446 | 0 | case WASM: |
1447 | 0 | info->objectsNonHeapElementsWasm += buffer.byteLength(); |
1448 | 0 | MOZ_ASSERT(buffer.wasmMappedSize() >= buffer.byteLength()); |
1449 | 0 | info->wasmGuardPages += buffer.wasmMappedSize() - buffer.byteLength(); |
1450 | 0 | break; |
1451 | 0 | case KIND_MASK: |
1452 | 0 | MOZ_CRASH("bad bufferKind()"); |
1453 | 0 | } |
1454 | 0 | } |
1455 | | |
1456 | | /* static */ void |
1457 | | ArrayBufferObject::finalize(FreeOp* fop, JSObject* obj) |
1458 | 0 | { |
1459 | 0 | ArrayBufferObject& buffer = obj->as<ArrayBufferObject>(); |
1460 | 0 |
1461 | 0 | if (buffer.ownsData()) { |
1462 | 0 | buffer.releaseData(fop); |
1463 | 0 | } |
1464 | 0 | } |
1465 | | |
1466 | | /* static */ void |
1467 | | ArrayBufferObject::copyData(Handle<ArrayBufferObject*> toBuffer, uint32_t toIndex, |
1468 | | Handle<ArrayBufferObject*> fromBuffer, uint32_t fromIndex, |
1469 | | uint32_t count) |
1470 | 0 | { |
1471 | 0 | MOZ_ASSERT(toBuffer->byteLength() >= count); |
1472 | 0 | MOZ_ASSERT(toBuffer->byteLength() >= toIndex + count); |
1473 | 0 | MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex); |
1474 | 0 | MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex + count); |
1475 | 0 |
1476 | 0 | memcpy(toBuffer->dataPointer() + toIndex, fromBuffer->dataPointer() + fromIndex, count); |
1477 | 0 | } |
1478 | | |
1479 | | /* static */ void |
1480 | | ArrayBufferObject::trace(JSTracer* trc, JSObject* obj) |
1481 | 0 | { |
1482 | 0 | // If this buffer is associated with an inline typed object, |
1483 | 0 | // fix up the data pointer if the typed object was moved. |
1484 | 0 | ArrayBufferObject& buf = obj->as<ArrayBufferObject>(); |
1485 | 0 |
1486 | 0 | if (!buf.forInlineTypedObject()) { |
1487 | 0 | return; |
1488 | 0 | } |
1489 | 0 | |
1490 | 0 | JSObject* view = MaybeForwarded(buf.firstView()); |
1491 | 0 | MOZ_ASSERT(view && view->is<InlineTransparentTypedObject>()); |
1492 | 0 |
1493 | 0 | TraceManuallyBarrieredEdge(trc, &view, "array buffer inline typed object owner"); |
1494 | 0 | buf.setFixedSlot(DATA_SLOT, |
1495 | 0 | PrivateValue(view->as<InlineTransparentTypedObject>().inlineTypedMem())); |
1496 | 0 | } |
1497 | | |
1498 | | /* static */ size_t |
1499 | | ArrayBufferObject::objectMoved(JSObject* obj, JSObject* old) |
1500 | 0 | { |
1501 | 0 | ArrayBufferObject& dst = obj->as<ArrayBufferObject>(); |
1502 | 0 | const ArrayBufferObject& src = old->as<ArrayBufferObject>(); |
1503 | 0 |
1504 | 0 | // Fix up possible inline data pointer. |
1505 | 0 | if (src.hasInlineData()) { |
1506 | 0 | dst.setFixedSlot(DATA_SLOT, PrivateValue(dst.inlineDataPointer())); |
1507 | 0 | } |
1508 | 0 |
1509 | 0 | return 0; |
1510 | 0 | } |
1511 | | |
1512 | | ArrayBufferViewObject* |
1513 | | ArrayBufferObject::firstView() |
1514 | 0 | { |
1515 | 0 | return getFixedSlot(FIRST_VIEW_SLOT).isObject() |
1516 | 0 | ? static_cast<ArrayBufferViewObject*>(&getFixedSlot(FIRST_VIEW_SLOT).toObject()) |
1517 | 0 | : nullptr; |
1518 | 0 | } |
1519 | | |
1520 | | void |
1521 | | ArrayBufferObject::setFirstView(ArrayBufferViewObject* view) |
1522 | 0 | { |
1523 | 0 | setFixedSlot(FIRST_VIEW_SLOT, ObjectOrNullValue(view)); |
1524 | 0 | } |
1525 | | |
1526 | | bool |
1527 | | ArrayBufferObject::addView(JSContext* cx, JSObject* viewArg) |
1528 | 0 | { |
1529 | 0 | // Note: we don't pass in an ArrayBufferViewObject as the argument due to |
1530 | 0 | // tricky inheritance in the various view classes. View classes do not |
1531 | 0 | // inherit from ArrayBufferViewObject so won't be upcast automatically. |
1532 | 0 | MOZ_ASSERT(viewArg->is<ArrayBufferViewObject>() || viewArg->is<TypedObject>()); |
1533 | 0 | ArrayBufferViewObject* view = static_cast<ArrayBufferViewObject*>(viewArg); |
1534 | 0 |
1535 | 0 | if (!firstView()) { |
1536 | 0 | setFirstView(view); |
1537 | 0 | return true; |
1538 | 0 | } |
1539 | 0 | return ObjectRealm::get(this).innerViews.get().addView(cx, this, view); |
1540 | 0 | } |
1541 | | |
1542 | | /* |
1543 | | * InnerViewTable |
1544 | | */ |
1545 | | |
1546 | | constexpr size_t VIEW_LIST_MAX_LENGTH = 500; |
1547 | | |
1548 | | bool |
1549 | | InnerViewTable::addView(JSContext* cx, ArrayBufferObject* buffer, ArrayBufferViewObject* view) |
1550 | 0 | { |
1551 | 0 | // ArrayBufferObject entries are only added when there are multiple views. |
1552 | 0 | MOZ_ASSERT(buffer->firstView()); |
1553 | 0 |
1554 | 0 | Map::AddPtr p = map.lookupForAdd(buffer); |
1555 | 0 |
1556 | 0 | MOZ_ASSERT(!gc::IsInsideNursery(buffer)); |
1557 | 0 | bool addToNursery = nurseryKeysValid && gc::IsInsideNursery(view); |
1558 | 0 |
1559 | 0 | if (p) { |
1560 | 0 | ViewVector& views = p->value(); |
1561 | 0 | MOZ_ASSERT(!views.empty()); |
1562 | 0 |
1563 | 0 | if (addToNursery) { |
1564 | 0 | // Only add the entry to |nurseryKeys| if it isn't already there. |
1565 | 0 | if (views.length() >= VIEW_LIST_MAX_LENGTH) { |
1566 | 0 | // To avoid quadratic blowup, skip the loop below if we end up |
1567 | 0 | // adding enormous numbers of views for the same object. |
1568 | 0 | nurseryKeysValid = false; |
1569 | 0 | } else { |
1570 | 0 | for (size_t i = 0; i < views.length(); i++) { |
1571 | 0 | if (gc::IsInsideNursery(views[i])) { |
1572 | 0 | addToNursery = false; |
1573 | 0 | break; |
1574 | 0 | } |
1575 | 0 | } |
1576 | 0 | } |
1577 | 0 | } |
1578 | 0 |
1579 | 0 | if (!views.append(view)) { |
1580 | 0 | ReportOutOfMemory(cx); |
1581 | 0 | return false; |
1582 | 0 | } |
1583 | 0 | } else { |
1584 | 0 | if (!map.add(p, buffer, ViewVector())) { |
1585 | 0 | ReportOutOfMemory(cx); |
1586 | 0 | return false; |
1587 | 0 | } |
1588 | 0 | // ViewVector has one inline element, so the first insertion is |
1589 | 0 | // guaranteed to succeed. |
1590 | 0 | MOZ_ALWAYS_TRUE(p->value().append(view)); |
1591 | 0 | } |
1592 | 0 |
1593 | 0 | if (addToNursery && !nurseryKeys.append(buffer)) { |
1594 | 0 | nurseryKeysValid = false; |
1595 | 0 | } |
1596 | 0 |
1597 | 0 | return true; |
1598 | 0 | } |
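
// ---------------------------------------------------------------------------
// A minimal, standalone sketch of the two-level view tracking used above: the
// first view of a buffer lives in a reserved slot on the buffer itself, and a
// buffer only gets an entry in the side table (mapping buffer -> vector of
// extra views) once a second view is added. The types ToyBuffer, ToyView and
// ToyViewTable are invented for illustration and are not SpiderMonkey classes.
#include <unordered_map>
#include <vector>

struct ToyView {};

struct ToyBuffer {
    ToyView* firstView = nullptr;   // plays the role of FIRST_VIEW_SLOT
};

class ToyViewTable {
    std::unordered_map<ToyBuffer*, std::vector<ToyView*>> map;

  public:
    void addView(ToyBuffer* buffer, ToyView* view) {
        if (!buffer->firstView) {
            // Common case: a buffer with a single view needs no table entry.
            buffer->firstView = view;
            return;
        }
        // Rare case: every additional view goes into the per-buffer vector.
        map[buffer].push_back(view);
    }

    void removeViews(ToyBuffer* buffer) { map.erase(buffer); }
};
// ---------------------------------------------------------------------------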
1599 | | |
1600 | | InnerViewTable::ViewVector* |
1601 | | InnerViewTable::maybeViewsUnbarriered(ArrayBufferObject* buffer) |
1602 | 0 | { |
1603 | 0 | Map::Ptr p = map.lookup(buffer); |
1604 | 0 | if (p) { |
1605 | 0 | return &p->value(); |
1606 | 0 | } |
1607 | 0 | return nullptr; |
1608 | 0 | } |
1609 | | |
1610 | | void |
1611 | | InnerViewTable::removeViews(ArrayBufferObject* buffer) |
1612 | 0 | { |
1613 | 0 | Map::Ptr p = map.lookup(buffer); |
1614 | 0 | MOZ_ASSERT(p); |
1615 | 0 |
1616 | 0 | map.remove(p); |
1617 | 0 | } |
1618 | | |
1619 | | /* static */ bool |
1620 | | InnerViewTable::sweepEntry(JSObject** pkey, ViewVector& views) |
1621 | 0 | { |
1622 | 0 | if (IsAboutToBeFinalizedUnbarriered(pkey)) { |
1623 | 0 | return true; |
1624 | 0 | } |
1625 | 0 | |
1626 | 0 | MOZ_ASSERT(!views.empty()); |
1627 | 0 | size_t i = 0; |
1628 | 0 | while (i < views.length()) { |
1629 | 0 | if (IsAboutToBeFinalizedUnbarriered(&views[i])) { |
1630 | 0 | // If the current element is garbage then remove it from the |
1631 | 0 | // vector by moving the last one into its place. |
1632 | 0 | views[i] = views.back(); |
1633 | 0 | views.popBack(); |
1634 | 0 | } else { |
1635 | 0 | i++; |
1636 | 0 | } |
1637 | 0 | } |
1638 | 0 |
1639 | 0 | return views.empty(); |
1640 | 0 | } |
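
// ---------------------------------------------------------------------------
// The unordered "move the last element into the hole" removal used by
// sweepEntry() above, shown on a plain std::vector for clarity. ToySweep and
// the dead() predicate are invented stand-ins (the real code uses
// IsAboutToBeFinalizedUnbarriered on a GC vector).
#include <vector>

template <typename T, typename Pred>
static void
ToySweep(std::vector<T>& items, Pred dead)
{
    size_t i = 0;
    while (i < items.size()) {
        if (dead(items[i])) {
            items[i] = items.back();  // Order is not preserved...
            items.pop_back();         // ...but each removal is O(1).
        } else {
            i++;
        }
    }
}
// ---------------------------------------------------------------------------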
1641 | | |
1642 | | void |
1643 | | InnerViewTable::sweep() |
1644 | 0 | { |
1645 | 0 | MOZ_ASSERT(nurseryKeys.empty()); |
1646 | 0 | map.sweep(); |
1647 | 0 | } |
1648 | | |
1649 | | void |
1650 | | InnerViewTable::sweepAfterMinorGC() |
1651 | 0 | { |
1652 | 0 | MOZ_ASSERT(needsSweepAfterMinorGC()); |
1653 | 0 |
1654 | 0 | if (nurseryKeysValid) { |
1655 | 0 | for (size_t i = 0; i < nurseryKeys.length(); i++) { |
1656 | 0 | JSObject* buffer = MaybeForwarded(nurseryKeys[i]); |
1657 | 0 | Map::Ptr p = map.lookup(buffer); |
1658 | 0 | if (!p) { |
1659 | 0 | continue; |
1660 | 0 | } |
1661 | 0 | |
1662 | 0 | if (sweepEntry(&p->mutableKey(), p->value())) { |
1663 | 0 | map.remove(buffer); |
1664 | 0 | } |
1665 | 0 | } |
1666 | 0 | nurseryKeys.clear(); |
1667 | 0 | } else { |
1668 | 0 | // Do the required sweeping by looking at every map entry. |
1669 | 0 | nurseryKeys.clear(); |
1670 | 0 | sweep(); |
1671 | 0 |
1672 | 0 | nurseryKeysValid = true; |
1673 | 0 | } |
1674 | 0 | } |
1675 | | |
1676 | | size_t |
1677 | | InnerViewTable::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) |
1678 | 0 | { |
1679 | 0 | size_t vectorSize = 0; |
1680 | 0 | for (Map::Enum e(map); !e.empty(); e.popFront()) { |
1681 | 0 | vectorSize += e.front().value().sizeOfExcludingThis(mallocSizeOf); |
1682 | 0 | } |
1683 | 0 |
1684 | 0 | return vectorSize |
1685 | 0 | + map.shallowSizeOfExcludingThis(mallocSizeOf) |
1686 | 0 | + nurseryKeys.sizeOfExcludingThis(mallocSizeOf); |
1687 | 0 | } |
1688 | | |
1689 | | /* |
1690 | | * ArrayBufferViewObject |
1691 | | */ |
1692 | | |
1693 | | /* |
1694 | | * This method is used to trace TypedArrayObjects and DataViewObjects. We need |
1695 | | * a custom tracer to move the object's data pointer if its owner was moved and |
1696 | | * stores its data inline. |
1697 | | */ |
1698 | | /* static */ void |
1699 | | ArrayBufferViewObject::trace(JSTracer* trc, JSObject* objArg) |
1700 | 0 | { |
1701 | 0 | NativeObject* obj = &objArg->as<NativeObject>(); |
1702 | 0 | HeapSlot& bufSlot = obj->getFixedSlotRef(TypedArrayObject::BUFFER_SLOT); |
1703 | 0 | TraceEdge(trc, &bufSlot, "typedarray.buffer"); |
1704 | 0 |
1705 | 0 | // Update obj's data pointer if it moved. |
1706 | 0 | if (bufSlot.isObject()) { |
1707 | 0 | if (IsArrayBuffer(&bufSlot.toObject())) { |
1708 | 0 | ArrayBufferObject& buf = AsArrayBuffer(MaybeForwarded(&bufSlot.toObject())); |
1709 | 0 | uint32_t offset = uint32_t(obj->getFixedSlot(TypedArrayObject::BYTEOFFSET_SLOT).toInt32()); |
1710 | 0 | MOZ_ASSERT(offset <= INT32_MAX); |
1711 | 0 |
1712 | 0 | if (buf.forInlineTypedObject()) { |
1713 | 0 | MOZ_ASSERT(buf.dataPointer() != nullptr); |
1714 | 0 |
1715 | 0 | // The data is inline with an InlineTypedObject associated with the |
1716 | 0 | // buffer. Get a new address for the typed object if it moved. |
1717 | 0 | JSObject* view = buf.firstView(); |
1718 | 0 |
1719 | 0 | // Mark the object to move it into the tenured space. |
1720 | 0 | TraceManuallyBarrieredEdge(trc, &view, "typed array nursery owner"); |
1721 | 0 | MOZ_ASSERT(view->is<InlineTypedObject>()); |
1722 | 0 | MOZ_ASSERT(view != obj); |
1723 | 0 |
1724 | 0 | size_t nfixed = obj->numFixedSlotsMaybeForwarded(); |
1725 | 0 | void* srcData = obj->getPrivate(nfixed); |
1726 | 0 | void* dstData = view->as<InlineTypedObject>().inlineTypedMemForGC() + offset; |
1727 | 0 | obj->setPrivateUnbarriered(nfixed, dstData); |
1728 | 0 |
1729 | 0 | // We can't use a direct forwarding pointer here, as there might |
1730 | 0 | // not be enough bytes available, and other views might have data |
1731 | 0 | // pointers whose forwarding pointers would overlap this one. |
1732 | 0 | if (trc->isTenuringTracer()) { |
1733 | 0 | Nursery& nursery = trc->runtime()->gc.nursery(); |
1734 | 0 | nursery.maybeSetForwardingPointer(trc, srcData, dstData, /* direct = */ false); |
1735 | 0 | } |
1736 | 0 | } else { |
1737 | 0 | MOZ_ASSERT_IF(buf.dataPointer() == nullptr, offset == 0); |
1738 | 0 |
1739 | 0 | // The data may or may not be inline with the buffer. The buffer |
1740 | 0 | // can only move during a compacting GC, in which case its |
1741 | 0 | // objectMoved hook has already updated the buffer's data pointer. |
1742 | 0 | size_t nfixed = obj->numFixedSlotsMaybeForwarded(); |
1743 | 0 | obj->setPrivateUnbarriered(nfixed, buf.dataPointer() + offset); |
1744 | 0 | } |
1745 | 0 | } |
1746 | 0 | } |
1747 | 0 | } |
1748 | | |
1749 | | template <> |
1750 | | bool |
1751 | | JSObject::is<js::ArrayBufferViewObject>() const |
1752 | 0 | { |
1753 | 0 | return is<DataViewObject>() || is<TypedArrayObject>(); |
1754 | 0 | } |
1755 | | |
1756 | | template <> |
1757 | | bool |
1758 | | JSObject::is<js::ArrayBufferObjectMaybeShared>() const |
1759 | 0 | { |
1760 | 0 | return is<ArrayBufferObject>() || is<SharedArrayBufferObject>(); |
1761 | 0 | } |
1762 | | |
1763 | | void |
1764 | | ArrayBufferViewObject::notifyBufferDetached(JSContext* cx, void* newData) |
1765 | 0 | { |
1766 | 0 | if (is<DataViewObject>()) { |
1767 | 0 | if (as<DataViewObject>().isSharedMemory()) { |
1768 | 0 | return; |
1769 | 0 | } |
1770 | 0 | as<DataViewObject>().notifyBufferDetached(newData); |
1771 | 0 | } else if (is<TypedArrayObject>()) { |
1772 | 0 | if (as<TypedArrayObject>().isSharedMemory()) { |
1773 | 0 | return; |
1774 | 0 | } |
1775 | 0 | as<TypedArrayObject>().notifyBufferDetached(cx, newData); |
1776 | 0 | } else { |
1777 | 0 | as<OutlineTypedObject>().notifyBufferDetached(newData); |
1778 | 0 | } |
1779 | 0 | } |
1780 | | |
1781 | | uint8_t* |
1782 | | ArrayBufferViewObject::dataPointerUnshared(const JS::AutoRequireNoGC& nogc) |
1783 | 0 | { |
1784 | 0 | if (is<DataViewObject>()) { |
1785 | 0 | MOZ_ASSERT(!as<DataViewObject>().isSharedMemory()); |
1786 | 0 | return static_cast<uint8_t*>(as<DataViewObject>().dataPointerUnshared()); |
1787 | 0 | } |
1788 | 0 | if (is<TypedArrayObject>()) { |
1789 | 0 | MOZ_ASSERT(!as<TypedArrayObject>().isSharedMemory()); |
1790 | 0 | return static_cast<uint8_t*>(as<TypedArrayObject>().viewDataUnshared()); |
1791 | 0 | } |
1792 | 0 | return as<TypedObject>().typedMem(nogc); |
1793 | 0 | } |
1794 | | |
1795 | | #ifdef DEBUG |
1796 | | bool |
1797 | | ArrayBufferViewObject::isSharedMemory() |
1798 | | { |
1799 | | if (is<TypedArrayObject>()) { |
1800 | | return as<TypedArrayObject>().isSharedMemory(); |
1801 | | } |
1802 | | return false; |
1803 | | } |
1804 | | #endif |
1805 | | |
1806 | | void |
1807 | | ArrayBufferViewObject::setDataPointerUnshared(uint8_t* data) |
1808 | 0 | { |
1809 | 0 | if (is<DataViewObject>()) { |
1810 | 0 | MOZ_ASSERT(!as<DataViewObject>().isSharedMemory()); |
1811 | 0 | as<DataViewObject>().setPrivate(data); |
1812 | 0 | } else if (is<TypedArrayObject>()) { |
1813 | 0 | MOZ_ASSERT(!as<TypedArrayObject>().isSharedMemory()); |
1814 | 0 | as<TypedArrayObject>().setPrivate(data); |
1815 | 0 | } else if (is<OutlineTypedObject>()) { |
1816 | 0 | as<OutlineTypedObject>().setData(data); |
1817 | 0 | } else { |
1818 | 0 | MOZ_CRASH(); |
1819 | 0 | } |
1820 | 0 | } |
1821 | | |
1822 | | /* static */ ArrayBufferObjectMaybeShared* |
1823 | | ArrayBufferViewObject::bufferObject(JSContext* cx, Handle<ArrayBufferViewObject*> thisObject) |
1824 | 0 | { |
1825 | 0 | if (thisObject->is<TypedArrayObject>()) { |
1826 | 0 | Rooted<TypedArrayObject*> typedArray(cx, &thisObject->as<TypedArrayObject>()); |
1827 | 0 | if (!TypedArrayObject::ensureHasBuffer(cx, typedArray)) { |
1828 | 0 | return nullptr; |
1829 | 0 | } |
1830 | 0 | return thisObject->as<TypedArrayObject>().bufferEither(); |
1831 | 0 | } |
1832 | 0 | MOZ_ASSERT(thisObject->is<DataViewObject>()); |
1833 | 0 | return &thisObject->as<DataViewObject>().arrayBufferEither(); |
1834 | 0 | } |
1835 | | |
1836 | | /* JS Friend API */ |
1837 | | |
1838 | | JS_FRIEND_API(bool) |
1839 | | JS_IsArrayBufferViewObject(JSObject* obj) |
1840 | 0 | { |
1841 | 0 | obj = CheckedUnwrap(obj); |
1842 | 0 | return obj && obj->is<ArrayBufferViewObject>(); |
1843 | 0 | } |
1844 | | |
1845 | | JS_FRIEND_API(JSObject*) |
1846 | | js::UnwrapArrayBufferView(JSObject* obj) |
1847 | 0 | { |
1848 | 0 | if (JSObject* unwrapped = CheckedUnwrap(obj)) { |
1849 | 0 | return unwrapped->is<ArrayBufferViewObject>() ? unwrapped : nullptr; |
1850 | 0 | } |
1851 | 0 | return nullptr; |
1852 | 0 | } |
1853 | | |
1854 | | JS_FRIEND_API(uint32_t) |
1855 | | JS_GetArrayBufferByteLength(JSObject* obj) |
1856 | 0 | { |
1857 | 0 | obj = CheckedUnwrap(obj); |
1858 | 0 | return obj ? AsArrayBuffer(obj).byteLength() : 0; |
1859 | 0 | } |
1860 | | |
1861 | | JS_FRIEND_API(uint8_t*) |
1862 | | JS_GetArrayBufferData(JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) |
1863 | 0 | { |
1864 | 0 | obj = CheckedUnwrap(obj); |
1865 | 0 | if (!obj) { |
1866 | 0 | return nullptr; |
1867 | 0 | } |
1868 | 0 | if (!IsArrayBuffer(obj)) { |
1869 | 0 | return nullptr; |
1870 | 0 | } |
1871 | 0 | *isSharedMemory = false; |
1872 | 0 | return AsArrayBuffer(obj).dataPointer(); |
1873 | 0 | } |
1874 | | |
1875 | | JS_FRIEND_API(bool) |
1876 | | JS_DetachArrayBuffer(JSContext* cx, HandleObject obj) |
1877 | 0 | { |
1878 | 0 | AssertHeapIsIdle(); |
1879 | 0 | CHECK_THREAD(cx); |
1880 | 0 | cx->check(obj); |
1881 | 0 |
1882 | 0 | if (!obj->is<ArrayBufferObject>()) { |
1883 | 0 | JS_ReportErrorASCII(cx, "ArrayBuffer object required"); |
1884 | 0 | return false; |
1885 | 0 | } |
1886 | 0 | |
1887 | 0 | Rooted<ArrayBufferObject*> buffer(cx, &obj->as<ArrayBufferObject>()); |
1888 | 0 |
1889 | 0 | if (buffer->isWasm() || buffer->isPreparedForAsmJS()) { |
1890 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_NO_TRANSFER); |
1891 | 0 | return false; |
1892 | 0 | } |
1893 | 0 | |
1894 | 0 | ArrayBufferObject::BufferContents newContents = |
1895 | 0 | buffer->hasStealableContents() ? ArrayBufferObject::BufferContents::createPlain(nullptr) |
1896 | 0 | : buffer->contents(); |
1897 | 0 |
1898 | 0 | ArrayBufferObject::detach(cx, buffer, newContents); |
1899 | 0 |
1900 | 0 | return true; |
1901 | 0 | } |
1902 | | |
1903 | | JS_FRIEND_API(bool) |
1904 | | JS_IsDetachedArrayBufferObject(JSObject* obj) |
1905 | 0 | { |
1906 | 0 | obj = CheckedUnwrap(obj); |
1907 | 0 | if (!obj) { |
1908 | 0 | return false; |
1909 | 0 | } |
1910 | 0 | |
1911 | 0 | return obj->is<ArrayBufferObject>() && obj->as<ArrayBufferObject>().isDetached(); |
1912 | 0 | } |
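
// ---------------------------------------------------------------------------
// A minimal usage sketch for the two entry points above, assuming an already
// initialized JSContext* and a same-realm ArrayBuffer object; the helper name
// DetachAndCheck is invented for illustration.
#include "jsapi.h"
#include "jsfriendapi.h"

static bool
DetachAndCheck(JSContext* cx, JS::HandleObject buffer)
{
    // Fails with JSMSG_WASM_NO_TRANSFER for wasm and asm.js-prepared buffers.
    if (!JS_DetachArrayBuffer(cx, buffer)) {
        return false;
    }
    // A successfully detached buffer reports itself as detached from then on.
    return JS_IsDetachedArrayBufferObject(buffer);
}
// ---------------------------------------------------------------------------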
1913 | | |
1914 | | JS_FRIEND_API(JSObject*) |
1915 | | JS_NewArrayBuffer(JSContext* cx, uint32_t nbytes) |
1916 | 0 | { |
1917 | 0 | AssertHeapIsIdle(); |
1918 | 0 | CHECK_THREAD(cx); |
1919 | 0 | MOZ_ASSERT(nbytes <= INT32_MAX); |
1920 | 0 | return ArrayBufferObject::create(cx, nbytes); |
1921 | 0 | } |
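
// ---------------------------------------------------------------------------
// A minimal usage sketch, assuming an initialized JSContext*: allocate a
// buffer with JS_NewArrayBuffer and fill it through the raw pointer returned
// by JS_GetArrayBufferData while GC is suppressed. FillNewBuffer and the 0xAB
// fill pattern are invented for illustration.
#include <string.h>
#include "jsapi.h"
#include "jsfriendapi.h"

static JSObject*
FillNewBuffer(JSContext* cx, uint32_t nbytes)
{
    JS::RootedObject buffer(cx, JS_NewArrayBuffer(cx, nbytes));
    if (!buffer) {
        return nullptr;  // OOM has already been reported on cx.
    }

    {
        // The raw data pointer is only stable while a GC cannot run.
        JS::AutoCheckCannotGC nogc;
        bool isShared = false;
        uint8_t* data = JS_GetArrayBufferData(buffer, &isShared, nogc);
        memset(data, 0xAB, nbytes);
    }
    return buffer;
}
// ---------------------------------------------------------------------------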
1922 | | |
1923 | | JS_PUBLIC_API(JSObject*) |
1924 | | JS_NewArrayBufferWithContents(JSContext* cx, size_t nbytes, void* data) |
1925 | 0 | { |
1926 | 0 | AssertHeapIsIdle(); |
1927 | 0 | CHECK_THREAD(cx); |
1928 | 0 | MOZ_ASSERT_IF(!data, nbytes == 0); |
1929 | 0 |
1930 | 0 | ArrayBufferObject::BufferContents contents = |
1931 | 0 | ArrayBufferObject::BufferContents::create<ArrayBufferObject::PLAIN>(data); |
1932 | 0 | return ArrayBufferObject::create(cx, nbytes, contents, ArrayBufferObject::OwnsData, |
1933 | 0 | /* proto = */ nullptr, TenuredObject); |
1934 | 0 | } |
1935 | | |
1936 | | JS_PUBLIC_API(JSObject*) |
1937 | | JS_NewExternalArrayBuffer(JSContext* cx, size_t nbytes, void* data, |
1938 | | JS::BufferContentsFreeFunc freeFunc, void* freeUserData) |
1939 | 0 | { |
1940 | 0 | AssertHeapIsIdle(); |
1941 | 0 | CHECK_THREAD(cx); |
1942 | 0 |
1943 | 0 | MOZ_ASSERT(data); |
1944 | 0 | MOZ_ASSERT(nbytes > 0); |
1945 | 0 |
1946 | 0 | ArrayBufferObject::BufferContents contents = |
1947 | 0 | ArrayBufferObject::BufferContents::createExternal(data, freeFunc, freeUserData); |
1948 | 0 | return ArrayBufferObject::create(cx, nbytes, contents, ArrayBufferObject::OwnsData, |
1949 | 0 | /* proto = */ nullptr, TenuredObject); |
1950 | 0 | } |
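
// ---------------------------------------------------------------------------
// A minimal usage sketch for JS_NewExternalArrayBuffer above, assuming an
// initialized JSContext* and that JS::BufferContentsFreeFunc has the
// void(void* contents, void* userData) shape implied by the call site.
// ReleaseExternalBytes and WrapExternalBytes are invented names.
#include <stdlib.h>
#include "jsapi.h"

static void
ReleaseExternalBytes(void* contents, void* /* userData */)
{
    // Invoked by the engine once the buffer no longer needs the bytes.
    free(contents);
}

static JSObject*
WrapExternalBytes(JSContext* cx, size_t nbytes)
{
    // The implementation above asserts data != nullptr and nbytes > 0.
    void* bytes = malloc(nbytes);
    if (!bytes) {
        return nullptr;
    }
    JSObject* buffer = JS_NewExternalArrayBuffer(cx, nbytes, bytes,
                                                 ReleaseExternalBytes, nullptr);
    if (!buffer) {
        free(bytes);  // Assumed not adopted when creation fails.
    }
    return buffer;
}
// ---------------------------------------------------------------------------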
1951 | | |
1952 | | JS_PUBLIC_API(JSObject*) |
1953 | | JS_NewArrayBufferWithExternalContents(JSContext* cx, size_t nbytes, void* data) |
1954 | 0 | { |
1955 | 0 | AssertHeapIsIdle(); |
1956 | 0 | CHECK_THREAD(cx); |
1957 | 0 | MOZ_ASSERT_IF(!data, nbytes == 0); |
1958 | 0 | ArrayBufferObject::BufferContents contents = |
1959 | 0 | ArrayBufferObject::BufferContents::create<ArrayBufferObject::PLAIN>(data); |
1960 | 0 | return ArrayBufferObject::create(cx, nbytes, contents, ArrayBufferObject::DoesntOwnData, |
1961 | 0 | /* proto = */ nullptr, TenuredObject); |
1962 | 0 | } |
1963 | | |
1964 | | JS_FRIEND_API(bool) |
1965 | | JS_IsArrayBufferObject(JSObject* obj) |
1966 | 0 | { |
1967 | 0 | obj = CheckedUnwrap(obj); |
1968 | 0 | return obj && obj->is<ArrayBufferObject>(); |
1969 | 0 | } |
1970 | | |
1971 | | JS_FRIEND_API(bool) |
1972 | | JS_ArrayBufferHasData(JSObject* obj) |
1973 | 0 | { |
1974 | 0 | return CheckedUnwrap(obj)->as<ArrayBufferObject>().hasData(); |
1975 | 0 | } |
1976 | | |
1977 | | JS_FRIEND_API(JSObject*) |
1978 | | js::UnwrapArrayBuffer(JSObject* obj) |
1979 | 0 | { |
1980 | 0 | if (JSObject* unwrapped = CheckedUnwrap(obj)) { |
1981 | 0 | return unwrapped->is<ArrayBufferObject>() ? unwrapped : nullptr; |
1982 | 0 | } |
1983 | 0 | return nullptr; |
1984 | 0 | } |
1985 | | |
1986 | | JS_FRIEND_API(JSObject*) |
1987 | | js::UnwrapSharedArrayBuffer(JSObject* obj) |
1988 | 0 | { |
1989 | 0 | if (JSObject* unwrapped = CheckedUnwrap(obj)) { |
1990 | 0 | return unwrapped->is<SharedArrayBufferObject>() ? unwrapped : nullptr; |
1991 | 0 | } |
1992 | 0 | return nullptr; |
1993 | 0 | } |
1994 | | |
1995 | | JS_PUBLIC_API(void*) |
1996 | | JS_ExternalizeArrayBufferContents(JSContext* cx, HandleObject obj) |
1997 | 0 | { |
1998 | 0 | AssertHeapIsIdle(); |
1999 | 0 | CHECK_THREAD(cx); |
2000 | 0 | cx->check(obj); |
2001 | 0 |
2002 | 0 | if (!obj->is<ArrayBufferObject>()) { |
2003 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS); |
2004 | 0 | return nullptr; |
2005 | 0 | } |
2006 | 0 | |
2007 | 0 | Handle<ArrayBufferObject*> buffer = obj.as<ArrayBufferObject>(); |
2008 | 0 | if (!buffer->isPlain()) { |
2009 | 0 | // This operation isn't supported on mapped or wasm ArrayBufferObjects.
2010 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS); |
2011 | 0 | return nullptr; |
2012 | 0 | } |
2013 | 0 | if (buffer->isDetached()) { |
2014 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_DETACHED); |
2015 | 0 | return nullptr; |
2016 | 0 | } |
2017 | 0 | |
2018 | 0 | // The caller assumes that a plain malloc'd buffer is returned. |
2019 | 0 | // hasStealableContents is true for mapped buffers, so we must additionally |
2020 | 0 | // require that the buffer is plain. In the future, we could consider |
2021 | 0 | // returning something that handles releasing the memory. |
2022 | 0 | bool hasStealableContents = buffer->hasStealableContents(); |
2023 | 0 |
2024 | 0 | return ArrayBufferObject::externalizeContents(cx, buffer, hasStealableContents).data(); |
2025 | 0 | } |
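
// ---------------------------------------------------------------------------
// A minimal usage sketch for JS_ExternalizeArrayBufferContents above, assuming
// an initialized JSContext*. Unlike stealing, externalizing does not detach
// the buffer: scripts keep seeing the same bytes, but the caller takes over
// ownership of the plain malloc'd allocation. ExternalizeKeepingBufferAlive is
// an invented name, and the JS_free() pairing is an assumption.
#include "jsapi.h"
#include "jsfriendapi.h"

static void*
ExternalizeKeepingBufferAlive(JSContext* cx, JS::HandleObject buffer)
{
    // Returns nullptr (with an error reported) for non-plain or detached buffers.
    void* contents = JS_ExternalizeArrayBufferContents(cx, buffer);
    if (!contents) {
        return nullptr;
    }
    // The buffer stays attached; the caller must keep the allocation alive for
    // as long as the buffer is reachable and release it (assumed via JS_free())
    // afterwards.
    MOZ_ASSERT(!JS_IsDetachedArrayBufferObject(buffer));
    return contents;
}
// ---------------------------------------------------------------------------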
2026 | | |
2027 | | JS_PUBLIC_API(void*) |
2028 | | JS_StealArrayBufferContents(JSContext* cx, HandleObject objArg) |
2029 | 0 | { |
2030 | 0 | AssertHeapIsIdle(); |
2031 | 0 | CHECK_THREAD(cx); |
2032 | 0 | cx->check(objArg); |
2033 | 0 |
2034 | 0 | JSObject* obj = CheckedUnwrap(objArg); |
2035 | 0 | if (!obj) { |
2036 | 0 | return nullptr; |
2037 | 0 | } |
2038 | 0 | |
2039 | 0 | if (!obj->is<ArrayBufferObject>()) { |
2040 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS); |
2041 | 0 | return nullptr; |
2042 | 0 | } |
2043 | 0 | |
2044 | 0 | Rooted<ArrayBufferObject*> buffer(cx, &obj->as<ArrayBufferObject>()); |
2045 | 0 | if (buffer->isDetached()) { |
2046 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_DETACHED); |
2047 | 0 | return nullptr; |
2048 | 0 | } |
2049 | 0 | |
2050 | 0 | if (buffer->isWasm() || buffer->isPreparedForAsmJS()) { |
2051 | 0 | JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_NO_TRANSFER); |
2052 | 0 | return nullptr; |
2053 | 0 | } |
2054 | 0 | |
2055 | 0 | // The caller assumes that a plain malloc'd buffer is returned. |
2056 | 0 | // hasStealableContents is true for mapped buffers, so we must additionally |
2057 | 0 | // require that the buffer is plain. In the future, we could consider |
2058 | 0 | // returning something that handles releasing the memory. |
2059 | 0 | bool hasStealableContents = buffer->hasStealableContents() && buffer->isPlain(); |
2060 | 0 |
2061 | 0 | AutoRealm ar(cx, buffer); |
2062 | 0 | return ArrayBufferObject::stealContents(cx, buffer, hasStealableContents).data(); |
2063 | 0 | } |
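
// ---------------------------------------------------------------------------
// A minimal usage sketch for JS_StealArrayBufferContents above, assuming an
// initialized JSContext*. Stealing detaches the source buffer and hands the
// caller a plain malloc'd allocation, which can be re-adopted by a fresh
// ArrayBuffer via JS_NewArrayBufferWithContents (the usual transfer pattern).
// TransferBuffer is an invented name.
#include "jsapi.h"
#include "jsfriendapi.h"

static JSObject*
TransferBuffer(JSContext* cx, JS::HandleObject source)
{
    // Read the length before stealing; a detached buffer reports length 0.
    uint32_t nbytes = JS_GetArrayBufferByteLength(source);

    // Fails for detached, wasm and asm.js-prepared buffers.
    void* contents = JS_StealArrayBufferContents(cx, source);
    if (!contents) {
        return nullptr;
    }

    // The new buffer takes ownership of the stolen allocation.
    return JS_NewArrayBufferWithContents(cx, nbytes, contents);
}
// ---------------------------------------------------------------------------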
2064 | | |
2065 | | JS_PUBLIC_API(JSObject*) |
2066 | | JS_NewMappedArrayBufferWithContents(JSContext* cx, size_t nbytes, void* data) |
2067 | 0 | { |
2068 | 0 | AssertHeapIsIdle(); |
2069 | 0 | CHECK_THREAD(cx); |
2070 | 0 |
2071 | 0 | MOZ_ASSERT(data); |
2072 | 0 | ArrayBufferObject::BufferContents contents = |
2073 | 0 | ArrayBufferObject::BufferContents::create<ArrayBufferObject::MAPPED>(data); |
2074 | 0 | return ArrayBufferObject::create(cx, nbytes, contents, ArrayBufferObject::OwnsData, |
2075 | 0 | /* proto = */ nullptr, TenuredObject); |
2076 | 0 | } |
2077 | | |
2078 | | JS_PUBLIC_API(void*) |
2079 | | JS_CreateMappedArrayBufferContents(int fd, size_t offset, size_t length) |
2080 | 0 | { |
2081 | 0 | return ArrayBufferObject::createMappedContents(fd, offset, length).data(); |
2082 | 0 | } |
2083 | | |
2084 | | JS_PUBLIC_API(void) |
2085 | | JS_ReleaseMappedArrayBufferContents(void* contents, size_t length) |
2086 | 0 | { |
2087 | 0 | gc::DeallocateMappedContent(contents, length); |
2088 | 0 | } |
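
// ---------------------------------------------------------------------------
// A minimal usage sketch tying together the three mapped-buffer entry points
// above, assuming a POSIX file descriptor and an initialized JSContext*.
// MapFileAsArrayBuffer is an invented name.
#include <fcntl.h>
#include <unistd.h>
#include "jsapi.h"

static JSObject*
MapFileAsArrayBuffer(JSContext* cx, const char* path, size_t length)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0) {
        return nullptr;
    }

    // Maps [0, length) of the file; returns nullptr on failure.
    void* contents = JS_CreateMappedArrayBufferContents(fd, /* offset = */ 0, length);
    close(fd);  // The mapping outlives the descriptor.
    if (!contents) {
        return nullptr;
    }

    JSObject* buffer = JS_NewMappedArrayBufferWithContents(cx, length, contents);
    if (!buffer) {
        // Not adopted by a buffer, so release the mapping ourselves.
        JS_ReleaseMappedArrayBufferContents(contents, length);
    }
    return buffer;
}
// ---------------------------------------------------------------------------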
2089 | | |
2090 | | JS_FRIEND_API(bool) |
2091 | | JS_IsMappedArrayBufferObject(JSObject* obj) |
2092 | 0 | { |
2093 | 0 | obj = CheckedUnwrap(obj); |
2094 | 0 | if (!obj) { |
2095 | 0 | return false; |
2096 | 0 | } |
2097 | 0 | |
2098 | 0 | return obj->is<ArrayBufferObject>() && obj->as<ArrayBufferObject>().isMapped(); |
2099 | 0 | } |
2100 | | |
2101 | | JS_FRIEND_API(void*) |
2102 | | JS_GetArrayBufferViewData(JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) |
2103 | 0 | { |
2104 | 0 | obj = CheckedUnwrap(obj); |
2105 | 0 | if (!obj) { |
2106 | 0 | return nullptr; |
2107 | 0 | } |
2108 | 0 | if (obj->is<DataViewObject>()) { |
2109 | 0 | DataViewObject& dv = obj->as<DataViewObject>(); |
2110 | 0 | *isSharedMemory = dv.isSharedMemory(); |
2111 | 0 | return dv.dataPointerEither().unwrap(/*safe - caller sees isSharedMemory flag*/); |
2112 | 0 | } |
2113 | 0 | TypedArrayObject& ta = obj->as<TypedArrayObject>(); |
2114 | 0 | *isSharedMemory = ta.isSharedMemory(); |
2115 | 0 | return ta.viewDataEither().unwrap(/*safe - caller sees isSharedMemory flag*/); |
2116 | 0 | } |
2117 | | |
2118 | | JS_FRIEND_API(JSObject*) |
2119 | | JS_GetArrayBufferViewBuffer(JSContext* cx, HandleObject objArg, bool* isSharedMemory) |
2120 | 0 | { |
2121 | 0 | AssertHeapIsIdle(); |
2122 | 0 | CHECK_THREAD(cx); |
2123 | 0 | cx->check(objArg); |
2124 | 0 |
2125 | 0 | JSObject* obj = CheckedUnwrap(objArg); |
2126 | 0 | if (!obj) { |
2127 | 0 | return nullptr; |
2128 | 0 | } |
2129 | 0 | MOZ_ASSERT(obj->is<ArrayBufferViewObject>()); |
2130 | 0 |
2131 | 0 | Rooted<ArrayBufferViewObject*> viewObject(cx, static_cast<ArrayBufferViewObject*>(obj)); |
2132 | 0 | ArrayBufferObjectMaybeShared* buffer = ArrayBufferViewObject::bufferObject(cx, viewObject); |
2133 | 0 | *isSharedMemory = buffer->is<SharedArrayBufferObject>(); |
2134 | 0 | return buffer; |
2135 | 0 | } |
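
// ---------------------------------------------------------------------------
// A minimal usage sketch for JS_GetArrayBufferViewBuffer above, assuming an
// initialized JSContext* and a rooted view object; BufferOfView is an invented
// name. The call can lazily create the buffer for a typed array, so it can
// fail and report OOM.
#include "jsfriendapi.h"

static JSObject*
BufferOfView(JSContext* cx, JS::HandleObject view)
{
    bool isShared = false;
    JSObject* buffer = JS_GetArrayBufferViewBuffer(cx, view, &isShared);
    if (!buffer) {
        return nullptr;
    }
    // This sketch only deals with unshared buffers.
    return isShared ? nullptr : buffer;
}
// ---------------------------------------------------------------------------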
2136 | | |
2137 | | JS_FRIEND_API(uint32_t) |
2138 | | JS_GetArrayBufferViewByteLength(JSObject* obj) |
2139 | 0 | { |
2140 | 0 | obj = CheckedUnwrap(obj); |
2141 | 0 | if (!obj) { |
2142 | 0 | return 0; |
2143 | 0 | } |
2144 | 0 | return obj->is<DataViewObject>() |
2145 | 0 | ? obj->as<DataViewObject>().byteLength() |
2146 | 0 | : obj->as<TypedArrayObject>().byteLength(); |
2147 | 0 | } |
2148 | | |
2149 | | JS_FRIEND_API(uint32_t) |
2150 | | JS_GetArrayBufferViewByteOffset(JSObject* obj) |
2151 | 0 | { |
2152 | 0 | obj = CheckedUnwrap(obj); |
2153 | 0 | if (!obj) { |
2154 | 0 | return 0; |
2155 | 0 | } |
2156 | 0 | return obj->is<DataViewObject>() |
2157 | 0 | ? obj->as<DataViewObject>().byteOffset() |
2158 | 0 | : obj->as<TypedArrayObject>().byteOffset(); |
2159 | 0 | } |
2160 | | |
2161 | | JS_FRIEND_API(JSObject*) |
2162 | | JS_GetObjectAsArrayBufferView(JSObject* obj, uint32_t* length, bool* isSharedMemory, uint8_t** data) |
2163 | 0 | { |
2164 | 0 | if (!(obj = CheckedUnwrap(obj))) { |
2165 | 0 | return nullptr; |
2166 | 0 | } |
2167 | 0 | if (!(obj->is<ArrayBufferViewObject>())) { |
2168 | 0 | return nullptr; |
2169 | 0 | } |
2170 | 0 | |
2171 | 0 | js::GetArrayBufferViewLengthAndData(obj, length, isSharedMemory, data); |
2172 | 0 | return obj; |
2173 | 0 | } |
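
// ---------------------------------------------------------------------------
// A minimal usage sketch for JS_GetObjectAsArrayBufferView above; SumViewBytes
// is an invented name. The isSharedMemory out-parameter must be consulted
// before touching the data, and no JSAPI calls are made while the raw pointer
// is held, so no GC can move or free the bytes underneath us.
#include <stdint.h>
#include "jsfriendapi.h"

static uint64_t
SumViewBytes(JSObject* maybeView)
{
    uint32_t length = 0;
    bool isShared = false;
    uint8_t* data = nullptr;

    if (!JS_GetObjectAsArrayBufferView(maybeView, &length, &isShared, &data)) {
        return 0;  // Not a (possibly wrapped) ArrayBuffer view.
    }
    if (isShared) {
        return 0;  // Reading shared memory without synchronization is racy; skip.
    }

    uint64_t sum = 0;
    for (uint32_t i = 0; i < length; i++) {
        sum += data[i];
    }
    return sum;
}
// ---------------------------------------------------------------------------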
2174 | | |
2175 | | JS_FRIEND_API(void) |
2176 | | js::GetArrayBufferViewLengthAndData(JSObject* obj, uint32_t* length, bool* isSharedMemory, |
2177 | | uint8_t** data) |
2178 | 0 | { |
2179 | 0 | MOZ_ASSERT(obj->is<ArrayBufferViewObject>()); |
2180 | 0 |
2181 | 0 | *length = obj->is<DataViewObject>() |
2182 | 0 | ? obj->as<DataViewObject>().byteLength() |
2183 | 0 | : obj->as<TypedArrayObject>().byteLength(); |
2184 | 0 |
2185 | 0 | if (obj->is<DataViewObject>()) { |
2186 | 0 | DataViewObject& dv = obj->as<DataViewObject>(); |
2187 | 0 | *isSharedMemory = dv.isSharedMemory(); |
2188 | 0 | *data = static_cast<uint8_t*>( |
2189 | 0 | dv.dataPointerEither().unwrap(/*safe - caller sees isShared flag*/)); |
2190 | 0 | } |
2191 | 0 | else { |
2192 | 0 | TypedArrayObject& ta = obj->as<TypedArrayObject>(); |
2193 | 0 | *isSharedMemory = ta.isSharedMemory(); |
2194 | 0 | *data = static_cast<uint8_t*>( |
2195 | 0 | ta.viewDataEither().unwrap(/*safe - caller sees isShared flag*/)); |
2196 | 0 | } |
2197 | 0 | } |
2198 | | |
2199 | | JS_FRIEND_API(JSObject*) |
2200 | | JS_GetObjectAsArrayBuffer(JSObject* obj, uint32_t* length, uint8_t** data) |
2201 | 0 | { |
2202 | 0 | if (!(obj = CheckedUnwrap(obj))) { |
2203 | 0 | return nullptr; |
2204 | 0 | } |
2205 | 0 | if (!IsArrayBuffer(obj)) { |
2206 | 0 | return nullptr; |
2207 | 0 | } |
2208 | 0 | |
2209 | 0 | *length = AsArrayBuffer(obj).byteLength(); |
2210 | 0 | *data = AsArrayBuffer(obj).dataPointer(); |
2211 | 0 |
2212 | 0 | return obj; |
2213 | 0 | } |
2214 | | |
2215 | | JS_FRIEND_API(void) |
2216 | | js::GetArrayBufferLengthAndData(JSObject* obj, uint32_t* length, bool* isSharedMemory, uint8_t** data) |
2217 | 0 | { |
2218 | 0 | MOZ_ASSERT(IsArrayBuffer(obj)); |
2219 | 0 | *length = AsArrayBuffer(obj).byteLength(); |
2220 | 0 | *data = AsArrayBuffer(obj).dataPointer(); |
2221 | 0 | *isSharedMemory = false; |
2222 | 0 | } |