Line data Source code
1 : // Copyright 2015 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/runtime/runtime-utils.h"
6 :
7 : #include "src/arguments.h"
8 : #include "src/base/macros.h"
9 : #include "src/base/platform/mutex.h"
10 : #include "src/conversions-inl.h"
11 : #include "src/factory.h"
12 :
13 : // Implement Atomic accesses to SharedArrayBuffers as defined in the
14 : // SharedArrayBuffer draft spec, found here
15 : // https://github.com/tc39/ecmascript_sharedmem
16 :
17 : namespace v8 {
18 : namespace internal {
19 :
20 : namespace {
21 :
22 : #if V8_CC_GNU
23 :
// Atomically stores |value| at |p| and returns the contents that were
// previously there, with sequentially-consistent ordering.
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  T old_value = __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
28 :
// Atomically stores |newval| at |p| iff |oldval| is currently there.
// Returns the value observed at |p|: on failure the builtin writes the
// actual contents back into |expected|, so either way the pre-operation
// value is returned.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  T expected = oldval;
  __atomic_compare_exchange_n(p, &expected, newval, false, __ATOMIC_SEQ_CST,
                              __ATOMIC_SEQ_CST);
  return expected;
}
35 :
// Atomic seq-cst fetch-and-add; returns the value held before the add.
template <typename T>
inline T AddSeqCst(T* p, T value) {
  T old_value = __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
40 :
// Atomic seq-cst fetch-and-subtract; returns the value held before the sub.
template <typename T>
inline T SubSeqCst(T* p, T value) {
  T old_value = __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
45 :
// Atomic seq-cst fetch-and-AND; returns the value held before the AND.
template <typename T>
inline T AndSeqCst(T* p, T value) {
  T old_value = __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
50 :
// Atomic seq-cst fetch-and-OR; returns the value held before the OR.
template <typename T>
inline T OrSeqCst(T* p, T value) {
  T old_value = __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
55 :
// Atomic seq-cst fetch-and-XOR; returns the value held before the XOR.
template <typename T>
inline T XorSeqCst(T* p, T value) {
  T old_value = __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
  return old_value;
}
60 :
61 : #elif V8_CC_MSVC
62 :
63 : #define InterlockedExchange32 _InterlockedExchange
64 : #define InterlockedCompareExchange32 _InterlockedCompareExchange
65 : #define InterlockedCompareExchange8 _InterlockedCompareExchange8
66 : #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
67 : #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
68 : #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
69 : #define InterlockedAnd32 _InterlockedAnd
70 : #define InterlockedOr32 _InterlockedOr
71 : #define InterlockedXor32 _InterlockedXor
72 :
73 : #define ATOMIC_OPS(type, suffix, vctype) \
74 : inline type ExchangeSeqCst(type* p, type value) { \
75 : return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
76 : bit_cast<vctype>(value)); \
77 : } \
78 : inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
79 : return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
80 : bit_cast<vctype>(newval), \
81 : bit_cast<vctype>(oldval)); \
82 : } \
83 : inline type AddSeqCst(type* p, type value) { \
84 : return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
85 : bit_cast<vctype>(value)); \
86 : } \
87 : inline type SubSeqCst(type* p, type value) { \
88 : return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
89 : -bit_cast<vctype>(value)); \
90 : } \
91 : inline type AndSeqCst(type* p, type value) { \
92 : return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
93 : bit_cast<vctype>(value)); \
94 : } \
95 : inline type OrSeqCst(type* p, type value) { \
96 : return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
97 : bit_cast<vctype>(value)); \
98 : } \
99 : inline type XorSeqCst(type* p, type value) { \
100 : return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
101 : bit_cast<vctype>(value)); \
102 : }
103 :
104 : ATOMIC_OPS(int8_t, 8, char)
105 : ATOMIC_OPS(uint8_t, 8, char)
106 : ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
107 : ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
108 : ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
109 : ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
110 :
111 : #undef ATOMIC_OPS_INTEGER
112 : #undef ATOMIC_OPS
113 :
114 : #undef InterlockedExchange32
115 : #undef InterlockedCompareExchange32
116 : #undef InterlockedCompareExchange8
117 : #undef InterlockedExchangeAdd32
118 : #undef InterlockedExchangeAdd16
119 : #undef InterlockedExchangeAdd8
120 : #undef InterlockedAnd32
121 : #undef InterlockedOr32
122 : #undef InterlockedXor32
123 :
124 : #else
125 :
126 : #error Unsupported platform!
127 :
128 : #endif
129 :
130 : template <typename T>
131 : T FromObject(Handle<Object> number);
132 :
133 : template <>
134 : inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
135 0 : return NumberToUint32(*number);
136 : }
137 :
138 : template <>
139 : inline int8_t FromObject<int8_t>(Handle<Object> number) {
140 0 : return NumberToInt32(*number);
141 : }
142 :
143 : template <>
144 : inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
145 0 : return NumberToUint32(*number);
146 : }
147 :
148 : template <>
149 : inline int16_t FromObject<int16_t>(Handle<Object> number) {
150 0 : return NumberToInt32(*number);
151 : }
152 :
153 : template <>
154 : inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
155 0 : return NumberToUint32(*number);
156 : }
157 :
158 : template <>
159 : inline int32_t FromObject<int32_t>(Handle<Object> number) {
160 0 : return NumberToInt32(*number);
161 : }
162 :
163 :
164 : inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
165 :
166 : inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
167 :
168 : inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
169 :
170 : inline Object* ToObject(Isolate* isolate, uint16_t t) {
171 : return Smi::FromInt(t);
172 : }
173 :
174 : inline Object* ToObject(Isolate* isolate, int32_t t) {
175 0 : return *isolate->factory()->NewNumber(t);
176 : }
177 :
178 : inline Object* ToObject(Isolate* isolate, uint32_t t) {
179 0 : return *isolate->factory()->NewNumber(t);
180 : }
181 :
182 : template <typename T>
183 0 : inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
184 : Handle<Object> obj) {
185 : T value = FromObject<T>(obj);
186 0 : T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
187 0 : return ToObject(isolate, result);
188 : }
189 :
190 : template <typename T>
191 0 : inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
192 : Handle<Object> oldobj, Handle<Object> newobj) {
193 : T oldval = FromObject<T>(oldobj);
194 : T newval = FromObject<T>(newobj);
195 : T result =
196 0 : CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
197 0 : return ToObject(isolate, result);
198 : }
199 :
200 : template <typename T>
201 0 : inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
202 : Handle<Object> obj) {
203 : T value = FromObject<T>(obj);
204 0 : T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
205 0 : return ToObject(isolate, result);
206 : }
207 :
208 : template <typename T>
209 0 : inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
210 : Handle<Object> obj) {
211 : T value = FromObject<T>(obj);
212 0 : T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
213 0 : return ToObject(isolate, result);
214 : }
215 :
216 : template <typename T>
217 0 : inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
218 : Handle<Object> obj) {
219 : T value = FromObject<T>(obj);
220 0 : T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
221 0 : return ToObject(isolate, result);
222 : }
223 :
224 : template <typename T>
225 0 : inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
226 : Handle<Object> obj) {
227 : T value = FromObject<T>(obj);
228 0 : T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
229 0 : return ToObject(isolate, result);
230 : }
231 :
232 : template <typename T>
233 0 : inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
234 : Handle<Object> obj) {
235 : T value = FromObject<T>(obj);
236 0 : T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
237 0 : return ToObject(isolate, result);
238 : }
239 :
240 : } // anonymous namespace
241 :
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
// Expands V once per integer typed-array element kind. Float and clamped
// kinds are deliberately absent: the Runtime_Atomics* switches below only
// generate cases for these types and treat everything else as unreachable.
#define INTEGER_TYPED_ARRAYS(V) \
  V(Uint8, uint8, UINT8, uint8_t, 1) \
  V(Int8, int8, INT8, int8_t, 1) \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2) \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
251 :
// Throws the TypeError raised when an Atomics operation is given a value
// that is not an integer shared typed array (message
// kNotIntegerSharedTypedArray, formatted with the offending |value|).
RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate,
      NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
}
260 :
// Throws the TypeError raised when an Atomics operation requires an Int32
// shared typed array but was given something else (message
// kNotInt32SharedTypedArray, formatted with the offending |value|).
RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
}
268 :
// Throws the RangeError raised when an Atomics access index fails
// validation. Takes no arguments; the message carries no further detail.
RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
  HandleScope scope(isolate);
  DCHECK_EQ(0, args.length());
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
}
275 :
276 0 : RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
277 0 : HandleScope scope(isolate);
278 : DCHECK_EQ(3, args.length());
279 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
280 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
281 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
282 0 : CHECK(sta->GetBuffer()->is_shared());
283 0 : CHECK_LT(index, NumberToSize(sta->length()));
284 :
285 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
286 0 : NumberToSize(sta->byte_offset());
287 :
288 0 : switch (sta->type()) {
289 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
290 : case kExternal##Type##Array: \
291 : return DoExchange<ctype>(isolate, source, index, value);
292 :
293 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
294 : #undef TYPED_ARRAY_CASE
295 :
296 : default:
297 : break;
298 : }
299 :
300 0 : UNREACHABLE();
301 : }
302 :
303 0 : RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
304 0 : HandleScope scope(isolate);
305 : DCHECK_EQ(4, args.length());
306 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
307 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
308 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
309 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
310 0 : CHECK(sta->GetBuffer()->is_shared());
311 0 : CHECK_LT(index, NumberToSize(sta->length()));
312 :
313 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
314 0 : NumberToSize(sta->byte_offset());
315 :
316 0 : switch (sta->type()) {
317 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
318 : case kExternal##Type##Array: \
319 : return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);
320 :
321 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
322 : #undef TYPED_ARRAY_CASE
323 :
324 : default:
325 : break;
326 : }
327 :
328 0 : UNREACHABLE();
329 : }
330 :
331 : // ES #sec-atomics.add
332 : // Atomics.add( typedArray, index, value )
333 0 : RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
334 0 : HandleScope scope(isolate);
335 : DCHECK_EQ(3, args.length());
336 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
337 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
338 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
339 0 : CHECK(sta->GetBuffer()->is_shared());
340 0 : CHECK_LT(index, NumberToSize(sta->length()));
341 :
342 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
343 0 : NumberToSize(sta->byte_offset());
344 :
345 0 : switch (sta->type()) {
346 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
347 : case kExternal##Type##Array: \
348 : return DoAdd<ctype>(isolate, source, index, value);
349 :
350 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
351 : #undef TYPED_ARRAY_CASE
352 :
353 : default:
354 : break;
355 : }
356 :
357 0 : UNREACHABLE();
358 : }
359 :
360 : // ES #sec-atomics.sub
361 : // Atomics.sub( typedArray, index, value )
362 0 : RUNTIME_FUNCTION(Runtime_AtomicsSub) {
363 0 : HandleScope scope(isolate);
364 : DCHECK_EQ(3, args.length());
365 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
366 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
367 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
368 0 : CHECK(sta->GetBuffer()->is_shared());
369 0 : CHECK_LT(index, NumberToSize(sta->length()));
370 :
371 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
372 0 : NumberToSize(sta->byte_offset());
373 :
374 0 : switch (sta->type()) {
375 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
376 : case kExternal##Type##Array: \
377 : return DoSub<ctype>(isolate, source, index, value);
378 :
379 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
380 : #undef TYPED_ARRAY_CASE
381 :
382 : default:
383 : break;
384 : }
385 :
386 0 : UNREACHABLE();
387 : }
388 :
389 : // ES #sec-atomics.and
390 : // Atomics.and( typedArray, index, value )
391 0 : RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
392 0 : HandleScope scope(isolate);
393 : DCHECK_EQ(3, args.length());
394 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
395 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
396 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
397 0 : CHECK(sta->GetBuffer()->is_shared());
398 0 : CHECK_LT(index, NumberToSize(sta->length()));
399 :
400 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
401 0 : NumberToSize(sta->byte_offset());
402 :
403 0 : switch (sta->type()) {
404 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
405 : case kExternal##Type##Array: \
406 : return DoAnd<ctype>(isolate, source, index, value);
407 :
408 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
409 : #undef TYPED_ARRAY_CASE
410 :
411 : default:
412 : break;
413 : }
414 :
415 0 : UNREACHABLE();
416 : }
417 :
418 : // ES #sec-atomics.or
419 : // Atomics.or( typedArray, index, value )
420 0 : RUNTIME_FUNCTION(Runtime_AtomicsOr) {
421 0 : HandleScope scope(isolate);
422 : DCHECK_EQ(3, args.length());
423 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
424 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
425 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
426 0 : CHECK(sta->GetBuffer()->is_shared());
427 0 : CHECK_LT(index, NumberToSize(sta->length()));
428 :
429 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
430 0 : NumberToSize(sta->byte_offset());
431 :
432 0 : switch (sta->type()) {
433 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
434 : case kExternal##Type##Array: \
435 : return DoOr<ctype>(isolate, source, index, value);
436 :
437 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
438 : #undef TYPED_ARRAY_CASE
439 :
440 : default:
441 : break;
442 : }
443 :
444 0 : UNREACHABLE();
445 : }
446 :
447 : // ES #sec-atomics.xor
448 : // Atomics.xor( typedArray, index, value )
449 0 : RUNTIME_FUNCTION(Runtime_AtomicsXor) {
450 0 : HandleScope scope(isolate);
451 : DCHECK_EQ(3, args.length());
452 0 : CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
453 0 : CONVERT_SIZE_ARG_CHECKED(index, 1);
454 0 : CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
455 0 : CHECK(sta->GetBuffer()->is_shared());
456 0 : CHECK_LT(index, NumberToSize(sta->length()));
457 :
458 0 : uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
459 0 : NumberToSize(sta->byte_offset());
460 :
461 0 : switch (sta->type()) {
462 : #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
463 : case kExternal##Type##Array: \
464 : return DoXor<ctype>(isolate, source, index, value);
465 :
466 0 : INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
467 : #undef TYPED_ARRAY_CASE
468 :
469 : default:
470 : break;
471 : }
472 :
473 0 : UNREACHABLE();
474 : }
475 :
476 : } // namespace internal
477 : } // namespace v8
|