Line data Source code
1 : // Copyright 2017 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/builtins/builtins-utils-gen.h"
6 : #include "src/builtins/builtins.h"
7 : #include "src/code-stub-assembler.h"
8 : #include "src/objects.h"
9 :
10 : namespace v8 {
11 : namespace internal {
12 :
13 : using compiler::Node;
14 : template <typename T>
15 : using TNode = compiler::TNode<T>;
16 :
// Assembler providing the helpers shared by all Atomics.* builtins that
// operate on integer TypedArrays backed by a SharedArrayBuffer.
class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
 public:
  explicit SharedArrayBufferBuiltinsAssembler(
      compiler::CodeAssemblerState* state)
      : CodeStubAssembler(state) {}

 protected:
  // Signature of the CodeAssembler atomic read-modify-write helpers
  // (e.g. &CodeAssembler::AtomicAdd). |value_high| carries the upper 32 bits
  // of a 64-bit operand on 32-bit platforms; callers pass nullptr on 64-bit
  // platforms (see AtomicBinopBuiltinCommon).
  typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type,
                                                    Node* base, Node* offset,
                                                    Node* value,
                                                    Node* value_high);
  // Throws a TypeError unless |tagged| is an integer-typed JSTypedArray on a
  // shared buffer; on success yields the elements' instance type and the raw
  // (offset-adjusted) data pointer through the out parameters.
  void ValidateSharedTypedArray(Node* tagged, Node* context,
                                Node** out_instance_type,
                                Node** out_backing_store);
  // Coerces |tagged| to a valid Smi index (throwing RangeError otherwise) and
  // returns it as word32; the Smi itself is returned via |number_index| for
  // runtime-call fallbacks.
  Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
                                         Node** number_index);
  // Throws a RangeError if |index_word| is not below the array's length.
  void ValidateAtomicIndex(Node* array, Node* index_word, Node* context);
#if DEBUG
  void DebugSanityCheckAtomicIndex(Node* array, Node* index_word,
                                   Node* context);
#endif
  // Shared body of the Atomics.add/sub/and/or/xor builtins: dispatches on the
  // element type and applies |function| (or the |runtime_function| fallback on
  // architectures without a TurboFan implementation).
  void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
                                Node* context, AssemblerFunction function,
                                Runtime::FunctionId runtime_function);

  // Create a BigInt from the result of a 64-bit atomic operation, using
  // projections on 32-bit platforms.
  TNode<BigInt> BigIntFromSigned64(Node* signed64);
  TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
};
47 :
// Throws a TypeError (kNotIntegerSharedTypedArray) unless |tagged| is a
// JSTypedArray with an integer element type whose buffer is shared. On the
// success path, *out_instance_type receives the elements' FixedArray instance
// type and *out_backing_store the data pointer already adjusted by the view's
// byte offset.
void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
    Node* tagged, Node* context, Node** out_instance_type,
    Node** out_backing_store) {
  Label not_float_or_clamped(this), invalid(this);

  // Fail if it is not a heap object.
  GotoIf(TaggedIsSmi(tagged), &invalid);

  // Fail if the array's instance type is not JSTypedArray.
  GotoIfNot(InstanceTypeEqual(LoadInstanceType(tagged), JS_TYPED_ARRAY_TYPE),
            &invalid);

  // Fail if the array's JSArrayBuffer is not shared.
  TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged));
  TNode<Uint32T> bitfield = LoadJSArrayBufferBitField(array_buffer);
  GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid);

  // Fail if the array's element type is float32, float64 or clamped.
  // The static asserts below pin the instance-type ordering that the two
  // range checks rely on: all small integer types sort below FLOAT32, and
  // the BigInt types sort above UINT8_CLAMPED.
  Node* elements_instance_type = LoadInstanceType(LoadElements(tagged));
  STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  GotoIf(Int32LessThan(elements_instance_type,
                       Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
         &not_float_or_clamped);
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
  Branch(Int32GreaterThan(elements_instance_type,
                          Int32Constant(FIXED_UINT8_CLAMPED_ARRAY_TYPE)),
         &not_float_or_clamped, &invalid);

  BIND(&invalid);
  {
    ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
                   tagged);
  }

  BIND(&not_float_or_clamped);
  *out_instance_type = elements_instance_type;

  // The backing store is adjusted by the view's byte offset so callers can
  // index it directly with (scaled) element indices.
  TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
  TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged));
  *out_backing_store = IntPtrAdd(backing_store, byte_offset);
}
95 :
// https://tc39.github.io/ecma262/#sec-validateatomicaccess
// Coerces |tagged| to a non-negative Smi index, throwing a RangeError
// (kInvalidAtomicAccessIndex) if the coercion fails, and returns the index
// as a word32 value.
Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
    Node* tagged, Node* context, Node** number_index) {
  VARIABLE(var_result, MachineRepresentation::kWord32);
  Label done(this), range_error(this);

  // Returns word32 since index cannot be longer than a TypedArray length,
  // which has a uint32 maximum.
  // The |number_index| output parameter is used only for architectures that
  // don't currently have a TF implementation and forward to runtime functions
  // instead; they expect the value has already been coerced to an integer.
  *number_index = ToSmiIndex(CAST(context), CAST(tagged), &range_error);
  var_result.Bind(SmiToInt32(*number_index));
  Goto(&done);

  BIND(&range_error);
  { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }

  BIND(&done);
  return var_result.value();
}
117 :
118 504 : void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
119 : Node* index_word,
120 : Node* context) {
121 : // Check if the index is in bounds. If not, throw RangeError.
122 1008 : Label check_passed(this);
123 : Node* array_length_word32 =
124 1008 : TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array)));
125 1008 : GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
126 :
127 504 : ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
128 :
129 504 : BIND(&check_passed);
130 504 : }
131 :
#if DEBUG
// Debug-only re-check that |index_word| is still in bounds for |array|.
void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
    Node* array, Node* index_word, Node* context) {
  // In Debug mode, we re-validate the index as a sanity check because
  // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
  // detached and the TypedArray length can't change either, so skipping this
  // check in Release mode is safe.
  CSA_ASSERT(this,
             Uint32LessThan(index_word,
                            TruncateTaggedToWord32(
                                context, LoadJSTypedArrayLength(CAST(array)))));
}
#endif
145 :
146 448 : TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
147 : Node* signed64) {
148 448 : if (Is64()) {
149 448 : return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
150 : } else {
151 0 : TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
152 0 : TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
153 0 : return BigIntFromInt32Pair(low, high);
154 : }
155 : }
156 :
157 448 : TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
158 : Node* unsigned64) {
159 448 : if (Is64()) {
160 448 : return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
161 : } else {
162 0 : TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
163 0 : TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
164 0 : return BigIntFromUint32Pair(low, high);
165 : }
166 : }
167 :
// Atomics.load: validates the array and index, then dispatches on the element
// type to an atomic load of the matching width, boxing the result as a Smi,
// heap number, or BigInt.
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Dispatch on the elements' instance type; |other| is unreachable because
  // ValidateSharedTypedArray already rejected every other type.
  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), other(this);
  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,     FIXED_UINT8_ARRAY_TYPE,
      FIXED_INT16_ARRAY_TYPE,    FIXED_UINT16_ARRAY_TYPE,
      FIXED_INT32_ARRAY_TYPE,    FIXED_UINT32_ARRAY_TYPE,
      FIXED_BIGINT64_ARRAY_TYPE, FIXED_BIGUINT64_ARRAY_TYPE,
  };
  Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64};
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  // Byte offsets are the element index shifted by log2(element size).
  BIND(&i8);
  Return(
      SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));

  BIND(&u8);
  Return(SmiFromInt32(
      AtomicLoad(MachineType::Uint8(), backing_store, index_word)));

  BIND(&i16);
  Return(SmiFromInt32(
      AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));

  BIND(&u16);
  Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
                                 WordShl(index_word, 1))));

  BIND(&i32);
  Return(ChangeInt32ToTagged(
      AtomicLoad(MachineType::Int32(), backing_store, WordShl(index_word, 2))));

  BIND(&u32);
  Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
                                         WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
  // No TurboFan 64-bit atomic load on this architecture; fall back to runtime.
  BIND(&i64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));

  BIND(&u64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
#else
  BIND(&i64);
  // This uses Uint64() intentionally: AtomicLoad is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                       WordShl(index_word, 3))));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                         WordShl(index_word, 3))));
#endif
  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
}
240 :
// Atomics.store: validates the array and index, coerces the value (ToInteger
// for small element types, ToBigInt for 64-bit ones), performs the atomic
// store, and returns the coerced value.
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Peel the BigInt element types off first; the asserts pin the instance-type
  // ordering the range check relies on.
  Label u8(this), u16(this), u32(this), u64(this), other(this);
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &u64);

  // ToInteger may call into JavaScript, hence the debug re-validation below.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif

  // Stores don't distinguish signedness, so signed and unsigned types of the
  // same width share a label.
  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&u8);
  AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
              value_word32);
  Return(value_integer);

  BIND(&u16);
  AtomicStore(MachineRepresentation::kWord16, backing_store,
              WordShl(index_word, 1), value_word32);
  Return(value_integer);

  BIND(&u32);
  AtomicStore(MachineRepresentation::kWord32, backing_store,
              WordShl(index_word, 2), value_word32);
  Return(value_integer);

  BIND(&u64);
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
  // No TurboFan 64-bit atomic store on this architecture; fall back to
  // runtime.
  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
                     value));
#else
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  // Split the BigInt into raw words; |high| is only needed on 32-bit
  // platforms.
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  AtomicStore(MachineRepresentation::kWord64, backing_store,
              WordShl(index_word, 3), var_low.value(), high);
  Return(value_bigint);
#endif

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
}
316 :
// Atomics.exchange: validates the array and index, coerces the value, performs
// the atomic exchange of the matching width, and returns the previous element
// value (boxed as Smi, heap number, or BigInt).
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // No TurboFan atomic exchange on MIPS; fall back to the runtime entirely.
  Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
                     value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Peel the BigInt element types off first; the asserts pin the instance-type
  // ordering the range check relies on.
  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // ToInteger may call into JavaScript, hence the debug re-validation.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
                                     index_word, value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
                                     index_word, value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
                                            WordShl(index_word, 2),
                                            value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      AtomicExchange(MachineType::Uint32(), backing_store,
                     WordShl(index_word, 2), value_word32)));

  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  // Split the BigInt into raw words; |high| is only needed on 32-bit
  // platforms. Then dispatch on signedness for the result boxing.
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicExchange is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
                                           WordShl(index_word, 3),
                                           var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      AtomicExchange(MachineType::Uint64(), backing_store,
                     WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
}
421 :
// Atomics.compareExchange: validates the array and index, coerces both
// operands, performs the atomic compare-and-swap of the matching width, and
// returns the previous element value.
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* old_value = Parameter(Descriptor::kOldValue);
  Node* new_value = Parameter(Descriptor::kNewValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  // No TurboFan compare-exchange on these architectures; fall back to runtime.
  Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
                     index_integer, old_value, new_value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Peel the BigInt element types off first; the asserts pin the instance-type
  // ordering the range check relies on.
  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // ToInteger may call into JavaScript, hence the debug re-validation.
  Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
  Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
  Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Int16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Uint16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicCompareExchange(
      MachineType::Int32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(AtomicCompareExchange(
      MachineType::Uint32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  BIND(&big);
  TNode<BigInt> old_value_bigint = ToBigInt(CAST(context), CAST(old_value));
  TNode<BigInt> new_value_bigint = ToBigInt(CAST(context), CAST(new_value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  // Split both BigInts into raw words; the high words are only needed on
  // 32-bit platforms. Then dispatch on signedness for the result boxing.
  TVARIABLE(UintPtrT, var_old_low);
  TVARIABLE(UintPtrT, var_old_high);
  TVARIABLE(UintPtrT, var_new_low);
  TVARIABLE(UintPtrT, var_new_high);
  BigIntToRawBytes(old_value_bigint, &var_old_low, &var_old_high);
  BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high);
  Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value());
  Node* new_high = Is64() ? nullptr : static_cast<Node*>(var_new_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
  // for Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}
540 :
// Expands to the TF builtin for one Atomics read-modify-write operation
// (Add, Sub, And, Or, Xor), delegating to AtomicBinopBuiltinCommon with the
// matching CodeAssembler helper and runtime fallback.
#define BINOP_BUILTIN(op)                                       \
  TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
    Node* array = Parameter(Descriptor::kArray);                \
    Node* index = Parameter(Descriptor::kIndex);                \
    Node* value = Parameter(Descriptor::kValue);                \
    Node* context = Parameter(Descriptor::kContext);            \
    AtomicBinopBuiltinCommon(array, index, value, context,      \
                             &CodeAssembler::Atomic##op,        \
                             Runtime::kAtomics##op);            \
  }
BINOP_BUILTIN(Add)
BINOP_BUILTIN(Sub)
BINOP_BUILTIN(And)
BINOP_BUILTIN(Or)
BINOP_BUILTIN(Xor)
#undef BINOP_BUILTIN
557 :
// Shared body of the Atomics.add/sub/and/or/xor builtins: validates the array
// and index, coerces the value, applies |function| (a CodeAssembler atomic
// read-modify-write helper) at the matching width, and returns the previous
// element value. On architectures without TurboFan support it calls
// |runtime_function| instead.
void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
    Node* array, Node* index, Node* value, Node* context,
    AssemblerFunction function, Runtime::FunctionId runtime_function) {
  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  Return(CallRuntime(runtime_function, context, array, index_integer, value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Peel the BigInt element types off first; the asserts pin the instance-type
  // ordering the range check relies on.
  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);

  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // ToInteger may call into JavaScript, hence the debug re-validation.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  // The trailing nullptr is the unused |value_high| operand (32-bit operands
  // have no high word).
  BIND(&i8);
  Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&u8);
  Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&i16);
  Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&u16);
  Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(
      (this->*function)(MachineType::Int32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      (this->*function)(MachineType::Uint32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  // Split the BigInt into raw words; |high| is only needed on 32-bit
  // platforms. Then dispatch on signedness for the result boxing.
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: Atomic* ops are not implemented for
  // Int64(), which is fine because the machine instructions only care
  // about words.
  Return(BigIntFromSigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}
663 :
664 : } // namespace internal
665 59456 : } // namespace v8
|