Coverage Report

Created: 2025-10-29 06:26

/src/moddable/xs/sources/xsAtomics.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2016-2017  Moddable Tech, Inc.
3
 *
4
 *   This file is part of the Moddable SDK Runtime.
5
 * 
6
 *   The Moddable SDK Runtime is free software: you can redistribute it and/or modify
7
 *   it under the terms of the GNU Lesser General Public License as published by
8
 *   the Free Software Foundation, either version 3 of the License, or
9
 *   (at your option) any later version.
10
 * 
11
 *   The Moddable SDK Runtime is distributed in the hope that it will be useful,
12
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
 *   GNU Lesser General Public License for more details.
15
 * 
16
 *   You should have received a copy of the GNU Lesser General Public License
17
 *   along with the Moddable SDK Runtime.  If not, see <http://www.gnu.org/licenses/>.
18
 *
19
 */
20
21
#include "xsAll.h"
22
23
static txInteger fxCheckAtomicsIndex(txMachine* the, txInteger index, txInteger length);
24
static txSlot* fxCheckAtomicsTypedArray(txMachine* the, txBoolean onlyInt32);
25
static txSlot* fxCheckAtomicsArrayBuffer(txMachine* the, txSlot* slot, txBoolean onlyShared);
26
static void* fxCheckAtomicsArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable);
27
static txSlot* fxCheckSharedArrayBuffer(txMachine* the, txSlot* slot, txString which);
28
static void fxPushAtomicsValue(txMachine* the, int i, txID id);
29
30
#define mxAtomicsHead0(TYPE,TO) \
31
4
  TYPE result = 0; \
32
4
  txBoolean lock = host->kind == XS_HOST_KIND; \
33
4
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_IMMUTABLE); \
34
4
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
35
36
#define mxAtomicsHead1(TYPE,TO) \
37
14
  TYPE result = 0; \
38
14
  TYPE value = (TYPE)TO(the, slot); \
39
14
  txBoolean lock = host->kind == XS_HOST_KIND; \
40
14
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_MUTABLE); \
41
14
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
42
43
#define mxAtomicsHead2(TYPE,TO) \
44
1
  TYPE result = (TYPE)TO(the, slot + 1); \
45
1
  TYPE value = (TYPE)TO(the, slot); \
46
1
  txBoolean lock = host->kind == XS_HOST_KIND; \
47
1
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_MUTABLE); \
48
1
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
49
50
#ifdef mxUseGCCAtomics
51
1
  #define mxAtomicsCompareExchange() __atomic_compare_exchange(address, &result, &value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
52
9
  #define mxAtomicsLoad() __atomic_load(address, &result, __ATOMIC_SEQ_CST)
53
1
  #define mxAtomicsAdd() result = __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST)
54
1
  #define mxAtomicsAnd() result = __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST)
55
1
  #define mxAtomicsExchange() __atomic_exchange(address, &value, &result, __ATOMIC_SEQ_CST)
56
1
  #define mxAtomicsOr() result = __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST)
57
2
  #define mxAtomicsStore() __atomic_store(address, &value, __ATOMIC_SEQ_CST)
58
21.4k
  #define mxAtomicsSub() result = __atomic_fetch_sub(address, value, __ATOMIC_SEQ_CST)
59
2
  #define mxAtomicsXor() result = __atomic_fetch_xor(address, value, __ATOMIC_SEQ_CST)
60
#else
61
  #define mxAtomicsCompareExchange() if (lock) fxLockSharedChunk(data); if (*address == result) *address = value; else result = *address; if (lock) fxUnlockSharedChunk(data)
62
  #define mxAtomicsLoad() if (lock) fxLockSharedChunk(data); result = *address;  if (lock) fxUnlockSharedChunk(data)
63
  #define mxAtomicsAdd() if (lock) fxLockSharedChunk(data); result = *address; *address = result + value; if (lock) fxUnlockSharedChunk(data)
64
  #define mxAtomicsAnd() if (lock) fxLockSharedChunk(data); result = *address; *address = result & value; if (lock) fxUnlockSharedChunk(data)
65
  #define mxAtomicsExchange() if (lock) fxLockSharedChunk(data); result = *address; *address = value; if (lock) fxUnlockSharedChunk(data)
66
  #define mxAtomicsOr() if (lock) fxLockSharedChunk(data); result = *address; *address = result | value; if (lock) fxUnlockSharedChunk(data)
67
  #define mxAtomicsStore() if (lock) fxLockSharedChunk(data); *address = value; if (lock) fxUnlockSharedChunk(data)
68
  #define mxAtomicsSub() if (lock) fxLockSharedChunk(data); result = *address; *address = result - value; if (lock) fxUnlockSharedChunk(data)
69
  #define mxAtomicsXor() if (lock) fxLockSharedChunk(data); result = *address; *address = result ^ value; if (lock) fxUnlockSharedChunk(data)
70
#endif  
71
72
#define mxAtomicsTail() \
73
14
  slot->kind = XS_INTEGER_KIND; \
74
14
  slot->value.integer = result
75
76
#define mxAtomicsTailBigInt64() \
77
0
  fxFromBigInt64(the, slot, result)
78
79
#define mxAtomicsTailBigUint64() \
80
0
  fxFromBigUint64(the, slot, result)
81
  
82
#define mxAtomicsTailOverflow() \
83
0
  if (result <= 0x7FFFFFFF) { \
84
0
    slot->kind = XS_INTEGER_KIND; \
85
0
    slot->value.integer = result; \
86
0
  } \
87
0
  else { \
88
0
    slot->kind = XS_NUMBER_KIND; \
89
0
    slot->value.number = result; \
90
0
  }
91
  
92
#define mxAtomicsTailWait() \
93
5
  return (result != value) ? -1 : (timeout == 0) ? 0 : 1;
94
95
#define mxAtomicsDeclarations(onlyInt32, onlyShared) \
96
134
  txSlot* dispatch = fxCheckAtomicsTypedArray(the, onlyInt32); \
97
134
  txSlot* view = dispatch->next; \
98
134
  txSlot* buffer = view->next; \
99
134
  txSlot* host = fxCheckAtomicsArrayBuffer(the, buffer, onlyShared); \
100
134
  txU2 shift = dispatch->value.typedArray.dispatch->shift; \
101
134
  txInteger index = fxCheckAtomicsIndex(the, 1, fxGetDataViewSize(the, view, buffer) >> shift); \
102
134
  txInteger offset = view->value.dataView.offset + (index << shift)
103
104
0
void fxInt8Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
105
0
void fxInt16Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
106
1
void fxInt32Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
107
0
void fxInt64Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsAdd(); mxAtomicsTailBigInt64(); }
108
0
void fxUint8Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTail(); }
109
0
void fxUint16Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTail(); }
110
0
void fxUint32Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTailOverflow(); }
111
0
void fxUint64Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsAdd(); mxAtomicsTailBigUint64(); }
112
113
0
void fxInt8And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
114
0
void fxInt16And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
115
1
void fxInt32And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
116
0
void fxInt64And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsAnd(); mxAtomicsTailBigInt64(); }
117
0
void fxUint8And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTail(); }
118
0
void fxUint16And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTail(); }
119
0
void fxUint32And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTailOverflow(); }
120
0
void fxUint64And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsAnd(); mxAtomicsTailBigUint64(); }
121
122
0
void fxInt8CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS1, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
123
0
void fxInt16CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS2, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
124
1
void fxInt32CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS4, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
125
0
void fxInt64CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS8, fxToBigInt64); mxAtomicsCompareExchange(); mxAtomicsTailBigInt64(); }
126
0
void fxUint8CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU1, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTail(); }
127
0
void fxUint16CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU2, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTail(); }
128
0
void fxUint32CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU4, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTailOverflow(); }
129
0
void fxUint64CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU8, fxToBigUint64); mxAtomicsCompareExchange(); mxAtomicsTailBigUint64(); }
130
131
0
void fxInt8Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
132
0
void fxInt16Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
133
1
void fxInt32Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
134
0
void fxInt64Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsExchange(); mxAtomicsTailBigInt64(); }
135
0
void fxUint8Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTail(); }
136
0
void fxUint16Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTail(); }
137
0
void fxUint32Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTailOverflow(); }
138
0
void fxUint64Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsExchange(); mxAtomicsTailBigUint64(); }
139
140
0
void fxInt8Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS1, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
141
0
void fxInt16Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS2, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
142
4
void fxInt32Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS4, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
143
0
void fxInt64Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS8, fxToBigInt64); mxAtomicsLoad(); mxAtomicsTailBigInt64(); }
144
0
void fxUint8Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU1, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTail(); }
145
0
void fxUint16Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU2, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTail(); }
146
0
void fxUint32Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU4, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTailOverflow(); }
147
0
void fxUint64Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU8, fxToBigUint64); mxAtomicsLoad(); mxAtomicsTailBigUint64(); }
148
149
0
void fxInt8Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
150
0
void fxInt16Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
151
1
void fxInt32Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
152
0
void fxInt64Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsOr(); mxAtomicsTailBigInt64(); }
153
0
void fxUint8Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsOr(); mxAtomicsTail(); }
154
0
void fxUint16Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsOr(); mxAtomicsTail(); }
155
0
void fxUint32Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsOr(); mxAtomicsTailOverflow(); }
156
0
void fxUint64Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsOr(); mxAtomicsTailBigUint64(); }
157
158
0
void fxInt8Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
159
0
void fxInt16Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
160
2
void fxInt32Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
161
0
void fxInt64Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsStore(); mxAtomicsTailBigInt64(); }
162
0
void fxUint8Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsStore(); mxAtomicsTail(); }
163
0
void fxUint16Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsStore(); mxAtomicsTail(); }
164
0
void fxUint32Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsStore(); mxAtomicsTailOverflow(); }
165
0
void fxUint64Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsStore(); mxAtomicsTailBigUint64(); }
166
167
0
void fxInt8Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
168
0
void fxInt16Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
169
1
void fxInt32Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
170
0
void fxInt64Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsSub(); mxAtomicsTailBigInt64(); }
171
0
void fxUint8Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsSub(); mxAtomicsTail(); }
172
0
void fxUint16Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsSub(); mxAtomicsTail(); }
173
0
void fxUint32Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsSub(); mxAtomicsTailOverflow(); }
174
0
void fxUint64Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsSub(); mxAtomicsTailBigUint64(); }
175
176
0
void fxInt8Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
177
0
void fxInt16Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
178
2
void fxInt32Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
179
0
void fxInt64Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsXor(); mxAtomicsTailBigInt64(); }
180
0
void fxUint8Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsXor(); mxAtomicsTail(); }
181
0
void fxUint16Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsXor(); mxAtomicsTail(); }
182
0
void fxUint32Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsXor(); mxAtomicsTailOverflow(); }
183
0
void fxUint64Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsXor(); mxAtomicsTailBigUint64(); }
184
185
0
txInteger fxInt32Wait(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, txNumber timeout) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsLoad(); mxAtomicsTailWait(); }
186
5
txInteger fxInt64Wait(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, txNumber timeout) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsLoad(); mxAtomicsTailWait(); }
187
188
void fxBuildAtomics(txMachine* the)
189
29.6k
{
190
29.6k
  txSlot* slot;
191
  
192
29.6k
  mxPush(mxObjectPrototype);
193
29.6k
  slot = fxLastProperty(the, fxNewObjectInstance(the));
194
29.6k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_byteLength), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG);
195
29.6k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_growable), C_NULL, mxID(_growable), XS_DONT_ENUM_FLAG);
196
29.6k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_maxByteLength), C_NULL, mxID(_maxByteLength), XS_DONT_ENUM_FLAG);
197
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_grow), 1, mxID(_grow), XS_DONT_ENUM_FLAG);
198
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG);
199
29.6k
  slot = fxNextStringXProperty(the, slot, "SharedArrayBuffer", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG);
200
29.6k
  mxSharedArrayBufferPrototype = *the->stack;
201
29.6k
  slot = fxBuildHostConstructor(the, mxCallback(fx_SharedArrayBuffer), 1, mxID(_SharedArrayBuffer));
202
29.6k
  mxSharedArrayBufferConstructor = *the->stack;
203
29.6k
  slot = fxLastProperty(the, slot);
204
29.6k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG);
205
29.6k
  mxPop();
206
  
207
29.6k
  mxPush(mxObjectPrototype);
208
29.6k
  slot = fxLastProperty(the, fxNewObjectInstance(the));
209
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_add), 3, mxID(_add), XS_DONT_ENUM_FLAG);
210
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_and), 3, mxID(_and), XS_DONT_ENUM_FLAG);
211
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_compareExchange), 4, mxID(_compareExchange), XS_DONT_ENUM_FLAG);
212
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_exchange), 3, mxID(_exchange), XS_DONT_ENUM_FLAG);
213
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_isLockFree), 1, mxID(_isLockFree), XS_DONT_ENUM_FLAG);
214
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_load), 2, mxID(_load), XS_DONT_ENUM_FLAG);
215
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_or), 3, mxID(_or), XS_DONT_ENUM_FLAG);
216
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_notify), 3, mxID(_notify), XS_DONT_ENUM_FLAG);
217
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_store), 3, mxID(_store), XS_DONT_ENUM_FLAG);
218
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_sub), 3, mxID(_sub), XS_DONT_ENUM_FLAG);
219
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_wait), 4, mxID(_wait), XS_DONT_ENUM_FLAG);
220
29.6k
#if mxECMAScript2024
221
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_waitAsync), 4, mxID(_waitAsync), XS_DONT_ENUM_FLAG);
222
29.6k
#endif
223
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_notify), 3, mxID(_wake), XS_DONT_ENUM_FLAG);
224
29.6k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_xor), 3, mxID(_xor), XS_DONT_ENUM_FLAG);
225
29.6k
  slot = fxNextStringXProperty(the, slot, "Atomics", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG);
226
29.6k
  mxPull(mxAtomicsObject);
227
29.6k
}
228
229
txSlot* fxCheckAtomicsArrayBuffer(txMachine* the, txSlot* slot, txBoolean onlyShared)
230
84
{
231
84
  if ((!slot) || (!mxIsReference(slot)))
232
0
    mxTypeError("typedArray.buffer: not an object");
233
84
  slot = slot->value.reference->next;
234
84
  if (slot && (slot->kind == XS_HOST_KIND) && (slot->value.host.variant.destructor == fxReleaseSharedChunk))
235
68
    return slot;
236
16
  if (onlyShared)
237
8
    mxTypeError("typedArray.buffer: not a SharedArrayBuffer instance");
238
8
  if (slot && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_ARRAY_BUFFER_KIND)) {
239
8
    if (slot->value.arrayBuffer.address == C_NULL)
240
0
      mxTypeError("typedArray.buffer: detached");
241
8
    return slot;
242
8
  }
243
8
  mxTypeError("typedArray.buffer: not a SharedArrayBuffer instance, not an ArrayBuffer instance");
244
0
  return C_NULL;
245
8
}
246
247
void* fxCheckAtomicsArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable)
248
3
{
249
3
  if (slot->value.arrayBuffer.address == C_NULL)
250
0
    mxTypeError("typedArray.buffer: detached");
251
3
  if (mutable && (slot->flag & XS_DONT_SET_FLAG))
252
0
    mxTypeError("typedArray.buffer: read-only");
253
3
  return slot->value.arrayBuffer.address;
254
3
}
255
256
txInteger fxCheckAtomicsIndex(txMachine* the, txInteger i, txInteger length)
257
76
{
258
76
  txSlot *slot = (mxArgc > i) ? mxArgv(i) : C_NULL;
259
76
  if (slot && (XS_INTEGER_KIND == slot->kind)) {
260
56
    int index = slot->value.integer;
261
56
    if ((0 <= index) && (index < length))
262
42
      return index;
263
56
  }
264
265
34
  txNumber index = slot ? c_trunc(fxToNumber(the, slot)) : C_NAN; 
266
34
  if (c_isnan(index))
267
11
    index = 0;
268
34
  if (index < 0)
269
10
    mxRangeError("invalid index");
270
24
  else if (index >= length)
271
19
    mxRangeError("invalid index");
272
5
  return (txInteger)index;
273
34
}
274
275
txSlot* fxCheckAtomicsTypedArray(txMachine* the, txBoolean onlyInt32)
276
134
{
277
134
  txSlot* slot = (mxArgc > 0) ? mxArgv(0) : C_NULL;
278
134
  txID id;
279
134
  if ((!slot) || (!mxIsReference(slot)))
280
17
    mxTypeError("typedArray: not an object");
281
117
  slot = slot->value.reference->next;
282
117
  if ((!slot) || ((slot->kind != XS_TYPED_ARRAY_KIND)))
283
1
    mxTypeError("typedArray: not a TypedArray instance");
284
116
  id = slot->value.typedArray.dispatch->constructorID;
285
116
  if (onlyInt32) {
286
83
    if ((id != _Int32Array) && (id != _BigInt64Array))
287
21
      mxTypeError("typedArray: not an Int32Array instance");
288
83
  }
289
33
  else {
290
33
    if (id == _Float32Array)
291
0
      mxTypeError("typedArray: Float32Array instance");
292
33
    else if (id == _Float64Array)
293
0
      mxTypeError("typedArray: Float64Array instance");
294
33
    else if (id == _Uint8ClampedArray)
295
11
      mxTypeError("typedArray: Uint8ClampedArray instance");
296
22
  #if mxFloat16
297
22
    else if (id == _Float16Array)
298
0
      mxTypeError("typedArray: Float16Array instance");
299
33
  #endif
300
33
  }
301
84
  return slot;
302
116
}
303
304
txSlot* fxCheckSharedArrayBuffer(txMachine* the, txSlot* slot, txString which)
305
316
{
306
316
  if ((!slot) || (!mxIsReference(slot)))
307
69
    mxTypeError("%s: not an object", which);
308
247
  slot = slot->value.reference->next;
309
247
  if ((!slot) || (slot->kind != XS_HOST_KIND) || (slot->value.host.variant.destructor != fxReleaseSharedChunk))
310
35
    mxTypeError("%s: not a SharedArrayBuffer instance", which);
311
212
  return slot;
312
247
}
313
314
void fxPushAtomicsValue(txMachine* the, int i, txID id)
315
19
{
316
19
  txSlot* slot;
317
19
  if (mxArgc > i)
318
18
    mxPushSlot(mxArgv(i));
319
1
  else
320
1
    mxPushUndefined();
321
19
  slot = the->stack;
322
19
  if ((id == _BigInt64Array) || (id == _BigUint64Array))
323
8
    fxBigIntCoerce(the, slot);
324
11
  else if (XS_INTEGER_KIND != slot->kind) {
325
3
    txNumber value;
326
3
    fxNumberCoerce(the, slot);
327
3
    value = c_trunc(slot->value.number); 
328
3
    if (c_isnan(value) || (value == -0))
329
2
      value = 0;
330
3
    slot->value.number = value;
331
3
  }
332
19
}
333
334
335
void fx_SharedArrayBuffer(txMachine* the)
336
21.4k
{
337
21.4k
  txSlot* instance;
338
21.4k
  txS8 byteLength;
339
21.4k
  txS8 maxByteLength = -1;
340
21.4k
  txSlot* property;
341
21.4k
  if (mxIsUndefined(mxTarget))
342
3
    mxTypeError("call: SharedArrayBuffer");
343
21.4k
  byteLength = fxArgToSafeByteLength(the, 0, 0);
344
21.4k
  if ((mxArgc > 1) && mxIsReference(mxArgv(1))) {
345
30
    mxPushSlot(mxArgv(1));
346
30
    mxGetID(mxID(_maxByteLength));
347
30
    mxPullSlot(mxArgv(1));
348
30
    maxByteLength = fxArgToSafeByteLength(the, 1, -1);
349
30
  }
350
21.4k
  if (maxByteLength >= 0) {
351
22
    if (byteLength > maxByteLength)
352
1
      mxRangeError("byteLength > maxByteLength");
353
22
  }
354
21.4k
  mxPushSlot(mxTarget);
355
21.4k
  fxGetPrototypeFromConstructor(the, &mxSharedArrayBufferPrototype);
356
21.4k
  instance = fxNewSlot(the);
357
21.4k
  instance->kind = XS_INSTANCE_KIND;
358
21.4k
  instance->value.instance.garbage = C_NULL;
359
21.4k
  instance->value.instance.prototype = the->stack->value.reference;
360
21.4k
  the->stack->value.reference = instance;
361
21.4k
  the->stack->kind = XS_REFERENCE_KIND;
362
21.4k
  if (byteLength > 0x7FFFFFFF)
363
4
    mxRangeError("byteLength too big");
364
21.4k
  if (maxByteLength > 0x7FFFFFFF)
365
3
    mxRangeError("maxByteLength too big");
366
21.4k
  property = instance->next = fxNewSlot(the);
367
21.4k
  property->flag = XS_INTERNAL_FLAG;
368
21.4k
  property->kind = XS_HOST_KIND;
369
21.4k
  property->value.host.data = fxCreateSharedChunk((txInteger)byteLength);
370
21.4k
  if (!property->value.host.data) {
371
0
    property->value.host.variant.destructor = NULL;
372
0
    mxRangeError("cannot allocate SharedArrayBuffer insatnce");
373
0
  }
374
21.4k
  property->value.host.variant.destructor = fxReleaseSharedChunk;
375
21.4k
  property = property->next = fxNewSlot(the);
376
21.4k
  property->flag = XS_INTERNAL_FLAG;
377
21.4k
  property->kind = XS_BUFFER_INFO_KIND;
378
21.4k
  property->value.bufferInfo.length = (txInteger)byteLength;
379
21.4k
  property->value.bufferInfo.maxLength = (txInteger)maxByteLength;
380
21.4k
  mxPullSlot(mxResult);
381
21.4k
}
382
383
void fx_SharedArrayBuffer_prototype_get_byteLength(txMachine* the)
384
84
{
385
84
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
386
84
  txSlot* bufferInfo = host->next; 
387
84
  mxResult->kind = XS_INTEGER_KIND;
388
84
  mxResult->value.integer = bufferInfo->value.bufferInfo.length;
389
84
}
390
391
void fx_SharedArrayBuffer_prototype_get_growable(txMachine* the)
392
32
{
393
32
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
394
32
  txSlot* bufferInfo = host->next;
395
32
  mxResult->kind = XS_BOOLEAN_KIND;
396
32
  mxResult->value.boolean = (bufferInfo->value.bufferInfo.maxLength >= 0) ? 1 : 0;
397
32
}
398
399
void fx_SharedArrayBuffer_prototype_get_maxByteLength(txMachine* the)
400
37
{
401
37
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
402
37
  txSlot* bufferInfo = host->next; 
403
37
  mxResult->kind = XS_INTEGER_KIND;
404
37
  if (bufferInfo->value.bufferInfo.maxLength >= 0)
405
9
    mxResult->value.integer = bufferInfo->value.bufferInfo.maxLength;
406
28
  else
407
28
    mxResult->value.integer = bufferInfo->value.bufferInfo.length;
408
37
}
409
410
void fx_SharedArrayBuffer_prototype_grow(txMachine* the)
411
38
{
412
38
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
413
38
  txSlot* bufferInfo = host->next; 
414
38
  txInteger maxByteLength, oldByteLength, newByteLength;
415
38
  maxByteLength = bufferInfo->value.bufferInfo.maxLength;
416
38
  if (maxByteLength < 0)
417
10
    mxTypeError("this: not resizable");
418
28
  oldByteLength = bufferInfo->value.bufferInfo.length;
419
28
  newByteLength = fxArgToByteLength(the, 0, 0);
420
28
  if (newByteLength < oldByteLength)
421
1
    mxRangeError("newLength < byteLength");
422
27
  if (newByteLength > maxByteLength)
423
3
    mxRangeError("newLength > maxByteLength");
424
27
  mxRangeError("cannot grow SharedArrayBuffer insatnce");
425
27
}
426
427
void fx_SharedArrayBuffer_prototype_slice(txMachine* the)
428
75
{
429
75
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
430
75
  txSlot* bufferInfo = host->next; 
431
75
  txInteger length = bufferInfo->value.bufferInfo.length;
432
75
  txInteger start = fxArgToIndexInteger(the, 0, 0, length);
433
75
  txInteger stop = fxArgToIndexInteger(the, 1, length, length);
434
75
  txSlot* result;
435
75
  if (stop < start) 
436
19
    stop = start;
437
75
  length = stop - start;
438
75
  mxPushSlot(mxThis);
439
75
  mxGetID(mxID(_constructor));
440
75
  fxToSpeciesConstructor(the, &mxSharedArrayBufferConstructor);
441
75
  mxNew();
442
75
  mxPushInteger(length);
443
75
  mxRunCount(1);
444
75
  mxPullSlot(mxResult);
445
75
  result = fxCheckSharedArrayBuffer(the, mxResult, "result");
446
75
  if (result == host)
447
1
    mxTypeError("result: same SharedArrayBuffer instance");
448
74
  bufferInfo = result->next; 
449
74
  if (bufferInfo->value.bufferInfo.length < length)
450
1
    mxTypeError("result: smaller SharedArrayBuffer instance");
451
73
  c_memcpy(result->value.host.data, ((txByte*)host->value.host.data + start), stop - start);
452
73
}
453
454
void fx_Atomics_add(txMachine* the)
455
3
{
456
3
  mxAtomicsDeclarations(0, 0);
457
3
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
458
3
  (*dispatch->value.typedArray.atomics->add)(the, host, offset, the->stack, 0);
459
3
  mxPullSlot(mxResult);
460
3
}
461
462
void fx_Atomics_and(txMachine* the)
463
2
{
464
2
  mxAtomicsDeclarations(0, 0);
465
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
466
2
  (*dispatch->value.typedArray.atomics->and)(the, host, offset, the->stack, 0);
467
2
  mxPullSlot(mxResult);
468
2
}
469
470
void fx_Atomics_compareExchange(txMachine* the)
471
2
{
472
2
  mxAtomicsDeclarations(0, 0);
473
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
474
2
  fxPushAtomicsValue(the, 3, dispatch->value.typedArray.dispatch->constructorID);
475
2
  (*dispatch->value.typedArray.atomics->compareExchange)(the, host, offset, the->stack, 0);
476
2
  mxPullSlot(mxResult);
477
2
  mxPop();
478
2
}
479
480
void fx_Atomics_exchange(txMachine* the)
481
2
{
482
2
  mxAtomicsDeclarations(0, 0);
483
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
484
2
  (*dispatch->value.typedArray.atomics->exchange)(the, host, offset, the->stack, 0);
485
2
  mxPullSlot(mxResult);
486
2
}
487
488
void fx_Atomics_isLockFree(txMachine* the)
489
36
{
490
36
  txInteger size = (mxArgc > 0) ? fxToInteger(the, mxArgv(0)) : 0;
491
36
  mxResult->value.boolean = (size == 4) ? 1 : 0;
492
36
  mxResult->kind = XS_BOOLEAN_KIND;
493
36
}
494
495
void fx_Atomics_load(txMachine* the)
496
5
{
497
5
  mxAtomicsDeclarations(0, 0);
498
5
  (*dispatch->value.typedArray.atomics->load)(the, host, offset, mxResult, 0);
499
5
}
500
501
void fx_Atomics_or(txMachine* the)
502
1
{
503
1
  mxAtomicsDeclarations(0, 0);
504
1
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
505
1
  (*dispatch->value.typedArray.atomics->or)(the, host, offset, the->stack, 0);
506
1
  mxPullSlot(mxResult);
507
1
}
508
509
void fx_Atomics_notify(txMachine* the)
510
66
{
511
66
  mxAtomicsDeclarations(1, 0);
512
66
  txInteger count = ((mxArgc > 2) && !mxIsUndefined(mxArgv(2))) ? fxToInteger(the, mxArgv(2)) : 20;
513
66
  if (count < 0)
514
4
    count = 0;
515
66
  if (host->kind == XS_ARRAY_BUFFER_KIND) {
516
4
    mxResult->value.integer = 0;
517
4
  }
518
62
  else {
519
62
    mxResult->value.integer = fxNotifySharedChunk(the, (txByte*)host->value.host.data + offset, count);
520
62
  }
521
66
  mxResult->kind = XS_INTEGER_KIND;
522
66
}
523
524
void fx_Atomics_store(txMachine* the)
525
19
{
526
19
  mxAtomicsDeclarations(0, 0);
527
19
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
528
19
  *mxResult = *the->stack;
529
19
  (*dispatch->value.typedArray.atomics->store)(the, host, offset, the->stack, 0);
530
19
  mxPop();
531
19
}
532
533
void fx_Atomics_sub(txMachine* the)
534
2
{
535
2
  mxAtomicsDeclarations(0, 0);
536
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
537
2
  (*dispatch->value.typedArray.atomics->sub)(the, host, offset, the->stack, 0);
538
2
  mxPullSlot(mxResult);
539
2
}
540
541
void fx_Atomics_wait(txMachine* the)
542
21
{
543
21
  mxAtomicsDeclarations(1, 1);
544
21
  txNumber timeout;
545
21
  txInteger result;
546
21
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
547
21
  timeout = (mxArgc > 3) ? fxToNumber(the, mxArgv(3)) : C_NAN;
548
21
  if (c_isnan(timeout))
549
0
    timeout = C_INFINITY;
550
21
  else if (timeout < 0)
551
0
    timeout = 0;
552
21
  result = (*dispatch->value.typedArray.atomics->wait)(the, host, offset, the->stack, timeout);
553
21
  if (result < 0)
554
0
    mxPushStringX("not-equal");
555
21
  else {
556
21
    result = fxWaitSharedChunk(the, (txByte*)host->value.host.data + offset, timeout, C_NULL);
557
21
    if (result == 0)
558
0
      mxPushStringX("timed-out");
559
21
    else
560
21
      mxPushStringX("ok");
561
21
  }
562
21
  mxPullSlot(mxResult);
563
21
}
564
565
#if mxECMAScript2024
566
void fx_Atomics_waitAsync(txMachine* the)
567
8
{
568
8
  mxAtomicsDeclarations(1, 1);
569
8
  txNumber timeout;
570
8
  txInteger result;
571
8
  txSlot* slot;
572
8
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
573
8
  timeout = (mxArgc > 3) ? fxToNumber(the, mxArgv(3)) : C_NAN;
574
8
  if (c_isnan(timeout))
575
0
    timeout = C_INFINITY;
576
8
  else if (timeout < 0)
577
1
    timeout = 0;
578
8
  result = (*dispatch->value.typedArray.atomics->wait)(the, host, offset, the->stack, timeout);
579
  
580
8
  mxPush(mxObjectPrototype);
581
8
  slot = fxLastProperty(the, fxNewObjectInstance(the));
582
8
  slot = fxNextBooleanProperty(the, slot, (result <= 0) ? 0 : 1, mxID(_async), XS_NO_FLAG);
583
8
  if (result < 0)
584
1
    fxNextStringXProperty(the, slot, "not-equal", mxID(_value), XS_NO_FLAG);
585
7
  else if (result == 0)
586
1
    fxNextStringXProperty(the, slot, "timed-out", mxID(_value), XS_NO_FLAG);
587
6
  else {
588
6
    txSlot* resolveFunction;
589
6
    txSlot* rejectFunction;
590
6
    mxTemporary(resolveFunction);
591
6
    mxTemporary(rejectFunction);
592
6
    mxPush(mxPromiseConstructor);
593
6
    fxNewPromiseCapability(the, resolveFunction, rejectFunction);
594
6
    fxNextSlotProperty(the, slot, the->stack, mxID(_value), XS_NO_FLAG);
595
6
    mxPop(); // promise
596
6
    fxWaitSharedChunk(the, (txByte*)host->value.host.data + offset, timeout, resolveFunction);
597
6
    mxPop(); // rejectFunction
598
6
    mxPop(); // resolveFunction
599
6
  }
600
8
  mxPullSlot(mxResult);
601
8
}
602
#endif
603
604
void fx_Atomics_xor(txMachine* the)
605
3
{
606
3
  mxAtomicsDeclarations(0, 0);
607
3
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
608
3
  (*dispatch->value.typedArray.atomics->xor)(the, host, offset, the->stack, 0);
609
3
  mxPullSlot(mxResult);
610
3
}
611
612
#ifdef mxUseDefaultSharedChunks
613
614
#if defined(mxUsePOSIXThreads)
615
  #define mxThreads 1
616
  typedef pthread_cond_t txCondition;
617
  typedef pthread_mutex_t txMutex;
618
  typedef pthread_t txThread;
619
0
  #define mxCreateCondition(CONDITION) pthread_cond_init(CONDITION,NULL)
620
29.6k
  #define mxCreateMutex(MUTEX) pthread_mutex_init(MUTEX,NULL)
621
29.6k
  #define mxCurrentThread() pthread_self()
622
0
  #define mxDeleteCondition(CONDITION) pthread_cond_destroy(CONDITION)
623
29.6k
  #define mxDeleteMutex(MUTEX) pthread_mutex_destroy(MUTEX)
624
29.6k
  #define mxLockMutex(MUTEX) pthread_mutex_lock(MUTEX)
625
29.6k
  #define mxUnlockMutex(MUTEX) pthread_mutex_unlock(MUTEX)
626
0
  #define mxWakeCondition(CONDITION) pthread_cond_signal(CONDITION)
627
#elif defined(mxUseFreeRTOSTasks)
628
  #define mxThreads 1
629
630
  #include "FreeRTOS.h"
631
#if ESP32
632
  #include "freertos/queue.h"
633
  #include "freertos/semphr.h"
634
#else
635
  #include "queue.h"
636
  #include "semphr.h"
637
#endif
638
  typedef TaskHandle_t txCondition;
639
  typedef struct {
640
#if nrf52
641
    SemaphoreHandle_t sem;
642
#else
643
    QueueHandle_t handle;
644
    StaticSemaphore_t buffer;
645
#endif
646
  } txMutex;
647
  typedef TaskHandle_t txThread;
648
  #define mxCreateCondition(CONDITION) *(CONDITION) = xTaskGetCurrentTaskHandle()
649
#if nrf52
650
  #define mxCreateMutex(MUTEX) (MUTEX)->sem = xSemaphoreCreateMutex()
651
#else
652
  #define mxCreateMutex(MUTEX) (MUTEX)->handle = xSemaphoreCreateMutexStatic(&((MUTEX)->buffer))
653
#endif
654
  #define mxCurrentThread() xTaskGetCurrentTaskHandle()
655
  #define mxDeleteCondition(CONDITION) *(CONDITION) = NULL
656
#if nrf52
657
  #define mxDeleteMutex(MUTEX) vSemaphoreDelete((MUTEX)->sem)
658
  #define mxLockMutex(MUTEX) xSemaphoreTake((MUTEX)->sem, portMAX_DELAY)
659
  #define mxUnlockMutex(MUTEX) xSemaphoreGive((MUTEX)->sem)
660
#else
661
  #define mxDeleteMutex(MUTEX) vSemaphoreDelete((MUTEX)->handle)
662
  #define mxLockMutex(MUTEX) xSemaphoreTake((MUTEX)->handle, portMAX_DELAY)
663
  #define mxUnlockMutex(MUTEX) xSemaphoreGive((MUTEX)->handle)
664
#endif
665
  #define mxWakeCondition(CONDITION) xTaskNotifyGive(*(CONDITION));
666
#elif mxWindows
667
  #define mxThreads 1
668
  typedef CONDITION_VARIABLE txCondition;
669
  typedef CRITICAL_SECTION txMutex;
670
  typedef DWORD txThread;
671
  #define mxCreateCondition(CONDITION) InitializeConditionVariable(CONDITION)
672
  #define mxCreateMutex(MUTEX) InitializeCriticalSection(MUTEX)
673
  #define mxCurrentThread() GetCurrentThreadId()
674
  #define mxDeleteCondition(CONDITION) (void)(CONDITION)
675
  #define mxDeleteMutex(MUTEX) DeleteCriticalSection(MUTEX)
676
  #define mxLockMutex(MUTEX) EnterCriticalSection(MUTEX)
677
  #define mxUnlockMutex(MUTEX) LeaveCriticalSection(MUTEX)
678
  #define mxWakeCondition(CONDITION) WakeConditionVariable(CONDITION)
679
#else
680
  #define mxThreads 0
681
  typedef void* txThread;
682
  #define mxCurrentThread() C_NULL
683
#endif
684
685
typedef struct sxSharedChunk txSharedChunk;
686
typedef struct sxSharedCluster txSharedCluster;
687
typedef struct sxSharedWaiter txSharedWaiter;
688
689
typedef void (*txTimerCallback)(void* timer, void *refcon, txInteger refconSize);
690
extern void fxRescheduleTimer(void* timer, txNumber timeout, txNumber interval);
691
extern void* fxScheduleTimer(txNumber timeout, txNumber interval, txTimerCallback callback, void* refcon, txInteger refconSize);
692
extern void fxUnscheduleTimer(void* timer);
693
694
struct sxSharedChunk {
695
#if mxThreads && !defined(mxUseGCCAtomics)
696
  txMutex mutex;
697
#endif
698
  txSize size;
699
  txSize usage;
700
};
701
702
struct sxSharedCluster {
703
  txThread mainThread;
704
  txSize usage;
705
#if mxThreads
706
  txSharedWaiter* first; 
707
  txMutex waiterMutex; 
708
#endif
709
};
710
711
struct sxSharedWaiter {
712
  txSharedWaiter* next;
713
  txMachine* the;
714
  void* data;
715
  void* condition;
716
  void* timer;
717
  txSlot resolve;
718
  txBoolean ok;
719
};
720
721
txSharedCluster* gxSharedCluster = C_NULL;
722
723
void fxInitializeSharedCluster(txMachine* the)
724
29.6k
{
725
29.6k
  if (gxSharedCluster) {
726
0
    gxSharedCluster->usage++;
727
0
  }
728
29.6k
  else {
729
29.6k
    gxSharedCluster = c_calloc(sizeof(txSharedCluster), 1);
730
29.6k
    if (gxSharedCluster) {
731
29.6k
      gxSharedCluster->mainThread = mxCurrentThread();
732
29.6k
      gxSharedCluster->usage++;
733
29.6k
    #if mxThreads
734
29.6k
      mxCreateMutex(&gxSharedCluster->waiterMutex);
735
29.6k
    #endif
736
29.6k
    #ifdef mxInitializeSharedTimers
737
29.6k
      mxInitializeSharedTimers();
738
29.6k
    #endif
739
29.6k
    }
740
29.6k
  }
741
29.6k
}
742
743
void fxTerminateSharedCluster(txMachine* the)
744
29.6k
{
745
29.6k
  if (gxSharedCluster) {
746
29.6k
  #ifdef mxUnscheduleSharedTimer
747
29.6k
    if (the) {
748
29.6k
      txSharedWaiter** address;
749
29.6k
      txSharedWaiter* waiter;
750
29.6k
      mxLockMutex(&gxSharedCluster->waiterMutex);
751
29.6k
      address = &(gxSharedCluster->first);
752
29.6k
      while ((waiter = *address)) {
753
1
        if (waiter->the == the) {
754
1
          *address = waiter->next;
755
1
          if (waiter->timer)
756
1
            mxUnscheduleSharedTimer(waiter->timer);
757
1
          c_free(waiter);         
758
1
        }
759
0
        else
760
0
          address = &(waiter->next);
761
1
      }
762
29.6k
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
763
29.6k
    }
764
29.6k
  #endif
765
29.6k
    gxSharedCluster->usage--;
766
29.6k
    if (gxSharedCluster->usage == 0) {
767
29.6k
    #ifdef mxTerminateSharedTimers
768
29.6k
      mxTerminateSharedTimers();
769
29.6k
    #endif
770
29.6k
    #if mxThreads
771
29.6k
      mxDeleteMutex(&gxSharedCluster->waiterMutex);
772
29.6k
    #endif
773
29.6k
      c_free(gxSharedCluster);
774
29.6k
      gxSharedCluster = C_NULL;
775
29.6k
    }
776
29.6k
  }
777
29.6k
}
778
779
void* fxCreateSharedChunk(txInteger size)
780
21.4k
{
781
21.4k
  txSharedChunk* chunk = c_malloc(sizeof(txSharedChunk) + size);
782
21.4k
  if (chunk) {
783
21.4k
    void* data = (((txByte*)chunk) + sizeof(txSharedChunk));
784
  #if mxThreads && !defined(mxUseGCCAtomics)
785
    mxCreateMutex(&(chunk->mutex));
786
  #endif
787
21.4k
    chunk->size = size;
788
21.4k
    chunk->usage = 1;
789
21.4k
    c_memset(data, 0, size);
790
21.4k
    return data;
791
21.4k
  }
792
0
  return C_NULL;
793
21.4k
}
794
795
void fxLockSharedChunk(void* data)
796
0
{
797
#if mxThreads && !defined(mxUseGCCAtomics)
798
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
799
    mxLockMutex(&(chunk->mutex));
800
#endif
801
0
}
802
803
txInteger fxMeasureSharedChunk(void* data)
804
0
{
805
0
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
806
0
  return chunk->size;
807
0
}
808
809
txInteger fxNotifySharedChunk(txMachine* the, void* data, txInteger count)
810
20
{
811
20
  txInteger result = 0;
812
20
  if (gxSharedCluster) {
813
20
  #if mxThreads
814
20
    txSharedWaiter* first = C_NULL;
815
20
    txSharedWaiter* last = C_NULL;
816
20
    txSharedWaiter** address;
817
20
    txSharedWaiter* waiter;
818
20
    mxLockMutex(&gxSharedCluster->waiterMutex);
819
20
    address = &(gxSharedCluster->first);
820
20
    while ((waiter = *address)) {
821
0
      if (waiter->data == data) {
822
0
        if (count == 0)
823
0
          break;
824
0
        count--;
825
0
        if (first)
826
0
          last->next = waiter;
827
0
        else
828
0
          first = waiter;
829
0
        last = waiter;
830
0
        *address = waiter->next;
831
0
        waiter->next = C_NULL;
832
0
      }
833
0
      else
834
0
        address = &(waiter->next);
835
0
    }
836
20
    waiter = first;
837
20
    while (waiter) {
838
0
      waiter->data = C_NULL;
839
0
      if (waiter->condition) {
840
0
        mxWakeCondition((txCondition*)waiter->condition);
841
0
      }
842
0
      else {
843
0
        waiter->ok = 1;
844
0
      #ifdef mxRescheduleSharedTimer
845
0
        mxRescheduleSharedTimer(waiter->timer, 0, 0);
846
      #else
847
        fxAbort(the, XS_DEAD_STRIP_EXIT);
848
      #endif
849
0
      }
850
0
      result++;
851
0
      waiter = waiter->next;
852
0
    }
853
20
    mxUnlockMutex(&gxSharedCluster->waiterMutex);
854
20
  #endif  
855
20
  }
856
20
  return result;
857
20
}
858
859
void* fxRetainSharedChunk(void* data)
860
0
{
861
0
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
862
0
  txS4 result = 0;
863
0
  txS4 value = 1;
864
0
  txS4* address = &(chunk->usage);
865
#ifndef mxUseGCCAtomics
866
  txBoolean lock = 1;
867
#endif
868
0
  mxAtomicsAdd();
869
0
  if (result == 0)
870
0
    return C_NULL;
871
0
  return data;
872
0
}
873
874
void fxReleaseSharedChunk(void* data)
875
21.4k
{
876
21.4k
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
877
21.4k
  txS4 result = 0;
878
21.4k
  txS4 value = 1;
879
21.4k
  txS4* address = &(chunk->usage);
880
#ifndef mxUseGCCAtomics
881
  txBoolean lock = 1;
882
#endif
883
21.4k
  mxAtomicsSub();
884
21.4k
  if (result == 1) {
885
21.4k
    c_free(chunk);
886
21.4k
  }
887
21.4k
}
888
889
void fxUnlockSharedChunk(void* data)
890
0
{
891
#if mxThreads && !defined(mxUseGCCAtomics)
892
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
893
    mxUnlockMutex(&(chunk->mutex));
894
#endif
895
0
}
896
897
#if mxThreads
898
void fxWaitSharedChunkCallback(void* timer, void* refcon, txInteger refconSize)
899
2
{
900
2
  txSharedWaiter** address = (txSharedWaiter**)refcon;
901
2
  txSharedWaiter* waiter = *address;
902
2
  txSharedWaiter* link;
903
2
  txMachine* the;
904
2
  mxLockMutex(&gxSharedCluster->waiterMutex);
905
2
  address = &(gxSharedCluster->first);
906
4
  while ((link = *address)) {
907
2
    if (link == waiter)
908
2
      *address = link->next;
909
0
    else
910
0
      address = &(link->next);
911
2
  }
912
2
  mxUnlockMutex(&gxSharedCluster->waiterMutex);
913
2
  the = waiter->the;
914
2
  fxBeginHost(waiter->the);
915
2
  mxTry(the) {
916
2
    mxPushUndefined();
917
2
    mxPush(waiter->resolve);
918
2
    mxCall();
919
2
    if (waiter->ok)
920
0
      mxPushStringX("ok");
921
2
    else
922
2
      mxPushStringX("timed-out");
923
2
    mxRunCount(1);
924
2
    mxPop();
925
2
  }
926
2
  mxCatch(the) {
927
0
  }
928
2
  fxForget(the, &waiter->resolve);
929
2
  fxEndHost(the);
930
2
  c_free(waiter);
931
2
}
932
#endif
933
934
txInteger fxWaitSharedChunk(txMachine* the, void* data, txNumber timeout, txSlot* resolveFunction)
935
3
{
936
3
  txInteger result = 1;
937
3
  if (gxSharedCluster) {
938
3
  #if mxThreads
939
3
    txSharedWaiter* waiter;
940
3
    txSharedWaiter** address;
941
3
    txSharedWaiter* link;
942
3
    waiter = c_calloc(1, sizeof(txSharedWaiter));
943
3
    if (!waiter)
944
0
      fxAbort(the, XS_NOT_ENOUGH_MEMORY_EXIT);
945
3
    waiter->the = the;
946
3
    waiter->data = data;
947
3
    mxLockMutex(&gxSharedCluster->waiterMutex);
948
3
    address = &(gxSharedCluster->first);
949
3
    while ((link = *address))
950
0
      address = &(link->next);
951
3
    *address = waiter;
952
    
953
3
    if (resolveFunction) {
954
3
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
955
3
      waiter->resolve = *resolveFunction;
956
3
      fxRemember(the, &waiter->resolve);
957
3
    #ifdef mxScheduleSharedTimer
958
3
      waiter->timer = mxScheduleSharedTimer(timeout, 0, (txSharedTimerCallback)fxWaitSharedChunkCallback, &waiter, sizeof(txSharedWaiter*));
959
3
      if (!waiter->timer)
960
0
        fxAbort(the, XS_NOT_ENOUGH_MEMORY_EXIT);
961
    #else
962
      fxAbort(the, XS_DEAD_STRIP_EXIT);
963
    #endif
964
3
    }
965
0
    else if (gxSharedCluster->mainThread != mxCurrentThread()) {
966
0
      txCondition condition;
967
0
      mxCreateCondition(&condition);
968
0
      waiter->condition = &condition;
969
0
      if (timeout == C_INFINITY) {
970
0
      #if defined(mxUsePOSIXThreads)
971
0
        while (waiter->data == data)
972
0
          pthread_cond_wait(&condition, &gxSharedCluster->waiterMutex);
973
      #elif defined(mxUseFreeRTOSTasks)
974
        mxUnlockMutex(&gxSharedCluster->waiterMutex);
975
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
976
        mxLockMutex(&gxSharedCluster->waiterMutex);
977
      #else
978
        while (waiter->data == data)
979
          SleepConditionVariableCS(&condition, &gxSharedCluster->waiterMutex, INFINITE);
980
      #endif
981
0
      }
982
0
      else {
983
0
      #if defined(mxUsePOSIXThreads)
984
0
        struct timespec ts;
985
0
        timeout += fxDateNow();
986
0
        ts.tv_sec = c_floor(timeout / 1000);
987
0
        ts.tv_nsec = c_fmod(timeout, 1000) * 1000000;
988
0
        while (waiter->data == data) {
989
0
          result = (pthread_cond_timedwait(&condition, &gxSharedCluster->waiterMutex, &ts) == ETIMEDOUT) ? 0 : 1;
990
0
          if (!result)
991
0
            break;
992
0
        }
993
      #elif defined(mxUseFreeRTOSTasks)
994
        mxUnlockMutex(&gxSharedCluster->waiterMutex);
995
        ulTaskNotifyTake(pdTRUE, pdMS_TO_TICKS(timeout));
996
        mxLockMutex(&gxSharedCluster->waiterMutex);
997
        result = (waiter->data == data) ? 0 : 1;
998
      #else
999
        timeout += fxDateNow();
1000
        while (waiter->data == data) {
1001
          result = (SleepConditionVariableCS(&condition, &gxSharedCluster->waiterMutex, (DWORD)(timeout - fxDateNow()))) ? 1 : 0;
1002
          if (!result)
1003
            break;
1004
        }
1005
      #endif
1006
0
      }
1007
0
      address = &(gxSharedCluster->first);
1008
0
      while ((link = *address)) {
1009
0
        if (link == waiter)
1010
0
          *address = link->next;
1011
0
        else
1012
0
          address = &(link->next);
1013
0
      }
1014
0
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
1015
0
      c_free(waiter);
1016
0
      mxDeleteCondition(&condition);
1017
0
    }
1018
0
    else {
1019
0
      mxTypeError("main thread cannot wait");
1020
0
    }
1021
3
  #endif  
1022
3
  }
1023
0
  else {
1024
0
    mxTypeError("no shared cluster");
1025
0
  }
1026
3
  return result;
1027
3
}
1028
1029
#endif /* mxUseDefaultSharedChunks */
1030
1031