Coverage Report

Created: 2026-01-10 06:30

/src/moddable/xs/sources/xsAtomics.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2016-2017  Moddable Tech, Inc.
3
 *
4
 *   This file is part of the Moddable SDK Runtime.
5
 * 
6
 *   The Moddable SDK Runtime is free software: you can redistribute it and/or modify
7
 *   it under the terms of the GNU Lesser General Public License as published by
8
 *   the Free Software Foundation, either version 3 of the License, or
9
 *   (at your option) any later version.
10
 * 
11
 *   The Moddable SDK Runtime is distributed in the hope that it will be useful,
12
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
 *   GNU Lesser General Public License for more details.
15
 * 
16
 *   You should have received a copy of the GNU Lesser General Public License
17
 *   along with the Moddable SDK Runtime.  If not, see <http://www.gnu.org/licenses/>.
18
 *
19
 */
20
21
#include "xsAll.h"
22
23
static txInteger fxCheckAtomicsIndex(txMachine* the, txInteger index, txInteger length);
24
static txSlot* fxCheckAtomicsTypedArray(txMachine* the, txBoolean onlyInt32);
25
static txSlot* fxCheckAtomicsArrayBuffer(txMachine* the, txSlot* slot, txBoolean onlyShared);
26
static void* fxCheckAtomicsArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable);
27
static txSlot* fxCheckSharedArrayBuffer(txMachine* the, txSlot* slot, txString which);
28
static void fxPushAtomicsValue(txMachine* the, int i, txID id);
29
30
#define mxAtomicsHead0(TYPE,TO) \
31
4
  TYPE result = 0; \
32
4
  txBoolean lock = host->kind == XS_HOST_KIND; \
33
4
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_IMMUTABLE); \
34
4
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
35
36
#define mxAtomicsHead1(TYPE,TO) \
37
11
  TYPE result = 0; \
38
11
  TYPE value = (TYPE)TO(the, slot); \
39
11
  txBoolean lock = host->kind == XS_HOST_KIND; \
40
11
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_MUTABLE); \
41
11
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
42
43
#define mxAtomicsHead2(TYPE,TO) \
44
1
  TYPE result = (TYPE)TO(the, slot + 1); \
45
1
  TYPE value = (TYPE)TO(the, slot); \
46
1
  txBoolean lock = host->kind == XS_HOST_KIND; \
47
1
  void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_MUTABLE); \
48
1
  TYPE* address = (TYPE*)(((txByte*)data) + offset)
49
50
#ifdef mxUseGCCAtomics
51
1
  #define mxAtomicsCompareExchange() __atomic_compare_exchange(address, &result, &value, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
52
6
  #define mxAtomicsLoad() __atomic_load(address, &result, __ATOMIC_SEQ_CST)
53
1
  #define mxAtomicsAdd() result = __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST)
54
1
  #define mxAtomicsAnd() result = __atomic_fetch_and(address, value, __ATOMIC_SEQ_CST)
55
1
  #define mxAtomicsExchange() __atomic_exchange(address, &value, &result, __ATOMIC_SEQ_CST)
56
1
  #define mxAtomicsOr() result = __atomic_fetch_or(address, value, __ATOMIC_SEQ_CST)
57
3
  #define mxAtomicsStore() __atomic_store(address, &value, __ATOMIC_SEQ_CST)
58
3.71k
  #define mxAtomicsSub() result = __atomic_fetch_sub(address, value, __ATOMIC_SEQ_CST)
59
1
  #define mxAtomicsXor() result = __atomic_fetch_xor(address, value, __ATOMIC_SEQ_CST)
60
#else
61
  #define mxAtomicsCompareExchange() if (lock) fxLockSharedChunk(data); if (*address == result) *address = value; else result = *address; if (lock) fxUnlockSharedChunk(data)
62
  #define mxAtomicsLoad() if (lock) fxLockSharedChunk(data); result = *address;  if (lock) fxUnlockSharedChunk(data)
63
  #define mxAtomicsAdd() if (lock) fxLockSharedChunk(data); result = *address; *address = result + value; if (lock) fxUnlockSharedChunk(data)
64
  #define mxAtomicsAnd() if (lock) fxLockSharedChunk(data); result = *address; *address = result & value; if (lock) fxUnlockSharedChunk(data)
65
  #define mxAtomicsExchange() if (lock) fxLockSharedChunk(data); result = *address; *address = value; if (lock) fxUnlockSharedChunk(data)
66
  #define mxAtomicsOr() if (lock) fxLockSharedChunk(data); result = *address; *address = result | value; if (lock) fxUnlockSharedChunk(data)
67
  #define mxAtomicsStore() if (lock) fxLockSharedChunk(data); *address = value; if (lock) fxUnlockSharedChunk(data)
68
  #define mxAtomicsSub() if (lock) fxLockSharedChunk(data); result = *address; *address = result - value; if (lock) fxUnlockSharedChunk(data)
69
  #define mxAtomicsXor() if (lock) fxLockSharedChunk(data); result = *address; *address = result ^ value; if (lock) fxUnlockSharedChunk(data)
70
#endif  
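These head/operation/tail macros are assembled into every typed accessor below. As a rough illustration (hand-expanded from the macros above, assuming the mxUseGCCAtomics build; this is not generated output), the body of fxInt32Add is approximately:

txS4 result = 0;
txS4 value = (txS4)fxToInteger(the, slot);
txBoolean lock = host->kind == XS_HOST_KIND;
void* data = (lock) ? host->value.host.data : fxCheckAtomicsArrayBufferDetached(the, host, XS_MUTABLE);
txS4* address = (txS4*)(((txByte*)data) + offset);
result = __atomic_fetch_add(address, value, __ATOMIC_SEQ_CST);  /* mxAtomicsAdd() */
slot->kind = XS_INTEGER_KIND;                                   /* mxAtomicsTail() */
slot->value.integer = result;

The non-GCC path replaces the __atomic intrinsic with the fxLockSharedChunk/fxUnlockSharedChunk bracketing shown in the #else branch above.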
71
72
#define mxAtomicsTail() \
73
14
  slot->kind = XS_INTEGER_KIND; \
74
14
  slot->value.integer = result
75
76
#define mxAtomicsTailBigInt64() \
77
0
  fxFromBigInt64(the, slot, result)
78
79
#define mxAtomicsTailBigUint64() \
80
0
  fxFromBigUint64(the, slot, result)
81
  
82
#define mxAtomicsTailOverflow() \
83
0
  if (result <= 0x7FFFFFFF) { \
84
0
    slot->kind = XS_INTEGER_KIND; \
85
0
    slot->value.integer = result; \
86
0
  } \
87
0
  else { \
88
0
    slot->kind = XS_NUMBER_KIND; \
89
0
    slot->value.number = result; \
90
0
  }
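The overflow tail exists because an unsigned 32-bit result may not fit in the engine's signed integer slot. A minimal standalone sketch of the same decision (check_overflow_result is a hypothetical name, not part of XS):

#include <stdint.h>
#include <stdio.h>

static void check_overflow_result(uint32_t result) {
    if (result <= 0x7FFFFFFF)
        printf("integer slot: %d\n", (int32_t)result);   /* fits in XS_INTEGER_KIND */
    else
        printf("number slot: %.0f\n", (double)result);   /* falls back to XS_NUMBER_KIND */
}

int main(void) {
    check_overflow_result(42);           /* integer slot: 42 */
    check_overflow_result(0xFFFFFFFEu);  /* number slot: 4294967294 */
    return 0;
}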
91
  
92
#define mxAtomicsTailWait() \
93
2
  return (result != value) ? -1 : (timeout == 0) ? 0 : 1;
94
95
#define mxAtomicsDeclarations(onlyInt32, onlyShared) \
96
113
  txSlot* dispatch = fxCheckAtomicsTypedArray(the, onlyInt32); \
97
113
  txSlot* view = dispatch->next; \
98
113
  txSlot* buffer = view->next; \
99
113
  txSlot* host = fxCheckAtomicsArrayBuffer(the, buffer, onlyShared); \
100
113
  txU2 shift = dispatch->value.typedArray.dispatch->shift; \
101
113
  txInteger index = fxCheckAtomicsIndex(the, 1, fxGetDataViewSize(the, view, buffer) >> shift); \
102
113
  txInteger offset = view->value.dataView.offset + (index << shift)
103
104
0
void fxInt8Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
105
0
void fxInt16Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
106
1
void fxInt32Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsAdd(); mxAtomicsTail(); }
107
0
void fxInt64Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsAdd(); mxAtomicsTailBigInt64(); }
108
0
void fxUint8Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTail(); }
109
0
void fxUint16Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTail(); }
110
0
void fxUint32Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsAdd(); mxAtomicsTailOverflow(); }
111
0
void fxUint64Add(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsAdd(); mxAtomicsTailBigUint64(); }
112
113
0
void fxInt8And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
114
0
void fxInt16And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
115
1
void fxInt32And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsAnd(); mxAtomicsTail(); }
116
0
void fxInt64And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsAnd(); mxAtomicsTailBigInt64(); }
117
0
void fxUint8And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTail(); }
118
0
void fxUint16And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTail(); }
119
0
void fxUint32And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsAnd(); mxAtomicsTailOverflow(); }
120
0
void fxUint64And(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsAnd(); mxAtomicsTailBigUint64(); }
121
122
0
void fxInt8CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS1, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
123
0
void fxInt16CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS2, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
124
1
void fxInt32CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS4, fxToInteger); mxAtomicsCompareExchange(); mxAtomicsTail(); }
125
0
void fxInt64CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txS8, fxToBigInt64); mxAtomicsCompareExchange(); mxAtomicsTailBigInt64(); }
126
0
void fxUint8CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU1, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTail(); }
127
0
void fxUint16CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU2, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTail(); }
128
0
void fxUint32CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU4, fxToUnsigned); mxAtomicsCompareExchange(); mxAtomicsTailOverflow(); }
129
0
void fxUint64CompareExchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead2(txU8, fxToBigUint64); mxAtomicsCompareExchange(); mxAtomicsTailBigUint64(); }
130
131
0
void fxInt8Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
132
0
void fxInt16Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
133
1
void fxInt32Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsExchange(); mxAtomicsTail(); }
134
0
void fxInt64Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsExchange(); mxAtomicsTailBigInt64(); }
135
0
void fxUint8Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTail(); }
136
0
void fxUint16Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTail(); }
137
0
void fxUint32Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsExchange(); mxAtomicsTailOverflow(); }
138
0
void fxUint64Exchange(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsExchange(); mxAtomicsTailBigUint64(); }
139
140
0
void fxInt8Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS1, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
141
0
void fxInt16Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS2, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
142
4
void fxInt32Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS4, fxToInteger); mxAtomicsLoad(); mxAtomicsTail(); }
143
0
void fxInt64Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txS8, fxToBigInt64); mxAtomicsLoad(); mxAtomicsTailBigInt64(); }
144
0
void fxUint8Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU1, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTail(); }
145
0
void fxUint16Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU2, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTail(); }
146
0
void fxUint32Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU4, fxToUnsigned); mxAtomicsLoad(); mxAtomicsTailOverflow(); }
147
0
void fxUint64Load(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead0(txU8, fxToBigUint64); mxAtomicsLoad(); mxAtomicsTailBigUint64(); }
148
149
0
void fxInt8Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
150
0
void fxInt16Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
151
1
void fxInt32Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsOr(); mxAtomicsTail(); }
152
0
void fxInt64Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsOr(); mxAtomicsTailBigInt64(); }
153
0
void fxUint8Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsOr(); mxAtomicsTail(); }
154
0
void fxUint16Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsOr(); mxAtomicsTail(); }
155
0
void fxUint32Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsOr(); mxAtomicsTailOverflow(); }
156
0
void fxUint64Or(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsOr(); mxAtomicsTailBigUint64(); }
157
158
0
void fxInt8Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
159
0
void fxInt16Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
160
3
void fxInt32Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsStore(); mxAtomicsTail(); }
161
0
void fxInt64Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsStore(); mxAtomicsTailBigInt64(); }
162
0
void fxUint8Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsStore(); mxAtomicsTail(); }
163
0
void fxUint16Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsStore(); mxAtomicsTail(); }
164
0
void fxUint32Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsStore(); mxAtomicsTailOverflow(); }
165
0
void fxUint64Store(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsStore(); mxAtomicsTailBigUint64(); }
166
167
0
void fxInt8Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
168
0
void fxInt16Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
169
1
void fxInt32Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsSub(); mxAtomicsTail(); }
170
0
void fxInt64Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsSub(); mxAtomicsTailBigInt64(); }
171
0
void fxUint8Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsSub(); mxAtomicsTail(); }
172
0
void fxUint16Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsSub(); mxAtomicsTail(); }
173
0
void fxUint32Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsSub(); mxAtomicsTailOverflow(); }
174
0
void fxUint64Sub(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsSub(); mxAtomicsTailBigUint64(); }
175
176
0
void fxInt8Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS1, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
177
0
void fxInt16Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS2, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
178
1
void fxInt32Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsXor(); mxAtomicsTail(); }
179
0
void fxInt64Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsXor(); mxAtomicsTailBigInt64(); }
180
0
void fxUint8Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU1, fxToUnsigned); mxAtomicsXor(); mxAtomicsTail(); }
181
0
void fxUint16Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU2, fxToUnsigned); mxAtomicsXor(); mxAtomicsTail(); }
182
0
void fxUint32Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU4, fxToUnsigned); mxAtomicsXor(); mxAtomicsTailOverflow(); }
183
0
void fxUint64Xor(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, int endian) { mxAtomicsHead1(txU8, fxToBigUint64); mxAtomicsXor(); mxAtomicsTailBigUint64(); }
184
185
0
txInteger fxInt32Wait(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, txNumber timeout) { mxAtomicsHead1(txS4, fxToInteger); mxAtomicsLoad(); mxAtomicsTailWait(); }
186
2
txInteger fxInt64Wait(txMachine* the, txSlot* host, txInteger offset, txSlot* slot, txNumber timeout) { mxAtomicsHead1(txS8, fxToBigInt64); mxAtomicsLoad(); mxAtomicsTailWait(); }
187
188
void fxBuildAtomics(txMachine* the)
189
16.3k
{
190
16.3k
  txSlot* slot;
191
  
192
16.3k
  mxPush(mxObjectPrototype);
193
16.3k
  slot = fxLastProperty(the, fxNewObjectInstance(the));
194
16.3k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_byteLength), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG);
195
16.3k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_growable), C_NULL, mxID(_growable), XS_DONT_ENUM_FLAG);
196
16.3k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_get_maxByteLength), C_NULL, mxID(_maxByteLength), XS_DONT_ENUM_FLAG);
197
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_grow), 1, mxID(_grow), XS_DONT_ENUM_FLAG);
198
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_SharedArrayBuffer_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG);
199
16.3k
  slot = fxNextStringXProperty(the, slot, "SharedArrayBuffer", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG);
200
16.3k
  mxSharedArrayBufferPrototype = *the->stack;
201
16.3k
  slot = fxBuildHostConstructor(the, mxCallback(fx_SharedArrayBuffer), 1, mxID(_SharedArrayBuffer));
202
16.3k
  mxSharedArrayBufferConstructor = *the->stack;
203
16.3k
  slot = fxLastProperty(the, slot);
204
16.3k
  slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG);
205
16.3k
  mxPop();
206
  
207
16.3k
  mxPush(mxObjectPrototype);
208
16.3k
  slot = fxLastProperty(the, fxNewObjectInstance(the));
209
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_add), 3, mxID(_add), XS_DONT_ENUM_FLAG);
210
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_and), 3, mxID(_and), XS_DONT_ENUM_FLAG);
211
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_compareExchange), 4, mxID(_compareExchange), XS_DONT_ENUM_FLAG);
212
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_exchange), 3, mxID(_exchange), XS_DONT_ENUM_FLAG);
213
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_isLockFree), 1, mxID(_isLockFree), XS_DONT_ENUM_FLAG);
214
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_load), 2, mxID(_load), XS_DONT_ENUM_FLAG);
215
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_or), 3, mxID(_or), XS_DONT_ENUM_FLAG);
216
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_notify), 3, mxID(_notify), XS_DONT_ENUM_FLAG);
217
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_store), 3, mxID(_store), XS_DONT_ENUM_FLAG);
218
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_sub), 3, mxID(_sub), XS_DONT_ENUM_FLAG);
219
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_wait), 4, mxID(_wait), XS_DONT_ENUM_FLAG);
220
16.3k
#if mxECMAScript2024
221
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_waitAsync), 4, mxID(_waitAsync), XS_DONT_ENUM_FLAG);
222
16.3k
#endif
223
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_notify), 3, mxID(_wake), XS_DONT_ENUM_FLAG);
224
16.3k
  slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_Atomics_xor), 3, mxID(_xor), XS_DONT_ENUM_FLAG);
225
16.3k
  slot = fxNextStringXProperty(the, slot, "Atomics", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG);
226
16.3k
  mxPull(mxAtomicsObject);
227
16.3k
}
228
229
txSlot* fxCheckAtomicsArrayBuffer(txMachine* the, txSlot* slot, txBoolean onlyShared)
230
71
{
231
71
  if ((!slot) || (!mxIsReference(slot)))
232
0
    mxTypeError("typedArray.buffer: not an object");
233
71
  slot = slot->value.reference->next;
234
71
  if (slot && (slot->kind == XS_HOST_KIND) && (slot->value.host.variant.destructor == fxReleaseSharedChunk))
235
59
    return slot;
236
12
  if (onlyShared)
237
6
    mxTypeError("typedArray.buffer: not a SharedArrayBuffer instance");
238
6
  if (slot && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_ARRAY_BUFFER_KIND)) {
239
6
    if (slot->value.arrayBuffer.address == C_NULL)
240
0
      mxTypeError("typedArray.buffer: detached");
241
6
    return slot;
242
6
  }
243
6
  mxTypeError("typedArray.buffer: not a SharedArrayBuffer instance, not an ArrayBuffer instance");
244
0
  return C_NULL;
245
6
}
246
247
void* fxCheckAtomicsArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable)
248
1
{
249
1
  if (slot->value.arrayBuffer.address == C_NULL)
250
0
    mxTypeError("typedArray.buffer: detached");
251
1
  if (mutable && (slot->flag & XS_DONT_SET_FLAG))
252
0
    mxTypeError("typedArray.buffer: read-only");
253
1
  return slot->value.arrayBuffer.address;
254
1
}
255
256
txInteger fxCheckAtomicsIndex(txMachine* the, txInteger i, txInteger length)
257
65
{
258
65
  txSlot *slot = (mxArgc > i) ? mxArgv(i) : C_NULL;
259
65
  if (slot && (XS_INTEGER_KIND == slot->kind)) {
260
50
    int index = slot->value.integer;
261
50
    if ((0 <= index) && (index < length))
262
37
      return index;
263
50
  }
264
265
28
  txNumber index = slot ? c_trunc(fxToNumber(the, slot)) : C_NAN; 
266
28
  if (c_isnan(index))
267
7
    index = 0;
268
28
  if (index < 0)
269
8
    mxRangeError("invalid index");
270
20
  else if (index >= length)
271
14
    mxRangeError("invalid index");
272
6
  return (txInteger)index;
273
28
}
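The coercion above takes in-range integer arguments as-is; anything else is truncated, NaN becomes 0, and out-of-range indices throw a RangeError. A standalone sketch of the same rules (check_index and its -1 return convention are illustrative only, not XS API):

#include <math.h>
#include <stdio.h>

/* Returns the coerced index, or -1 where XS would throw a RangeError. */
static int check_index(double requested, int length) {
    double index = trunc(requested);
    if (isnan(index))
        index = 0;                       /* NaN coerces to index 0 */
    if (index < 0 || index >= length)
        return -1;                       /* mxRangeError("invalid index") in XS */
    return (int)index;
}

int main(void) {
    printf("%d\n", check_index(2.9, 4));  /* 2 */
    printf("%d\n", check_index(NAN, 4));  /* 0 */
    printf("%d\n", check_index(4.0, 4));  /* -1: out of range */
    printf("%d\n", check_index(-1.0, 4)); /* -1: negative */
    return 0;
}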
274
275
txSlot* fxCheckAtomicsTypedArray(txMachine* the, txBoolean onlyInt32)
276
113
{
277
113
  txSlot* slot = (mxArgc > 0) ? mxArgv(0) : C_NULL;
278
113
  txID id;
279
113
  if ((!slot) || (!mxIsReference(slot)))
280
11
    mxTypeError("typedArray: not an object");
281
102
  slot = slot->value.reference->next;
282
102
  if ((!slot) || ((slot->kind != XS_TYPED_ARRAY_KIND)))
283
1
    mxTypeError("typedArray: not a TypedArray instance");
284
101
  id = slot->value.typedArray.dispatch->constructorID;
285
101
  if (onlyInt32) {
286
69
    if ((id != _Int32Array) && (id != _BigInt64Array))
287
18
      mxTypeError("typedArray: not an Int32Array instance");
288
69
  }
289
32
  else {
290
32
    if (id == _Float32Array)
291
2
      mxTypeError("typedArray: Float32Array instance");
292
30
    else if (id == _Float64Array)
293
0
      mxTypeError("typedArray: Float64Array instance");
294
30
    else if (id == _Uint8ClampedArray)
295
10
      mxTypeError("typedArray: Uint8ClampedArray instance");
296
20
  #if mxFloat16
297
20
    else if (id == _Float16Array)
298
0
      mxTypeError("typedArray: Float16Array instance");
299
32
  #endif
300
32
  }
301
71
  return slot;
302
101
}
303
304
txSlot* fxCheckSharedArrayBuffer(txMachine* the, txSlot* slot, txString which)
305
55.2k
{
306
55.2k
  if ((!slot) || (!mxIsReference(slot)))
307
64
    mxTypeError("%s: not an object", which);
308
55.1k
  slot = slot->value.reference->next;
309
55.1k
  if ((!slot) || (slot->kind != XS_HOST_KIND) || (slot->value.host.variant.destructor != fxReleaseSharedChunk))
310
54.5k
    mxTypeError("%s: not a SharedArrayBuffer instance", which);
311
641
  return slot;
312
55.1k
}
313
314
void fxPushAtomicsValue(txMachine* the, int i, txID id)
315
16
{
316
16
  txSlot* slot;
317
16
  if (mxArgc > i)
318
15
    mxPushSlot(mxArgv(i));
319
1
  else
320
1
    mxPushUndefined();
321
16
  slot = the->stack;
322
16
  if ((id == _BigInt64Array) || (id == _BigUint64Array))
323
5
    fxBigIntCoerce(the, slot);
324
11
  else if (XS_INTEGER_KIND != slot->kind) {
325
3
    txNumber value;
326
3
    fxNumberCoerce(the, slot);
327
3
    value = c_trunc(slot->value.number); 
328
3
    if (c_isnan(value) || (value == -0))
329
3
      value = 0;
330
3
    slot->value.number = value;
331
3
  }
332
16
}
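For BigInt64/BigUint64 arrays the argument is coerced with fxBigIntCoerce; for every other element type it is coerced to a number and truncated, with NaN and -0 normalized to +0. A standalone sketch of that numeric branch (coerce_atomics_value is a hypothetical helper name, not an XS function):

#include <math.h>
#include <stdio.h>

static double coerce_atomics_value(double v) {
    double value = trunc(v);
    if (isnan(value) || value == -0.0)   /* NaN and -0 both become +0 */
        value = 0;
    return value;
}

int main(void) {
    printf("%g %g %g\n",
        coerce_atomics_value(3.9),    /* 3 */
        coerce_atomics_value(NAN),    /* 0 */
        coerce_atomics_value(-0.0));  /* 0 */
    return 0;
}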
333
334
335
void fx_SharedArrayBuffer(txMachine* the)
336
3.72k
{
337
3.72k
  txSlot* instance;
338
3.72k
  txS8 byteLength;
339
3.72k
  txS8 maxByteLength = -1;
340
3.72k
  txSlot* property;
341
3.72k
  if (mxIsUndefined(mxTarget))
342
2
    mxTypeError("call: SharedArrayBuffer");
343
3.72k
  byteLength = fxArgToSafeByteLength(the, 0, 0);
344
3.72k
  if ((mxArgc > 1) && mxIsReference(mxArgv(1))) {
345
33
    mxPushSlot(mxArgv(1));
346
33
    mxGetID(mxID(_maxByteLength));
347
33
    mxPullSlot(mxArgv(1));
348
33
    maxByteLength = fxArgToSafeByteLength(the, 1, -1);
349
33
  }
350
3.72k
  if (maxByteLength >= 0) {
351
26
    if (byteLength > maxByteLength)
352
1
      mxRangeError("byteLength > maxByteLength");
353
26
  }
354
3.72k
  mxPushSlot(mxTarget);
355
3.72k
  fxGetPrototypeFromConstructor(the, &mxSharedArrayBufferPrototype);
356
3.72k
  instance = fxNewSlot(the);
357
3.72k
  instance->kind = XS_INSTANCE_KIND;
358
3.72k
  instance->value.instance.garbage = C_NULL;
359
3.72k
  instance->value.instance.prototype = the->stack->value.reference;
360
3.72k
  the->stack->value.reference = instance;
361
3.72k
  the->stack->kind = XS_REFERENCE_KIND;
362
3.72k
  if (byteLength > 0x7FFFFFFF)
363
3
    mxRangeError("byteLength too big");
364
3.71k
  if (maxByteLength > 0x7FFFFFFF)
365
5
    mxRangeError("maxByteLength too big");
366
3.71k
  property = instance->next = fxNewSlot(the);
367
3.71k
  property->flag = XS_INTERNAL_FLAG;
368
3.71k
  property->kind = XS_HOST_KIND;
369
3.71k
  property->value.host.data = fxCreateSharedChunk((txInteger)byteLength);
370
3.71k
  if (!property->value.host.data) {
371
0
    property->value.host.variant.destructor = NULL;
372
0
    mxRangeError("cannot allocate SharedArrayBuffer insatnce");
373
0
  }
374
3.71k
  property->value.host.variant.destructor = fxReleaseSharedChunk;
375
3.71k
  property = property->next = fxNewSlot(the);
376
3.71k
  property->flag = XS_INTERNAL_FLAG;
377
3.71k
  property->kind = XS_BUFFER_INFO_KIND;
378
3.71k
  property->value.bufferInfo.length = (txInteger)byteLength;
379
3.71k
  property->value.bufferInfo.maxLength = (txInteger)maxByteLength;
380
3.71k
  mxPullSlot(mxResult);
381
3.71k
}
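The constructor validates the requested lengths in the order shown: byteLength against maxByteLength (when an options object supplies one), then both against the 0x7FFFFFFF ceiling, before the shared chunk is allocated. A standalone sketch of those checks (check_lengths is illustrative, not an XS function):

#include <stdint.h>
#include <stdio.h>

static const char* check_lengths(int64_t byteLength, int64_t maxByteLength) {
    if (maxByteLength >= 0 && byteLength > maxByteLength)
        return "RangeError: byteLength > maxByteLength";
    if (byteLength > 0x7FFFFFFF)
        return "RangeError: byteLength too big";
    if (maxByteLength > 0x7FFFFFFF)
        return "RangeError: maxByteLength too big";
    return "ok";
}

int main(void) {
    printf("%s\n", check_lengths(16, 8));            /* byteLength > maxByteLength */
    printf("%s\n", check_lengths(16, 64));           /* ok */
    printf("%s\n", check_lengths(0x80000000LL, -1)); /* byteLength too big */
    return 0;
}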
382
383
void fx_SharedArrayBuffer_prototype_get_byteLength(txMachine* the)
384
76
{
385
76
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
386
76
  txSlot* bufferInfo = host->next; 
387
76
  mxResult->kind = XS_INTEGER_KIND;
388
76
  mxResult->value.integer = bufferInfo->value.bufferInfo.length;
389
76
}
390
391
void fx_SharedArrayBuffer_prototype_get_growable(txMachine* the)
392
481
{
393
481
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
394
481
  txSlot* bufferInfo = host->next;
395
481
  mxResult->kind = XS_BOOLEAN_KIND;
396
481
  mxResult->value.boolean = (bufferInfo->value.bufferInfo.maxLength >= 0) ? 1 : 0;
397
481
}
398
399
void fx_SharedArrayBuffer_prototype_get_maxByteLength(txMachine* the)
400
54.5k
{
401
54.5k
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
402
54.5k
  txSlot* bufferInfo = host->next; 
403
54.5k
  mxResult->kind = XS_INTEGER_KIND;
404
54.5k
  if (bufferInfo->value.bufferInfo.maxLength >= 0)
405
9
    mxResult->value.integer = bufferInfo->value.bufferInfo.maxLength;
406
54.5k
  else
407
54.5k
    mxResult->value.integer = bufferInfo->value.bufferInfo.length;
408
54.5k
}
409
410
void fx_SharedArrayBuffer_prototype_grow(txMachine* the)
411
56
{
412
56
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
413
56
  txSlot* bufferInfo = host->next; 
414
56
  txInteger maxByteLength, oldByteLength, newByteLength;
415
56
  maxByteLength = bufferInfo->value.bufferInfo.maxLength;
416
56
  if (maxByteLength < 0)
417
19
    mxTypeError("this: not resizable");
418
37
  oldByteLength = bufferInfo->value.bufferInfo.length;
419
37
  newByteLength = fxArgToByteLength(the, 0, 0);
420
37
  if (newByteLength < oldByteLength)
421
1
    mxRangeError("newLength < byteLength");
422
36
  if (newByteLength > maxByteLength)
423
3
    mxRangeError("newLength > maxByteLength");
424
36
  mxRangeError("cannot grow SharedArrayBuffer insatnce");
425
36
}
426
427
void fx_SharedArrayBuffer_prototype_slice(txMachine* the)
428
60
{
429
60
  txSlot* host = fxCheckSharedArrayBuffer(the, mxThis, "this");
430
60
  txSlot* bufferInfo = host->next; 
431
60
  txInteger length = bufferInfo->value.bufferInfo.length;
432
60
  txInteger start = fxArgToIndexInteger(the, 0, 0, length);
433
60
  txInteger stop = fxArgToIndexInteger(the, 1, length, length);
434
60
  txSlot* result;
435
60
  if (stop < start) 
436
17
    stop = start;
437
60
  length = stop - start;
438
60
  mxPushSlot(mxThis);
439
60
  mxGetID(mxID(_constructor));
440
60
  fxToSpeciesConstructor(the, &mxSharedArrayBufferConstructor);
441
60
  mxNew();
442
60
  mxPushInteger(length);
443
60
  mxRunCount(1);
444
60
  mxPullSlot(mxResult);
445
60
  result = fxCheckSharedArrayBuffer(the, mxResult, "result");
446
60
  if (result == host)
447
1
    mxTypeError("result: same SharedArrayBuffer instance");
448
59
  bufferInfo = result->next; 
449
59
  if (bufferInfo->value.bufferInfo.length < length)
450
1
    mxTypeError("result: smaller SharedArrayBuffer instance");
451
58
  c_memcpy(result->value.host.data, ((txByte*)host->value.host.data + start), stop - start);
452
58
}
453
454
void fx_Atomics_add(txMachine* the)
455
3
{
456
3
  mxAtomicsDeclarations(0, 0);
457
3
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
458
3
  (*dispatch->value.typedArray.atomics->add)(the, host, offset, the->stack, 0);
459
3
  mxPullSlot(mxResult);
460
3
}
461
462
void fx_Atomics_and(txMachine* the)
463
1
{
464
1
  mxAtomicsDeclarations(0, 0);
465
1
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
466
1
  (*dispatch->value.typedArray.atomics->and)(the, host, offset, the->stack, 0);
467
1
  mxPullSlot(mxResult);
468
1
}
469
470
void fx_Atomics_compareExchange(txMachine* the)
471
2
{
472
2
  mxAtomicsDeclarations(0, 0);
473
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
474
2
  fxPushAtomicsValue(the, 3, dispatch->value.typedArray.dispatch->constructorID);
475
2
  (*dispatch->value.typedArray.atomics->compareExchange)(the, host, offset, the->stack, 0);
476
2
  mxPullSlot(mxResult);
477
2
  mxPop();
478
2
}
479
480
void fx_Atomics_exchange(txMachine* the)
481
2
{
482
2
  mxAtomicsDeclarations(0, 0);
483
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
484
2
  (*dispatch->value.typedArray.atomics->exchange)(the, host, offset, the->stack, 0);
485
2
  mxPullSlot(mxResult);
486
2
}
487
488
void fx_Atomics_isLockFree(txMachine* the)
489
63
{
490
63
  txInteger size = (mxArgc > 0) ? fxToInteger(the, mxArgv(0)) : 0;
491
63
  mxResult->value.boolean = (size == 4) ? 1 : 0;
492
63
  mxResult->kind = XS_BOOLEAN_KIND;
493
63
}
494
495
void fx_Atomics_load(txMachine* the)
496
6
{
497
6
  mxAtomicsDeclarations(0, 0);
498
6
  (*dispatch->value.typedArray.atomics->load)(the, host, offset, mxResult, 0);
499
6
}
500
501
void fx_Atomics_or(txMachine* the)
502
1
{
503
1
  mxAtomicsDeclarations(0, 0);
504
1
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
505
1
  (*dispatch->value.typedArray.atomics->or)(the, host, offset, the->stack, 0);
506
1
  mxPullSlot(mxResult);
507
1
}
508
509
void fx_Atomics_notify(txMachine* the)
510
59
{
511
59
  mxAtomicsDeclarations(1, 0);
512
59
  txInteger count = ((mxArgc > 2) && !mxIsUndefined(mxArgv(2))) ? fxToInteger(the, mxArgv(2)) : 20;
513
59
  if (count < 0)
514
3
    count = 0;
515
59
  if (host->kind == XS_ARRAY_BUFFER_KIND) {
516
4
    mxResult->value.integer = 0;
517
4
  }
518
55
  else {
519
55
    mxResult->value.integer = fxNotifySharedChunk(the, (txByte*)host->value.host.data + offset, count);
520
55
  }
521
59
  mxResult->kind = XS_INTEGER_KIND;
522
59
}
523
524
void fx_Atomics_store(txMachine* the)
525
15
{
526
15
  mxAtomicsDeclarations(0, 0);
527
15
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
528
15
  *mxResult = *the->stack;
529
15
  (*dispatch->value.typedArray.atomics->store)(the, host, offset, the->stack, 0);
530
15
  mxPop();
531
15
}
532
533
void fx_Atomics_sub(txMachine* the)
534
2
{
535
2
  mxAtomicsDeclarations(0, 0);
536
2
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
537
2
  (*dispatch->value.typedArray.atomics->sub)(the, host, offset, the->stack, 0);
538
2
  mxPullSlot(mxResult);
539
2
}
540
541
void fx_Atomics_wait(txMachine* the)
542
17
{
543
17
  mxAtomicsDeclarations(1, 1);
544
17
  txNumber timeout;
545
17
  txInteger result;
546
17
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
547
17
  timeout = (mxArgc > 3) ? fxToNumber(the, mxArgv(3)) : C_NAN;
548
17
  if (c_isnan(timeout))
549
0
    timeout = C_INFINITY;
550
17
  else if (timeout < 0)
551
0
    timeout = 0;
552
17
  result = (*dispatch->value.typedArray.atomics->wait)(the, host, offset, the->stack, timeout);
553
17
  if (result < 0)
554
0
    mxPushStringX("not-equal");
555
17
  else {
556
17
    result = fxWaitSharedChunk(the, (txByte*)host->value.host.data + offset, timeout, C_NULL);
557
17
    if (result == 0)
558
0
      mxPushStringX("timed-out");
559
17
    else
560
17
      mxPushStringX("ok");
561
17
  }
562
17
  mxPullSlot(mxResult);
563
17
}
564
565
#if mxECMAScript2024
566
void fx_Atomics_waitAsync(txMachine* the)
567
4
{
568
4
  mxAtomicsDeclarations(1, 1);
569
4
  txNumber timeout;
570
4
  txInteger result;
571
4
  txSlot* slot;
572
4
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
573
4
  timeout = (mxArgc > 3) ? fxToNumber(the, mxArgv(3)) : C_NAN;
574
4
  if (c_isnan(timeout))
575
0
    timeout = C_INFINITY;
576
4
  else if (timeout < 0)
577
1
    timeout = 0;
578
4
  result = (*dispatch->value.typedArray.atomics->wait)(the, host, offset, the->stack, timeout);
579
  
580
4
  mxPush(mxObjectPrototype);
581
4
  slot = fxLastProperty(the, fxNewObjectInstance(the));
582
4
  slot = fxNextBooleanProperty(the, slot, (result <= 0) ? 0 : 1, mxID(_async), XS_NO_FLAG);
583
4
  if (result < 0)
584
1
    fxNextStringXProperty(the, slot, "not-equal", mxID(_value), XS_NO_FLAG);
585
3
  else if (result == 0)
586
1
    fxNextStringXProperty(the, slot, "timed-out", mxID(_value), XS_NO_FLAG);
587
2
  else {
588
2
    txSlot* resolveFunction;
589
2
    txSlot* rejectFunction;
590
2
    mxTemporary(resolveFunction);
591
2
    mxTemporary(rejectFunction);
592
2
    mxPush(mxPromiseConstructor);
593
2
    fxNewPromiseCapability(the, resolveFunction, rejectFunction);
594
2
    fxNextSlotProperty(the, slot, the->stack, mxID(_value), XS_NO_FLAG);
595
2
    mxPop(); // promise
596
2
    fxWaitSharedChunk(the, (txByte*)host->value.host.data + offset, timeout, resolveFunction);
597
2
    mxPop(); // rejectFunction
598
2
    mxPop(); // resolveFunction
599
2
  }
600
4
  mxPullSlot(mxResult);
601
4
}
602
#endif
603
604
void fx_Atomics_xor(txMachine* the)
605
1
{
606
1
  mxAtomicsDeclarations(0, 0);
607
1
  fxPushAtomicsValue(the, 2, dispatch->value.typedArray.dispatch->constructorID);
608
1
  (*dispatch->value.typedArray.atomics->xor)(the, host, offset, the->stack, 0);
609
1
  mxPullSlot(mxResult);
610
1
}
611
612
#ifdef mxUseDefaultSharedChunks
613
614
#if defined(mxUsePOSIXThreads)
615
  #define mxThreads 1
616
  typedef pthread_cond_t txCondition;
617
  typedef pthread_mutex_t txMutex;
618
  typedef pthread_t txThread;
619
0
  #define mxCreateCondition(CONDITION) pthread_cond_init(CONDITION,NULL)
620
16.3k
  #define mxCreateMutex(MUTEX) pthread_mutex_init(MUTEX,NULL)
621
16.3k
  #define mxCurrentThread() pthread_self()
622
0
  #define mxDeleteCondition(CONDITION) pthread_cond_destroy(CONDITION)
623
16.3k
  #define mxDeleteMutex(MUTEX) pthread_mutex_destroy(MUTEX)
624
16.4k
  #define mxLockMutex(MUTEX) pthread_mutex_lock(MUTEX)
625
16.4k
  #define mxUnlockMutex(MUTEX) pthread_mutex_unlock(MUTEX)
626
0
  #define mxWakeCondition(CONDITION) pthread_cond_signal(CONDITION)
627
#elif defined(mxUseFreeRTOSTasks)
628
  #define mxThreads 1
629
630
  #include "FreeRTOS.h"
631
#if ESP32
632
  #include "freertos/queue.h"
633
  #include "freertos/semphr.h"
634
#else
635
  #include "queue.h"
636
  #include "semphr.h"
637
#endif
638
  typedef TaskHandle_t txCondition;
639
  typedef struct {
640
#if nrf52
641
    SemaphoreHandle_t sem;
642
#else
643
    QueueHandle_t handle;
644
    StaticSemaphore_t buffer;
645
#endif
646
  } txMutex;
647
  typedef TaskHandle_t txThread;
648
  #define mxCreateCondition(CONDITION) *(CONDITION) = xTaskGetCurrentTaskHandle()
649
#if nrf52
650
  #define mxCreateMutex(MUTEX) (MUTEX)->sem = xSemaphoreCreateMutex()
651
#else
652
  #define mxCreateMutex(MUTEX) (MUTEX)->handle = xSemaphoreCreateMutexStatic(&((MUTEX)->buffer))
653
#endif
654
  #define mxCurrentThread() xTaskGetCurrentTaskHandle()
655
  #define mxDeleteCondition(CONDITION) *(CONDITION) = NULL
656
#if nrf52
657
  #define mxDeleteMutex(MUTEX) vSemaphoreDelete((MUTEX)->sem)
658
  #define mxLockMutex(MUTEX) xSemaphoreTake((MUTEX)->sem, portMAX_DELAY)
659
  #define mxUnlockMutex(MUTEX) xSemaphoreGive((MUTEX)->sem)
660
#else
661
  #define mxDeleteMutex(MUTEX) vSemaphoreDelete((MUTEX)->handle)
662
  #define mxLockMutex(MUTEX) xSemaphoreTake((MUTEX)->handle, portMAX_DELAY)
663
  #define mxUnlockMutex(MUTEX) xSemaphoreGive((MUTEX)->handle)
664
#endif
665
  #define mxWakeCondition(CONDITION) xTaskNotifyGive(*(CONDITION));
666
#elif mxWindows
667
  #define mxThreads 1
668
  typedef CONDITION_VARIABLE txCondition;
669
  typedef CRITICAL_SECTION txMutex;
670
  typedef DWORD txThread;
671
  #define mxCreateCondition(CONDITION) InitializeConditionVariable(CONDITION)
672
  #define mxCreateMutex(MUTEX) InitializeCriticalSection(MUTEX)
673
  #define mxCurrentThread() GetCurrentThreadId()
674
  #define mxDeleteCondition(CONDITION) (void)(CONDITION)
675
  #define mxDeleteMutex(MUTEX) DeleteCriticalSection(MUTEX)
676
  #define mxLockMutex(MUTEX) EnterCriticalSection(MUTEX)
677
  #define mxUnlockMutex(MUTEX) LeaveCriticalSection(MUTEX)
678
  #define mxWakeCondition(CONDITION) WakeConditionVariable(CONDITION)
679
#else
680
  #define mxThreads 0
681
  typedef void* txThread;
682
  #define mxCurrentThread() C_NULL
683
#endif
684
685
typedef struct sxSharedChunk txSharedChunk;
686
typedef struct sxSharedCluster txSharedCluster;
687
typedef struct sxSharedWaiter txSharedWaiter;
688
689
typedef void (*txTimerCallback)(void* timer, void *refcon, txInteger refconSize);
690
extern void fxRescheduleTimer(void* timer, txNumber timeout, txNumber interval);
691
extern void* fxScheduleTimer(txNumber timeout, txNumber interval, txTimerCallback callback, void* refcon, txInteger refconSize);
692
extern void fxUnscheduleTimer(void* timer);
693
694
struct sxSharedChunk {
695
#if mxThreads && !defined(mxUseGCCAtomics)
696
  txMutex mutex;
697
#endif
698
  txSize size;
699
  txSize usage;
700
};
701
702
struct sxSharedCluster {
703
  txThread mainThread;
704
  txSize usage;
705
#if mxThreads
706
  txSharedWaiter* first; 
707
  txMutex waiterMutex; 
708
#endif
709
};
710
711
struct sxSharedWaiter {
712
  txSharedWaiter* next;
713
  txMachine* the;
714
  void* data;
715
  void* condition;
716
  void* timer;
717
  txSlot resolve;
718
  txBoolean ok;
719
};
720
721
txSharedCluster* gxSharedCluster = C_NULL;
722
723
void fxInitializeSharedCluster(txMachine* the)
724
16.3k
{
725
16.3k
  if (gxSharedCluster) {
726
0
    gxSharedCluster->usage++;
727
0
  }
728
16.3k
  else {
729
16.3k
    gxSharedCluster = c_calloc(sizeof(txSharedCluster), 1);
730
16.3k
    if (gxSharedCluster) {
731
16.3k
      gxSharedCluster->mainThread = mxCurrentThread();
732
16.3k
      gxSharedCluster->usage++;
733
16.3k
    #if mxThreads
734
16.3k
      mxCreateMutex(&gxSharedCluster->waiterMutex);
735
16.3k
    #endif
736
16.3k
    #ifdef mxInitializeSharedTimers
737
16.3k
      mxInitializeSharedTimers();
738
16.3k
    #endif
739
16.3k
    }
740
16.3k
  }
741
16.3k
}
742
743
void fxTerminateSharedCluster(txMachine* the)
744
16.3k
{
745
16.3k
  if (gxSharedCluster) {
746
16.3k
  #ifdef mxUnscheduleSharedTimer
747
16.3k
    if (the) {
748
16.3k
      txSharedWaiter** address;
749
16.3k
      txSharedWaiter* waiter;
750
16.3k
      mxLockMutex(&gxSharedCluster->waiterMutex);
751
16.3k
      address = &(gxSharedCluster->first);
752
16.3k
      while ((waiter = *address)) {
753
0
        if (waiter->the == the) {
754
0
          *address = waiter->next;
755
0
          if (waiter->timer)
756
0
            mxUnscheduleSharedTimer(waiter->timer);
757
0
          c_free(waiter);         
758
0
        }
759
0
        else
760
0
          address = &(waiter->next);
761
0
      }
762
16.3k
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
763
16.3k
    }
764
16.3k
  #endif
765
16.3k
    gxSharedCluster->usage--;
766
16.3k
    if (gxSharedCluster->usage == 0) {
767
16.3k
    #ifdef mxTerminateSharedTimers
768
16.3k
      mxTerminateSharedTimers();
769
16.3k
    #endif
770
16.3k
    #if mxThreads
771
16.3k
      mxDeleteMutex(&gxSharedCluster->waiterMutex);
772
16.3k
    #endif
773
16.3k
      c_free(gxSharedCluster);
774
16.3k
      gxSharedCluster = C_NULL;
775
16.3k
    }
776
16.3k
  }
777
16.3k
}
778
779
void* fxCreateSharedChunk(txInteger size)
780
3.71k
{
781
3.71k
  txSharedChunk* chunk = c_malloc(sizeof(txSharedChunk) + size);
782
3.71k
  if (chunk) {
783
3.71k
    void* data = (((txByte*)chunk) + sizeof(txSharedChunk));
784
  #if mxThreads && !defined(mxUseGCCAtomics)
785
    mxCreateMutex(&(chunk->mutex));
786
  #endif
787
3.71k
    chunk->size = size;
788
3.71k
    chunk->usage = 1;
789
3.71k
    c_memset(data, 0, size);
790
3.71k
    return data;
791
3.71k
  }
792
0
  return C_NULL;
793
3.71k
}
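fxCreateSharedChunk allocates the txSharedChunk header and the zero-filled payload in one block and hands callers the payload pointer; the other chunk functions step back by sizeof(txSharedChunk) to recover the header. A simplified standalone sketch of that layout (chunk_create/chunk_header are illustrative names; the real header uses txSize fields and also carries a mutex when GCC atomics are unavailable):

#include <stdlib.h>
#include <string.h>

typedef struct { int size; int usage; } ChunkHeader;  /* stand-in for txSharedChunk */

static void* chunk_create(int size) {
    ChunkHeader* chunk = malloc(sizeof(ChunkHeader) + size);
    if (!chunk)
        return NULL;
    void* data = (char*)chunk + sizeof(ChunkHeader);  /* payload follows the header */
    chunk->size = size;
    chunk->usage = 1;                                 /* reference count used by retain/release */
    memset(data, 0, size);
    return data;                                      /* callers only ever see the payload */
}

static ChunkHeader* chunk_header(void* data) {
    return (ChunkHeader*)((char*)data - sizeof(ChunkHeader)); /* step back to the header */
}

int main(void) {
    void* data = chunk_create(16);
    if (data) {
        chunk_header(data)->usage++;   /* what fxRetainSharedChunk does, but atomically */
        free(chunk_header(data));
    }
    return 0;
}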
794
795
void fxLockSharedChunk(void* data)
796
0
{
797
#if mxThreads && !defined(mxUseGCCAtomics)
798
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
799
    mxLockMutex(&(chunk->mutex));
800
#endif
801
0
}
802
803
txInteger fxMeasureSharedChunk(void* data)
804
0
{
805
0
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
806
0
  return chunk->size;
807
0
}
808
809
txInteger fxNotifySharedChunk(txMachine* the, void* data, txInteger count)
810
19
{
811
19
  txInteger result = 0;
812
19
  if (gxSharedCluster) {
813
19
  #if mxThreads
814
19
    txSharedWaiter* first = C_NULL;
815
19
    txSharedWaiter* last = C_NULL;
816
19
    txSharedWaiter** address;
817
19
    txSharedWaiter* waiter;
818
19
    mxLockMutex(&gxSharedCluster->waiterMutex);
819
19
    address = &(gxSharedCluster->first);
820
19
    while ((waiter = *address)) {
821
0
      if (waiter->data == data) {
822
0
        if (count == 0)
823
0
          break;
824
0
        count--;
825
0
        if (first)
826
0
          last->next = waiter;
827
0
        else
828
0
          first = waiter;
829
0
        last = waiter;
830
0
        *address = waiter->next;
831
0
        waiter->next = C_NULL;
832
0
      }
833
0
      else
834
0
        address = &(waiter->next);
835
0
    }
836
19
    waiter = first;
837
19
    while (waiter) {
838
0
      waiter->data = C_NULL;
839
0
      if (waiter->condition) {
840
0
        mxWakeCondition((txCondition*)waiter->condition);
841
0
      }
842
0
      else {
843
0
        waiter->ok = 1;
844
0
      #ifdef mxRescheduleSharedTimer
845
0
        mxRescheduleSharedTimer(waiter->timer, 0, 0);
846
      #else
847
        fxAbort(the, XS_DEAD_STRIP_EXIT);
848
      #endif
849
0
      }
850
0
      result++;
851
0
      waiter = waiter->next;
852
0
    }
853
19
    mxUnlockMutex(&gxSharedCluster->waiterMutex);
854
19
  #endif  
855
19
  }
856
19
  return result;
857
19
}
858
859
void* fxRetainSharedChunk(void* data)
860
0
{
861
0
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
862
0
  txS4 result = 0;
863
0
  txS4 value = 1;
864
0
  txS4* address = &(chunk->usage);
865
#ifndef mxUseGCCAtomics
866
  txBoolean lock = 1;
867
#endif
868
0
  mxAtomicsAdd();
869
0
  if (result == 0)
870
0
    return C_NULL;
871
0
  return data;
872
0
}
873
874
void fxReleaseSharedChunk(void* data)
875
3.71k
{
876
3.71k
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
877
3.71k
  txS4 result = 0;
878
3.71k
  txS4 value = 1;
879
3.71k
  txS4* address = &(chunk->usage);
880
#ifndef mxUseGCCAtomics
881
  txBoolean lock = 1;
882
#endif
883
3.71k
  mxAtomicsSub();
884
3.71k
  if (result == 1) {
885
3.71k
    c_free(chunk);
886
3.71k
  }
887
3.71k
}
888
889
void fxUnlockSharedChunk(void* data)
890
0
{
891
#if mxThreads && !defined(mxUseGCCAtomics)
892
  txSharedChunk* chunk = (txSharedChunk*)(((txByte*)data) - sizeof(txSharedChunk));
893
    mxUnlockMutex(&(chunk->mutex));
894
#endif
895
0
}
896
897
#if mxThreads
898
void fxWaitSharedChunkCallback(void* timer, void* refcon, txInteger refconSize)
899
0
{
900
0
  txSharedWaiter** address = (txSharedWaiter**)refcon;
901
0
  txSharedWaiter* waiter = *address;
902
0
  txSharedWaiter* link;
903
0
  txMachine* the;
904
0
  mxLockMutex(&gxSharedCluster->waiterMutex);
905
0
  address = &(gxSharedCluster->first);
906
0
  while ((link = *address)) {
907
0
    if (link == waiter)
908
0
      *address = link->next;
909
0
    else
910
0
      address = &(link->next);
911
0
  }
912
0
  mxUnlockMutex(&gxSharedCluster->waiterMutex);
913
0
  the = waiter->the;
914
0
  fxBeginHost(waiter->the);
915
0
  mxTry(the) {
916
0
    mxPushUndefined();
917
0
    mxPush(waiter->resolve);
918
0
    mxCall();
919
0
    if (waiter->ok)
920
0
      mxPushStringX("ok");
921
0
    else
922
0
      mxPushStringX("timed-out");
923
0
    mxRunCount(1);
924
0
    mxPop();
925
0
  }
926
0
  mxCatch(the) {
927
0
  }
928
0
  fxForget(the, &waiter->resolve);
929
0
  fxEndHost(the);
930
0
  c_free(waiter);
931
0
}
932
#endif
933
934
txInteger fxWaitSharedChunk(txMachine* the, void* data, txNumber timeout, txSlot* resolveFunction)
935
0
{
936
0
  txInteger result = 1;
937
0
  if (gxSharedCluster) {
938
0
  #if mxThreads
939
0
    txSharedWaiter* waiter;
940
0
    txSharedWaiter** address;
941
0
    txSharedWaiter* link;
942
0
    waiter = c_calloc(1, sizeof(txSharedWaiter));
943
0
    if (!waiter)
944
0
      fxAbort(the, XS_NOT_ENOUGH_MEMORY_EXIT);
945
0
    waiter->the = the;
946
0
    waiter->data = data;
947
0
    mxLockMutex(&gxSharedCluster->waiterMutex);
948
0
    address = &(gxSharedCluster->first);
949
0
    while ((link = *address))
950
0
      address = &(link->next);
951
0
    *address = waiter;
952
    
953
0
    if (resolveFunction) {
954
0
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
955
0
      waiter->resolve = *resolveFunction;
956
0
      fxRemember(the, &waiter->resolve);
957
0
    #ifdef mxScheduleSharedTimer
958
0
      waiter->timer = mxScheduleSharedTimer(timeout, 0, (txSharedTimerCallback)fxWaitSharedChunkCallback, &waiter, sizeof(txSharedWaiter*));
959
0
      if (!waiter->timer)
960
0
        fxAbort(the, XS_NOT_ENOUGH_MEMORY_EXIT);
961
    #else
962
      fxAbort(the, XS_DEAD_STRIP_EXIT);
963
    #endif
964
0
    }
965
0
    else if (gxSharedCluster->mainThread != mxCurrentThread()) {
966
0
      txCondition condition;
967
0
      mxCreateCondition(&condition);
968
0
      waiter->condition = &condition;
969
0
      if (timeout == C_INFINITY) {
970
0
      #if defined(mxUsePOSIXThreads)
971
0
        while (waiter->data == data)
972
0
          pthread_cond_wait(&condition, &gxSharedCluster->waiterMutex);
973
      #elif defined(mxUseFreeRTOSTasks)
974
        mxUnlockMutex(&gxSharedCluster->waiterMutex);
975
        ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
976
        mxLockMutex(&gxSharedCluster->waiterMutex);
977
      #else
978
        while (waiter->data == data)
979
          SleepConditionVariableCS(&condition, &gxSharedCluster->waiterMutex, INFINITE);
980
      #endif
981
0
      }
982
0
      else {
983
0
      #if defined(mxUsePOSIXThreads)
984
0
        struct timespec ts;
985
0
        timeout += fxDateNow();
986
0
        ts.tv_sec = c_floor(timeout / 1000);
987
0
        ts.tv_nsec = c_fmod(timeout, 1000) * 1000000;
988
0
        while (waiter->data == data) {
989
0
          result = (pthread_cond_timedwait(&condition, &gxSharedCluster->waiterMutex, &ts) == ETIMEDOUT) ? 0 : 1;
990
0
          if (!result)
991
0
            break;
992
0
        }
993
      #elif defined(mxUseFreeRTOSTasks)
994
        mxUnlockMutex(&gxSharedCluster->waiterMutex);
995
        ulTaskNotifyTake(pdTRUE, pdMS_TO_TICKS(timeout));
996
        mxLockMutex(&gxSharedCluster->waiterMutex);
997
        result = (waiter->data == data) ? 0 : 1;
998
      #else
999
        timeout += fxDateNow();
1000
        while (waiter->data == data) {
1001
          result = (SleepConditionVariableCS(&condition, &gxSharedCluster->waiterMutex, (DWORD)(timeout - fxDateNow()))) ? 1 : 0;
1002
          if (!result)
1003
            break;
1004
        }
1005
      #endif
1006
0
      }
1007
0
      address = &(gxSharedCluster->first);
1008
0
      while ((link = *address)) {
1009
0
        if (link == waiter)
1010
0
          *address = link->next;
1011
0
        else
1012
0
          address = &(link->next);
1013
0
      }
1014
0
      mxUnlockMutex(&gxSharedCluster->waiterMutex);
1015
0
      c_free(waiter);
1016
0
      mxDeleteCondition(&condition);
1017
0
    }
1018
0
    else {
1019
0
      mxTypeError("main thread cannot wait");
1020
0
    }
1021
0
  #endif  
1022
0
  }
1023
0
  else {
1024
0
    mxTypeError("no shared cluster");
1025
0
  }
1026
0
  return result;
1027
0
}
1028
1029
#endif /* mxUseDefaultSharedChunks */
1030
1031