Coverage Report

Created: 2025-09-08 08:10

/src/solidity/libsolidity/codegen/CompilerUtils.cpp
Line
Count
Source (jump to first uncovered line)
1
/*
2
  This file is part of solidity.
3
4
  solidity is free software: you can redistribute it and/or modify
5
  it under the terms of the GNU General Public License as published by
6
  the Free Software Foundation, either version 3 of the License, or
7
  (at your option) any later version.
8
9
  solidity is distributed in the hope that it will be useful,
10
  but WITHOUT ANY WARRANTY; without even the implied warranty of
11
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
  GNU General Public License for more details.
13
14
  You should have received a copy of the GNU General Public License
15
  along with solidity.  If not, see <http://www.gnu.org/licenses/>.
16
*/
17
// SPDX-License-Identifier: GPL-3.0
18
/**
19
 * @author Christian <c@ethdev.com>
20
 * @date 2014
21
 * Routines used by both the compiler and the expression compiler.
22
 */
23
24
#include <libsolidity/codegen/CompilerUtils.h>
25
26
#include <libsolidity/ast/AST.h>
27
#include <libsolidity/ast/TypeProvider.h>
28
#include <libsolidity/codegen/ABIFunctions.h>
29
#include <libsolidity/codegen/ArrayUtils.h>
30
#include <libsolidity/codegen/LValue.h>
31
#include <libsolutil/FunctionSelector.h>
32
#include <libevmasm/Instruction.h>
33
#include <libsolutil/Whiskers.h>
34
#include <libsolutil/StackTooDeepString.h>
35
36
using namespace solidity;
37
using namespace solidity::evmasm;
38
using namespace solidity::frontend;
39
using namespace solidity::langutil;
40
41
using solidity::util::Whiskers;
42
using solidity::util::h256;
43
using solidity::toCompactHexWithPrefix;
44
45
unsigned const CompilerUtils::dataStartOffset = 4;
46
size_t const CompilerUtils::freeMemoryPointer = 64;
47
size_t const CompilerUtils::zeroPointer = CompilerUtils::freeMemoryPointer + 32;
48
size_t const CompilerUtils::generalPurposeMemoryStart = CompilerUtils::zeroPointer + 32;
49
50
static_assert(CompilerUtils::freeMemoryPointer >= 64, "Free memory pointer must not overlap with scratch area.");
51
static_assert(CompilerUtils::zeroPointer >= CompilerUtils::freeMemoryPointer + 32, "Zero pointer must not overlap with free memory pointer.");
52
static_assert(CompilerUtils::generalPurposeMemoryStart >= CompilerUtils::zeroPointer + 32, "General purpose memory must not overlap with zero area.");
53
54
void CompilerUtils::initialiseFreeMemoryPointer()
55
24.9k
{
56
24.9k
  size_t reservedMemory = m_context.reservedMemory();
57
24.9k
  solAssert(bigint(generalPurposeMemoryStart) + bigint(reservedMemory) < bigint(1) << 63);
58
24.9k
  m_context << (u256(generalPurposeMemoryStart) + reservedMemory);
59
24.9k
  storeFreeMemoryPointer();
60
24.9k
}
61
62
void CompilerUtils::fetchFreeMemoryPointer()
63
55.9k
{
64
55.9k
  m_context << u256(freeMemoryPointer) << Instruction::MLOAD;
65
55.9k
}
66
67
void CompilerUtils::storeFreeMemoryPointer()
68
53.3k
{
69
53.3k
  m_context << u256(freeMemoryPointer) << Instruction::MSTORE;
70
53.3k
}
71
72
void CompilerUtils::allocateMemory()
73
2.91k
{
74
2.91k
  fetchFreeMemoryPointer();
75
2.91k
  m_context << Instruction::SWAP1 << Instruction::DUP2 << Instruction::ADD;
76
2.91k
  storeFreeMemoryPointer();
77
2.91k
}
78
79
void CompilerUtils::allocateMemory(u256 const& size)
80
22.4k
{
81
22.4k
  fetchFreeMemoryPointer();
82
22.4k
  m_context << Instruction::DUP1 << size << Instruction::ADD;
83
22.4k
  storeFreeMemoryPointer();
84
22.4k
}
85
86
void CompilerUtils::toSizeAfterFreeMemoryPointer()
87
8.88k
{
88
8.88k
  fetchFreeMemoryPointer();
89
8.88k
  m_context << Instruction::DUP1 << Instruction::SWAP2 << Instruction::SUB;
90
8.88k
  m_context << Instruction::SWAP1;
91
8.88k
}
92
93
void CompilerUtils::revertWithStringData(Type const& _argumentType)
94
59
{
95
59
  solAssert(_argumentType.isImplicitlyConvertibleTo(*TypeProvider::fromElementaryTypeName("string memory")));
96
59
  fetchFreeMemoryPointer();
97
59
  m_context << util::selectorFromSignatureU256("Error(string)");
98
59
  m_context << Instruction::DUP2 << Instruction::MSTORE;
99
59
  m_context << u256(4) << Instruction::ADD;
100
  // Stack: <string data> <mem pos of encoding start>
101
59
  abiEncode({&_argumentType}, {TypeProvider::array(DataLocation::Memory, true)});
102
59
  toSizeAfterFreeMemoryPointer();
103
59
  m_context << Instruction::REVERT;
104
59
}
105
106
void CompilerUtils::revertWithError(
107
  std::string const& _signature,
108
  std::vector<Type const*> const& _parameterTypes,
109
  std::vector<Type const*> const& _argumentTypes
110
)
111
53
{
112
53
  fetchFreeMemoryPointer();
113
53
  m_context << util::selectorFromSignatureU256(_signature);
114
53
  m_context << Instruction::DUP2 << Instruction::MSTORE;
115
53
  m_context << u256(4) << Instruction::ADD;
116
  // Stack: <arguments...> <mem pos of encoding start>
117
53
  abiEncode(_argumentTypes, _parameterTypes);
118
53
  toSizeAfterFreeMemoryPointer();
119
53
  m_context << Instruction::REVERT;
120
53
}
121
122
void CompilerUtils::returnDataToArray()
123
763
{
124
763
  if (m_context.evmVersion().supportsReturndata())
125
712
  {
126
712
    m_context << Instruction::RETURNDATASIZE;
127
712
    m_context.appendInlineAssembly(R"({
128
712
      switch v case 0 {
129
712
        v := 0x60
130
712
      } default {
131
712
        v := mload(0x40)
132
712
        mstore(0x40, add(v, and(add(returndatasize(), 0x3f), not(0x1f))))
133
712
        mstore(v, returndatasize())
134
712
        returndatacopy(add(v, 0x20), 0, returndatasize())
135
712
      }
136
712
    })", {"v"});
137
712
  }
138
51
  else
139
51
    pushZeroPointer();
140
763
}
141
142
void CompilerUtils::accessCalldataTail(Type const& _type)
143
14.5k
{
144
14.5k
  m_context << Instruction::SWAP1;
145
14.5k
  m_context.callYulFunction(
146
14.5k
    m_context.utilFunctions().accessCalldataTailFunction(_type),
147
14.5k
    2,
148
14.5k
    _type.isDynamicallySized() ? 2 : 1
149
14.5k
  );
150
14.5k
}
151
152
unsigned CompilerUtils::loadFromMemory(
153
  unsigned _offset,
154
  Type const& _type,
155
  bool _fromCalldata,
156
  bool _padToWordBoundaries
157
)
158
9.30k
{
159
9.30k
  solAssert(_type.category() != Type::Category::Array, "Unable to statically load dynamic type.");
160
9.30k
  m_context << u256(_offset);
161
9.30k
  return loadFromMemoryHelper(_type, _fromCalldata, _padToWordBoundaries);
162
9.30k
}
163
164
void CompilerUtils::loadFromMemoryDynamic(
165
  Type const& _type,
166
  bool _fromCalldata,
167
  bool _padToWordBoundaries,
168
  bool _keepUpdatedMemoryOffset
169
)
170
27.5k
{
171
27.5k
  if (_keepUpdatedMemoryOffset)
172
192
    m_context << Instruction::DUP1;
173
174
27.5k
  if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
175
0
  {
176
0
    solAssert(!arrayType->isDynamicallySized());
177
0
    solAssert(!_fromCalldata);
178
0
    solAssert(_padToWordBoundaries);
179
0
    if (_keepUpdatedMemoryOffset)
180
0
      m_context << arrayType->memoryDataSize() << Instruction::ADD;
181
0
  }
182
27.5k
  else
183
27.5k
  {
184
27.5k
    unsigned numBytes = loadFromMemoryHelper(_type, _fromCalldata, _padToWordBoundaries);
185
27.5k
    if (_keepUpdatedMemoryOffset)
186
192
    {
187
      // update memory counter
188
192
      moveToStackTop(_type.sizeOnStack());
189
192
      m_context << u256(numBytes) << Instruction::ADD;
190
192
    }
191
27.5k
  }
192
27.5k
}
193
194
void CompilerUtils::storeInMemory(unsigned _offset)
195
35.7k
{
196
35.7k
  unsigned numBytes = prepareMemoryStore(*TypeProvider::uint256(), true);
197
35.7k
  if (numBytes > 0)
198
35.7k
    m_context << u256(_offset) << Instruction::MSTORE;
199
35.7k
}
200
201
void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries, bool _cleanup)
202
56.4k
{
203
  // process special types (Reference, StringLiteral, Function)
204
56.4k
  if (auto ref = dynamic_cast<ReferenceType const*>(&_type))
205
8.33k
  {
206
8.33k
    solUnimplementedAssert(
207
8.33k
      ref->location() == DataLocation::Memory,
208
8.33k
      "Only in-memory reference type can be stored."
209
8.33k
    );
210
8.33k
    storeInMemoryDynamic(*TypeProvider::uint256(), _padToWordBoundaries, _cleanup);
211
8.33k
  }
212
48.0k
  else if (auto str = dynamic_cast<StringLiteralType const*>(&_type))
213
6
  {
214
6
    m_context << Instruction::DUP1;
215
6
    storeStringData(bytesConstRef(str->value()));
216
6
    if (_padToWordBoundaries)
217
3
      m_context << u256(std::max<size_t>(32, ((str->value().size() + 31) / 32) * 32));
218
3
    else
219
3
      m_context << u256(str->value().size());
220
6
    m_context << Instruction::ADD;
221
6
  }
222
48.0k
  else if (
223
48.0k
    _type.category() == Type::Category::Function &&
224
48.0k
    dynamic_cast<FunctionType const&>(_type).kind() == FunctionType::Kind::External
225
48.0k
  )
226
42
  {
227
42
    combineExternalFunctionType(true);
228
42
    m_context << Instruction::DUP2 << Instruction::MSTORE;
229
42
    m_context << u256(_padToWordBoundaries ? 32 : 24) << Instruction::ADD;
230
42
  }
231
48.0k
  else if (_type.isValueType())
232
48.0k
  {
233
48.0k
    unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries, _cleanup);
234
48.0k
    m_context << Instruction::DUP2 << Instruction::MSTORE;
235
48.0k
    m_context << u256(numBytes) << Instruction::ADD;
236
48.0k
  }
237
0
  else // Should never happen
238
0
  {
239
0
    solAssert(
240
0
      false,
241
0
      "Memory store of type " + _type.toString(true) + " not allowed."
242
0
    );
243
0
  }
244
56.4k
}
245
246
void CompilerUtils::abiDecode(TypePointers const& _typeParameters, bool _fromMemory)
247
7.98k
{
248
  /// Stack: <source_offset> <length>
249
7.98k
  if (m_context.useABICoderV2())
250
7.89k
  {
251
    // Use the new Yul-based decoding function
252
7.89k
    auto stackHeightBefore = m_context.stackHeight();
253
7.89k
    abiDecodeV2(_typeParameters, _fromMemory);
254
7.89k
    solAssert(m_context.stackHeight() - stackHeightBefore == sizeOnStack(_typeParameters) - 2);
255
7.89k
    return;
256
7.89k
  }
257
258
  //@todo this does not yet support nested dynamic arrays
259
82
  size_t encodedSize = 0;
260
82
  for (auto const& t: _typeParameters)
261
169
    encodedSize += t->decodingType()->calldataHeadSize();
262
263
82
  Whiskers templ(R"({
264
82
    if lt(len, <encodedSize>) { <revertString> }
265
82
  })");
266
82
  templ("encodedSize", std::to_string(encodedSize));
267
82
  templ("revertString", m_context.revertReasonIfDebug("Calldata too short"));
268
82
  m_context.appendInlineAssembly(templ.render(), {"len"});
269
270
82
  m_context << Instruction::DUP2 << Instruction::ADD;
271
82
  m_context << Instruction::SWAP1;
272
  /// Stack: <input_end> <source_offset>
273
274
  // Retain the offset pointer as base_offset, the point from which the data offsets are computed.
275
82
  m_context << Instruction::DUP1;
276
82
  for (Type const* parameterType: _typeParameters)
277
169
  {
278
    // stack: v1 v2 ... v(k-1) input_end base_offset current_offset
279
169
    Type const* type = parameterType->decodingType();
280
169
    solUnimplementedAssert(type, "No decoding type found.");
281
169
    if (type->category() == Type::Category::Array)
282
35
    {
283
35
      auto const& arrayType = dynamic_cast<ArrayType const&>(*type);
284
35
      solUnimplementedAssert(!arrayType.baseType()->isDynamicallyEncoded(), "Nested arrays not yet implemented.");
285
35
      if (_fromMemory)
286
1
      {
287
1
        solUnimplementedAssert(
288
0
          arrayType.baseType()->isValueType(),
289
0
          "Nested memory arrays not yet implemented here."
290
0
        );
291
        // @todo If base type is an array or struct, it is still calldata-style encoded, so
292
        // we would have to convert it like below.
293
0
        solAssert(arrayType.location() == DataLocation::Memory);
294
0
        if (arrayType.isDynamicallySized())
295
0
        {
296
          // compute data pointer
297
0
          m_context << Instruction::DUP1 << Instruction::MLOAD;
298
          // stack: v1 v2 ... v(k-1) input_end base_offset current_offset data_offset
299
300
0
          fetchFreeMemoryPointer();
301
          // stack: v1 v2 ... v(k-1) input_end base_offset current_offset data_offset dstmem
302
0
          moveIntoStack(4);
303
          // stack: v1 v2 ... v(k-1) dstmem input_end base_offset current_offset data_offset
304
0
          m_context << Instruction::DUP5;
305
          // stack: v1 v2 ... v(k-1) dstmem input_end base_offset current_offset data_offset dstmem
306
307
          // Check that the data pointer is valid and that length times
308
          // item size is still inside the range.
309
0
          Whiskers templ(R"({
310
0
            if gt(ptr, 0x100000000) { <revertStringPointer> }
311
0
            ptr := add(ptr, base_offset)
312
0
            let array_data_start := add(ptr, 0x20)
313
0
            if gt(array_data_start, input_end) { <revertStringStart> }
314
0
            let array_length := mload(ptr)
315
0
            if or(
316
0
              gt(array_length, 0x100000000),
317
0
              gt(add(array_data_start, mul(array_length, <item_size>)), input_end)
318
0
            ) { <revertStringLength> }
319
0
            mstore(dst, array_length)
320
0
            dst := add(dst, 0x20)
321
0
          })");
322
0
          templ("item_size", std::to_string(arrayType.calldataStride()));
323
          // TODO add test
324
0
          templ("revertStringPointer", m_context.revertReasonIfDebug("ABI memory decoding: invalid data pointer"));
325
0
          templ("revertStringStart", m_context.revertReasonIfDebug("ABI memory decoding: invalid data start"));
326
0
          templ("revertStringLength", m_context.revertReasonIfDebug("ABI memory decoding: invalid data length"));
327
0
          m_context.appendInlineAssembly(templ.render(), {"input_end", "base_offset", "offset", "ptr", "dst"});
328
          // stack: v1 v2 ... v(k-1) dstmem input_end base_offset current_offset data_ptr dstdata
329
0
          m_context << Instruction::SWAP1;
330
          // stack: v1 v2 ... v(k-1) dstmem input_end base_offset current_offset dstdata data_ptr
331
0
          ArrayUtils(m_context).copyArrayToMemory(arrayType, true);
332
          // stack: v1 v2 ... v(k-1) dstmem input_end base_offset current_offset mem_end
333
0
          storeFreeMemoryPointer();
334
0
          m_context << u256(0x20) << Instruction::ADD;
335
0
        }
336
0
        else
337
0
        {
338
          // Size has already been checked for this one.
339
0
          moveIntoStack(2);
340
0
          m_context << Instruction::DUP3;
341
0
          m_context << u256(arrayType.calldataHeadSize()) << Instruction::ADD;
342
0
        }
343
0
      }
344
34
      else
345
34
      {
346
        // first load from calldata and potentially convert to memory if arrayType is memory
347
34
        Type const* calldataType = TypeProvider::withLocation(&arrayType, DataLocation::CallData, false);
348
34
        if (calldataType->isDynamicallySized())
349
29
        {
350
          // put on stack: data_pointer length
351
29
          loadFromMemoryDynamic(*TypeProvider::uint256(), !_fromMemory);
352
29
          m_context << Instruction::SWAP1;
353
          // stack: input_end base_offset next_pointer data_offset
354
29
          m_context.appendInlineAssembly(Whiskers(R"({
355
29
            if gt(data_offset, 0x100000000) { <revertString> }
356
29
          })")
357
          // TODO add test
358
29
          ("revertString", m_context.revertReasonIfDebug("ABI calldata decoding: invalid data offset"))
359
29
          .render(), {"data_offset"});
360
29
          m_context << Instruction::DUP3 << Instruction::ADD;
361
          // stack: input_end base_offset next_pointer array_head_ptr
362
29
          m_context.appendInlineAssembly(Whiskers(R"({
363
29
            if gt(add(array_head_ptr, 0x20), input_end) { <revertString> }
364
29
          })")
365
29
          ("revertString", m_context.revertReasonIfDebug("ABI calldata decoding: invalid head pointer"))
366
29
          .render(), {"input_end", "base_offset", "next_ptr", "array_head_ptr"});
367
368
          // retrieve length
369
29
          loadFromMemoryDynamic(*TypeProvider::uint256(), !_fromMemory, true);
370
          // stack: input_end base_offset next_pointer array_length data_pointer
371
29
          m_context << Instruction::SWAP2;
372
          // stack: input_end base_offset data_pointer array_length next_pointer
373
29
          m_context.appendInlineAssembly(Whiskers(R"({
374
29
            if or(
375
29
              gt(array_length, 0x100000000),
376
29
              gt(add(data_ptr, mul(array_length, )" + std::to_string(arrayType.calldataStride()) + R"()), input_end)
377
29
            ) { <revertString> }
378
29
          })")
379
29
          ("revertString", m_context.revertReasonIfDebug("ABI calldata decoding: invalid data pointer"))
380
29
          .render(), {"input_end", "base_offset", "data_ptr", "array_length", "next_ptr"});
381
29
        }
382
5
        else
383
5
        {
384
          // size has already been checked
385
          // stack: input_end base_offset data_offset
386
5
          m_context << Instruction::DUP1;
387
5
          m_context << u256(calldataType->calldataHeadSize()) << Instruction::ADD;
388
5
        }
389
34
        if (arrayType.location() == DataLocation::Memory)
390
16
        {
391
          // stack: input_end base_offset calldata_ref [length] next_calldata
392
          // copy to memory
393
          // move calldata type up again
394
16
          moveIntoStack(calldataType->sizeOnStack());
395
16
          convertType(*calldataType, arrayType, false, false, true);
396
          // fetch next pointer again
397
16
          moveToStackTop(arrayType.sizeOnStack());
398
16
        }
399
        // move input_end up
400
        // stack: input_end base_offset calldata_ref [length] next_calldata
401
34
        moveToStackTop(2 + arrayType.sizeOnStack());
402
34
        m_context << Instruction::SWAP1;
403
        // stack: base_offset calldata_ref [length] input_end next_calldata
404
34
        moveToStackTop(2 + arrayType.sizeOnStack());
405
34
        m_context << Instruction::SWAP1;
406
        // stack: calldata_ref [length] input_end base_offset next_calldata
407
34
      }
408
35
    }
409
134
    else
410
134
    {
411
134
      solAssert(!type->isDynamicallyEncoded(), "Unknown dynamically sized type: " + type->toString());
412
134
      loadFromMemoryDynamic(*type, !_fromMemory, true);
413
      // stack: v1 v2 ... v(k-1) input_end base_offset v(k) mem_offset
414
134
      moveToStackTop(1, type->sizeOnStack());
415
134
      moveIntoStack(3, type->sizeOnStack());
416
134
    }
417
    // stack: v1 v2 ... v(k-1) v(k) input_end base_offset next_offset
418
169
  }
419
81
  popStackSlots(3);
420
81
}
421
422
void CompilerUtils::encodeToMemory(
423
  TypePointers const& _givenTypes,
424
  TypePointers const& _targetTypes,
425
  bool _padToWordBoundaries,
426
  bool _copyDynamicDataInPlace,
427
  bool _encodeAsLibraryTypes
428
)
429
12.8k
{
430
  // stack: <v1> <v2> ... <vn> <mem>
431
12.8k
  bool const encoderV2 = m_context.useABICoderV2();
432
12.8k
  TypePointers targetTypes = _targetTypes.empty() ? _givenTypes : _targetTypes;
433
12.8k
  solAssert(targetTypes.size() == _givenTypes.size());
434
12.8k
  for (Type const*& t: targetTypes)
435
15.6k
  {
436
15.6k
    Type const* tEncoding = t->fullEncodingType(_encodeAsLibraryTypes, encoderV2, !_padToWordBoundaries);
437
15.6k
    solUnimplementedAssert(tEncoding, "Encoding type \"" + t->toString() + "\" not yet implemented.");
438
15.6k
    t = std::move(tEncoding);
439
15.6k
  }
440
441
12.8k
  if (_givenTypes.empty())
442
962
    return;
443
11.9k
  if (encoderV2)
444
11.8k
  {
445
    // Use the new Yul-based encoding function
446
11.8k
    solAssert(
447
11.8k
      _padToWordBoundaries != _copyDynamicDataInPlace,
448
11.8k
      "Non-padded and in-place encoding can only be combined."
449
11.8k
    );
450
11.8k
    auto stackHeightBefore = m_context.stackHeight();
451
11.8k
    abiEncodeV2(_givenTypes, targetTypes, _encodeAsLibraryTypes, _padToWordBoundaries);
452
11.8k
    solAssert(stackHeightBefore - m_context.stackHeight() == sizeOnStack(_givenTypes));
453
11.8k
    return;
454
11.8k
  }
455
456
  // Stack during operation:
457
  // <v1> <v2> ... <vn> <mem_start> <dyn_head_1> ... <dyn_head_r> <end_of_mem>
458
  // The values dyn_head_n are added during the first loop and they point to the head part
459
  // of the nth dynamic parameter, which is filled once the dynamic parts are processed.
460
461
  // store memory start pointer
462
115
  m_context << Instruction::DUP1;
463
464
115
  unsigned argSize = CompilerUtils::sizeOnStack(_givenTypes);
465
115
  unsigned stackPos = 0; // advances through the argument values
466
115
  unsigned dynPointers = 0; // number of dynamic head pointers on the stack
467
297
  for (size_t i = 0; i < _givenTypes.size(); ++i)
468
182
  {
469
182
    Type const* targetType = targetTypes[i];
470
182
    solAssert(!!targetType, "Externalable type expected.");
471
182
    if (targetType->isDynamicallySized() && !_copyDynamicDataInPlace)
472
48
    {
473
      // leave end_of_mem as dyn head pointer
474
48
      m_context << Instruction::DUP1 << u256(32) << Instruction::ADD;
475
48
      dynPointers++;
476
48
      assertThrow(
477
48
        (argSize + dynPointers) < 16,
478
48
        StackTooDeepError,
479
48
        util::stackTooDeepString
480
48
      );
481
48
    }
482
134
    else
483
134
    {
484
134
      bool needCleanup = true;
485
134
      copyToStackTop(argSize - stackPos + dynPointers + 2, _givenTypes[i]->sizeOnStack());
486
134
      solAssert(!!targetType, "Externalable type expected.");
487
134
      Type const* type = targetType;
488
134
      if (_givenTypes[i]->dataStoredIn(DataLocation::Storage) && targetType->isValueType())
489
2
      {
490
        // special case: convert storage reference type to value type - this is only
491
        // possible for library calls where we just forward the storage reference
492
2
        solAssert(_encodeAsLibraryTypes);
493
2
        solAssert(_givenTypes[i]->sizeOnStack() == 1);
494
2
      }
495
132
      else if (
496
132
        _givenTypes[i]->dataStoredIn(DataLocation::Storage) ||
497
132
        _givenTypes[i]->dataStoredIn(DataLocation::CallData) ||
498
132
        _givenTypes[i]->category() == Type::Category::StringLiteral ||
499
132
        _givenTypes[i]->category() == Type::Category::Function
500
132
      )
501
21
        type = _givenTypes[i]; // delay conversion
502
111
      else
503
111
      {
504
111
        convertType(*_givenTypes[i], *targetType, true);
505
111
        needCleanup = false;
506
111
      }
507
508
134
      if (auto arrayType = dynamic_cast<ArrayType const*>(type))
509
2
        ArrayUtils(m_context).copyArrayToMemory(*arrayType, _padToWordBoundaries);
510
132
      else if (auto arraySliceType = dynamic_cast<ArraySliceType const*>(type))
511
0
      {
512
0
        solAssert(
513
0
          arraySliceType->dataStoredIn(DataLocation::CallData) &&
514
0
          arraySliceType->isDynamicallySized() &&
515
0
          !arraySliceType->arrayType().baseType()->isDynamicallyEncoded(),
516
0
          ""
517
0
        );
518
0
        ArrayUtils(m_context).copyArrayToMemory(arraySliceType->arrayType(), _padToWordBoundaries);
519
0
      }
520
132
      else
521
132
        storeInMemoryDynamic(*type, _padToWordBoundaries, needCleanup);
522
134
    }
523
182
    stackPos += _givenTypes[i]->sizeOnStack();
524
182
  }
525
526
  // now copy the dynamic part
527
  // Stack: <v1> <v2> ... <vn> <mem_start> <dyn_head_1> ... <dyn_head_r> <end_of_mem>
528
115
  stackPos = 0;
529
115
  unsigned thisDynPointer = 0;
530
297
  for (size_t i = 0; i < _givenTypes.size(); ++i)
531
182
  {
532
182
    Type const* targetType = targetTypes[i];
533
182
    solAssert(!!targetType, "Externalable type expected.");
534
182
    if (targetType->isDynamicallySized() && !_copyDynamicDataInPlace)
535
48
    {
536
      // copy tail pointer (=mem_end - mem_start) to memory
537
48
      assertThrow(
538
48
        (2 + dynPointers) <= 16,
539
48
        StackTooDeepError,
540
48
        util::stackTooDeepString
541
48
      );
542
48
      m_context << dupInstruction(2 + dynPointers) << Instruction::DUP2;
543
48
      m_context << Instruction::SUB;
544
48
      m_context << dupInstruction(2 + dynPointers - thisDynPointer);
545
48
      m_context << Instruction::MSTORE;
546
      // stack: ... <end_of_mem>
547
48
      if (_givenTypes[i]->category() == Type::Category::StringLiteral)
548
8
      {
549
8
        auto const& strType = dynamic_cast<StringLiteralType const&>(*_givenTypes[i]);
550
8
        auto const size = strType.value().size();
551
8
        m_context << u256(size);
552
8
        storeInMemoryDynamic(*TypeProvider::uint256(), true);
553
        // stack: ... <end_of_mem'>
554
555
        // Do not output empty padding for zero-length strings.
556
        // TODO: handle this in storeInMemoryDynamic
557
8
        if (size != 0)
558
3
          storeInMemoryDynamic(strType, _padToWordBoundaries);
559
8
      }
560
40
      else
561
40
      {
562
40
        ArrayType const* arrayType = nullptr;
563
40
        switch (_givenTypes[i]->category())
564
40
        {
565
39
          case Type::Category::Array:
566
39
            arrayType = dynamic_cast<ArrayType const*>(_givenTypes[i]);
567
39
            break;
568
1
          case Type::Category::ArraySlice:
569
1
            arrayType = &dynamic_cast<ArraySliceType const*>(_givenTypes[i])->arrayType();
570
1
            solAssert(
571
1
              arrayType->isDynamicallySized() &&
572
1
              arrayType->dataStoredIn(DataLocation::CallData) &&
573
1
              !arrayType->baseType()->isDynamicallyEncoded(),
574
1
              ""
575
1
            );
576
1
            break;
577
1
          default:
578
0
            solAssert(false, "Unknown dynamic type.");
579
0
            break;
580
40
        }
581
        // now copy the array
582
40
        copyToStackTop(argSize - stackPos + dynPointers + 2, arrayType->sizeOnStack());
583
        // stack: ... <end_of_mem> <value...>
584
        // copy length to memory
585
40
        m_context << dupInstruction(1 + arrayType->sizeOnStack());
586
40
        ArrayUtils(m_context).retrieveLength(*arrayType, 1);
587
        // stack: ... <end_of_mem> <value...> <end_of_mem'> <length>
588
40
        storeInMemoryDynamic(*TypeProvider::uint256(), true);
589
        // stack: ... <end_of_mem> <value...> <end_of_mem''>
590
        // copy the new memory pointer
591
40
        m_context << swapInstruction(arrayType->sizeOnStack() + 1) << Instruction::POP;
592
        // stack: ... <end_of_mem''> <value...>
593
        // copy data part
594
40
        ArrayUtils(m_context).copyArrayToMemory(*arrayType, _padToWordBoundaries);
595
        // stack: ... <end_of_mem'''>
596
40
      }
597
598
48
      thisDynPointer++;
599
48
    }
600
182
    stackPos += _givenTypes[i]->sizeOnStack();
601
182
  }
602
603
  // remove unneeded stack elements (and retain memory pointer)
604
115
  m_context << swapInstruction(argSize + dynPointers + 1);
605
115
  popStackSlots(argSize + dynPointers + 1);
606
115
}
607
608
void CompilerUtils::abiEncodeV2(
609
  TypePointers const& _givenTypes,
610
  TypePointers const& _targetTypes,
611
  bool _encodeAsLibraryTypes,
612
  bool _padToWordBoundaries
613
)
614
11.8k
{
615
11.8k
  if (!_padToWordBoundaries)
616
11.8k
    solAssert(!_encodeAsLibraryTypes, "Library calls cannot be packed.");
617
618
  // stack: <$value0> <$value1> ... <$value(n-1)> <$headStart>
619
620
11.8k
  std::string encoderName =
621
11.8k
    _padToWordBoundaries ?
622
10.7k
    m_context.abiFunctions().tupleEncoderReversed(_givenTypes, _targetTypes, _encodeAsLibraryTypes) :
623
11.8k
    m_context.abiFunctions().tupleEncoderPackedReversed(_givenTypes, _targetTypes);
624
11.8k
  m_context.callYulFunction(encoderName, sizeOnStack(_givenTypes) + 1, 1);
625
11.8k
}
626
627
void CompilerUtils::abiDecodeV2(TypePointers const& _parameterTypes, bool _fromMemory)
628
11.9k
{
629
  // stack: <source_offset> <length> [stack top]
630
11.9k
  m_context << Instruction::DUP2 << Instruction::ADD;
631
11.9k
  m_context << Instruction::SWAP1;
632
  // stack: <end> <start>
633
11.9k
  std::string decoderName = m_context.abiFunctions().tupleDecoder(_parameterTypes, _fromMemory);
634
11.9k
  m_context.callYulFunction(decoderName, 2, sizeOnStack(_parameterTypes));
635
11.9k
}
636
637
void CompilerUtils::zeroInitialiseMemoryArray(ArrayType const& _type)
638
2.58k
{
639
2.58k
  if (_type.baseType()->hasSimpleZeroValueInMemory())
640
1.24k
  {
641
1.24k
    solAssert(_type.baseType()->isValueType());
642
1.24k
    Whiskers templ(R"({
643
1.24k
      let size := mul(length, <element_size>)
644
1.24k
      // cheap way of zero-initializing a memory range
645
1.24k
      calldatacopy(memptr, calldatasize(), size)
646
1.24k
      memptr := add(memptr, size)
647
1.24k
    })");
648
1.24k
    templ("element_size", std::to_string(_type.memoryStride()));
649
1.24k
    m_context.appendInlineAssembly(templ.render(), {"length", "memptr"});
650
1.24k
  }
651
1.34k
  else
652
1.34k
  {
653
1.34k
    auto repeat = m_context.newTag();
654
1.34k
    m_context << repeat;
655
1.34k
    pushZeroValue(*_type.baseType());
656
1.34k
    storeInMemoryDynamic(*_type.baseType());
657
1.34k
    m_context << Instruction::SWAP1 << u256(1) << Instruction::SWAP1;
658
1.34k
    m_context << Instruction::SUB << Instruction::SWAP1;
659
1.34k
    m_context << Instruction::DUP2;
660
1.34k
    m_context.appendConditionalJumpTo(repeat);
661
1.34k
  }
662
2.58k
  m_context << Instruction::SWAP1 << Instruction::POP;
663
2.58k
}
664
665
void CompilerUtils::memoryCopy32()
666
34
{
667
  // Stack here: size target source
668
669
34
  m_context.appendInlineAssembly(R"(
670
34
    {
671
34
      for { let i := 0 } lt(i, len) { i := add(i, 32) } {
672
34
        mstore(add(dst, i), mload(add(src, i)))
673
34
      }
674
34
    }
675
34
  )",
676
34
    { "len", "dst", "src" }
677
34
  );
678
34
  m_context << Instruction::POP << Instruction::POP << Instruction::POP;
679
34
}
680
681
void CompilerUtils::memoryCopy()
682
0
{
683
  // Stack here: size target source
684
685
0
  m_context.appendInlineAssembly(R"(
686
0
    {
687
0
      // copy 32 bytes at once
688
0
      for
689
0
        {}
690
0
        iszero(lt(len, 32))
691
0
        {
692
0
          dst := add(dst, 32)
693
0
          src := add(src, 32)
694
0
          len := sub(len, 32)
695
0
        }
696
0
        { mstore(dst, mload(src)) }
697
0
698
0
      // copy the remainder (0 < len < 32)
699
0
      let mask := sub(exp(256, sub(32, len)), 1)
700
0
      let srcpart := and(mload(src), not(mask))
701
0
      let dstpart := and(mload(dst), mask)
702
0
      mstore(dst, or(srcpart, dstpart))
703
0
    }
704
0
  )",
705
0
    { "len", "dst", "src" }
706
0
  );
707
0
  m_context << Instruction::POP << Instruction::POP << Instruction::POP;
708
0
}
709
710
void CompilerUtils::splitExternalFunctionType(bool _leftAligned)
711
87
{
712
  // We have to split the left-aligned <address><function identifier> into two stack slots:
713
  // address (right aligned), function identifier (right aligned)
714
87
  if (_leftAligned)
715
41
  {
716
41
    m_context << Instruction::DUP1;
717
41
    rightShiftNumberOnStack(64 + 32);
718
    // <input> <address>
719
41
    m_context << Instruction::SWAP1;
720
41
    rightShiftNumberOnStack(64);
721
41
  }
722
46
  else
723
46
  {
724
46
    m_context << Instruction::DUP1;
725
46
    rightShiftNumberOnStack(32);
726
46
    m_context << ((u256(1) << 160) - 1) << Instruction::AND << Instruction::SWAP1;
727
46
  }
728
87
  m_context << u256(0xffffffffUL) << Instruction::AND;
729
87
}
730
731
void CompilerUtils::combineExternalFunctionType(bool _leftAligned)
732
71
{
733
  // <address> <function_id>
734
71
  m_context << u256(0xffffffffUL) << Instruction::AND << Instruction::SWAP1;
735
71
  if (!_leftAligned)
736
22
    m_context << ((u256(1) << 160) - 1) << Instruction::AND;
737
71
  leftShiftNumberOnStack(32);
738
71
  m_context << Instruction::OR;
739
71
  if (_leftAligned)
740
49
    leftShiftNumberOnStack(64);
741
71
}
742
743
void CompilerUtils::pushCombinedFunctionEntryLabel(Declaration const& _function, bool _runtimeOnly)
744
10.7k
{
745
10.7k
  m_context << m_context.functionEntryLabel(_function).pushTag();
746
  // If there is a runtime context, we have to merge both labels into the same
747
  // stack slot in case we store it in storage.
748
10.7k
  if (CompilerContext* rtc = m_context.runtimeContext())
749
251
  {
750
251
    leftShiftNumberOnStack(32);
751
251
    if (_runtimeOnly)
752
52
      m_context <<
753
52
        rtc->functionEntryLabel(_function).toSubAssemblyTag(m_context.runtimeSub()) <<
754
52
        Instruction::OR;
755
251
  }
756
10.7k
}
757
758
void CompilerUtils::convertType(
759
  Type const& _typeOnStack,
760
  Type const& _targetType,
761
  bool _cleanupNeeded,
762
  bool _chopSignBits,
763
  bool _asPartOfArgumentDecoding
764
)
765
510k
{
766
  // For a type extension, we need to remove all higher-order bits that we might have ignored in
767
  // previous operations.
768
  // @todo: store in the AST whether the operand might have "dirty" higher order bits
769
770
510k
  if (_typeOnStack == _targetType && !_cleanupNeeded)
771
61.4k
    return;
772
448k
  Type::Category stackTypeCategory = _typeOnStack.category();
773
448k
  Type::Category targetTypeCategory = _targetType.category();
774
775
448k
  if (stackTypeCategory == Type::Category::UserDefinedValueType)
776
76
  {
777
76
    solAssert(_cleanupNeeded);
778
76
    auto& userDefined = dynamic_cast<UserDefinedValueType const&>(_typeOnStack);
779
76
    solAssert(_typeOnStack == _targetType || _targetType == userDefined.underlyingType());
780
76
    return convertType(
781
76
      userDefined.underlyingType(),
782
76
      _targetType,
783
76
      _cleanupNeeded,
784
76
      _chopSignBits,
785
76
      _asPartOfArgumentDecoding
786
76
    );
787
76
  }
788
448k
  if (targetTypeCategory == Type::Category::UserDefinedValueType)
789
76
  {
790
76
    solAssert(_cleanupNeeded);
791
76
    auto& userDefined = dynamic_cast<UserDefinedValueType const&>(_targetType);
792
76
    solAssert(_typeOnStack.isImplicitlyConvertibleTo(userDefined.underlyingType()));
793
76
    return convertType(
794
76
      _typeOnStack,
795
76
      userDefined.underlyingType(),
796
76
      _cleanupNeeded,
797
76
      _chopSignBits,
798
76
      _asPartOfArgumentDecoding
799
76
    );
800
76
  }
801
802
448k
  if (auto contrType = dynamic_cast<ContractType const*>(&_typeOnStack))
803
448k
    solAssert(!contrType->isSuper(), "Cannot convert magic variable \"super\"");
804
805
448k
  bool enumOverflowCheckPending = (targetTypeCategory == Type::Category::Enum || stackTypeCategory == Type::Category::Enum);
806
448k
  bool chopSignBitsPending = _chopSignBits && targetTypeCategory == Type::Category::Integer;
807
448k
  if (chopSignBitsPending)
808
3.32k
  {
809
3.32k
    IntegerType const& targetIntegerType = dynamic_cast<IntegerType const&>(_targetType);
810
3.32k
    chopSignBitsPending = targetIntegerType.isSigned();
811
3.32k
  }
812
813
448k
  if (targetTypeCategory == Type::Category::FixedPoint)
814
448k
    solUnimplemented("Not yet implemented - FixedPointType.");
815
816
448k
  switch (stackTypeCategory)
817
448k
  {
818
9.75k
  case Type::Category::FixedBytes:
819
9.75k
  {
820
9.75k
    FixedBytesType const& typeOnStack = dynamic_cast<FixedBytesType const&>(_typeOnStack);
821
9.75k
    if (targetTypeCategory == Type::Category::Integer)
822
127
    {
823
      // conversion from bytes to integer. no need to clean the high bit
824
      // only to shift right because of opposite alignment
825
127
      IntegerType const& targetIntegerType = dynamic_cast<IntegerType const&>(_targetType);
826
127
      rightShiftNumberOnStack(256 - typeOnStack.numBytes() * 8);
827
127
      if (targetIntegerType.numBits() < typeOnStack.numBytes() * 8)
828
0
        convertType(IntegerType(typeOnStack.numBytes() * 8), _targetType, _cleanupNeeded);
829
127
    }
830
9.63k
    else if (targetTypeCategory == Type::Category::Address)
831
12
    {
832
12
      solAssert(typeOnStack.numBytes() * 8 == 160);
833
12
      rightShiftNumberOnStack(256 - 160);
834
12
    }
835
9.61k
    else
836
9.61k
    {
837
      // clear for conversion to longer bytes
838
9.61k
      solAssert(targetTypeCategory == Type::Category::FixedBytes, "Invalid type conversion requested.");
839
9.61k
      FixedBytesType const& targetType = dynamic_cast<FixedBytesType const&>(_targetType);
840
9.61k
      if (typeOnStack.numBytes() == 0 || targetType.numBytes() == 0)
841
0
        m_context << Instruction::POP << u256(0);
842
9.61k
      else if (targetType.numBytes() > typeOnStack.numBytes() || _cleanupNeeded)
843
9.60k
      {
844
9.60k
        unsigned bytes = std::min(typeOnStack.numBytes(), targetType.numBytes());
845
9.60k
        m_context << ((u256(1) << (256 - bytes * 8)) - 1);
846
9.60k
        m_context << Instruction::NOT << Instruction::AND;
847
9.60k
      }
848
9.61k
    }
849
9.75k
    break;
850
9.75k
  }
851
9.75k
  case Type::Category::Enum:
852
128
    solAssert(_targetType == _typeOnStack || targetTypeCategory == Type::Category::Integer);
853
128
    if (enumOverflowCheckPending)
854
128
    {
855
128
      EnumType const& enumType = dynamic_cast<decltype(enumType)>(_typeOnStack);
856
128
      solAssert(enumType.numberOfMembers() > 0, "empty enum should have caused a parser error.");
857
128
      m_context << u256(enumType.numberOfMembers() - 1) << Instruction::DUP2 << Instruction::GT;
858
128
      if (_asPartOfArgumentDecoding)
859
2
        m_context.appendConditionalRevert(false, "Enum out of range");
860
126
      else
861
126
        m_context.appendConditionalPanic(util::PanicCode::EnumConversionError);
862
128
      enumOverflowCheckPending = false;
863
128
    }
864
128
    break;
865
128
  case Type::Category::FixedPoint:
866
0
    solUnimplemented("Not yet implemented - FixedPointType.");
867
14.1k
  case Type::Category::Address:
868
132k
  case Type::Category::Integer:
869
136k
  case Type::Category::Contract:
870
374k
  case Type::Category::RationalNumber:
871
374k
    if (targetTypeCategory == Type::Category::FixedBytes)
872
521
    {
873
521
      solAssert(
874
521
        stackTypeCategory == Type::Category::Address ||
875
521
        stackTypeCategory == Type::Category::Integer ||
876
521
        stackTypeCategory == Type::Category::RationalNumber,
877
521
        "Invalid conversion to FixedBytesType requested."
878
521
      );
879
      // conversion from bytes to string. no need to clean the high bit
880
      // only to shift left because of opposite alignment
881
521
      FixedBytesType const& targetBytesType = dynamic_cast<FixedBytesType const&>(_targetType);
882
521
      if (auto typeOnStack = dynamic_cast<IntegerType const*>(&_typeOnStack))
883
125
      {
884
125
        if (targetBytesType.numBytes() * 8 > typeOnStack->numBits())
885
0
          cleanHigherOrderBits(*typeOnStack);
886
125
      }
887
396
      else if (stackTypeCategory == Type::Category::Address)
888
396
        solAssert(targetBytesType.numBytes() * 8 == 160);
889
521
      leftShiftNumberOnStack(256 - targetBytesType.numBytes() * 8);
890
521
    }
891
374k
    else if (targetTypeCategory == Type::Category::Enum)
892
13
    {
893
13
      solAssert(stackTypeCategory != Type::Category::Address, "Invalid conversion to EnumType requested.");
894
13
      solAssert(_typeOnStack.mobileType());
895
      // just clean
896
13
      convertType(_typeOnStack, *_typeOnStack.mobileType(), true);
897
13
      EnumType const& enumType = dynamic_cast<decltype(enumType)>(_targetType);
898
13
      solAssert(enumType.numberOfMembers() > 0, "empty enum should have caused a parser error.");
899
13
      m_context << u256(enumType.numberOfMembers() - 1) << Instruction::DUP2 << Instruction::GT;
900
13
      m_context.appendConditionalPanic(util::PanicCode::EnumConversionError);
901
13
      enumOverflowCheckPending = false;
902
13
    }
903
374k
    else if (targetTypeCategory == Type::Category::FixedPoint)
904
0
    {
905
0
      solAssert(
906
0
        stackTypeCategory == Type::Category::Integer ||
907
0
        stackTypeCategory == Type::Category::RationalNumber ||
908
0
        stackTypeCategory == Type::Category::FixedPoint,
909
0
        "Invalid conversion to FixedMxNType requested."
910
0
      );
911
      //shift all integer bits onto the left side of the fixed type
912
0
      FixedPointType const& targetFixedPointType = dynamic_cast<FixedPointType const&>(_targetType);
913
0
      if (auto typeOnStack = dynamic_cast<IntegerType const*>(&_typeOnStack))
914
0
        if (targetFixedPointType.numBits() > typeOnStack->numBits())
915
0
          cleanHigherOrderBits(*typeOnStack);
916
0
      solUnimplemented("Not yet implemented - FixedPointType.");
917
0
    }
918
374k
    else
919
374k
    {
920
374k
      solAssert(
921
374k
        targetTypeCategory == Type::Category::Integer ||
922
374k
        targetTypeCategory == Type::Category::Contract ||
923
374k
        targetTypeCategory == Type::Category::Address,
924
374k
        ""
925
374k
      );
926
374k
      IntegerType addressType(160);
927
374k
      IntegerType const& targetType = targetTypeCategory == Type::Category::Integer
928
374k
        ? dynamic_cast<IntegerType const&>(_targetType) : addressType;
929
374k
      if (stackTypeCategory == Type::Category::RationalNumber)
930
237k
      {
931
237k
        RationalNumberType const& constType = dynamic_cast<RationalNumberType const&>(_typeOnStack);
932
        // We know that the stack is clean, we only have to clean for a narrowing conversion
933
        // where cleanup is forced.
934
237k
        solUnimplementedAssert(!constType.isFractional(), "Not yet implemented - FixedPointType.");
935
237k
        if (targetType.numBits() < constType.integerType()->numBits() && _cleanupNeeded)
936
0
          cleanHigherOrderBits(targetType);
937
237k
      }
938
136k
      else
939
136k
      {
940
136k
        IntegerType const& typeOnStack = stackTypeCategory == Type::Category::Integer
941
136k
          ? dynamic_cast<IntegerType const&>(_typeOnStack) : addressType;
942
        // Widening: clean up according to source type width
943
        // Non-widening and force: clean up according to target type bits
944
136k
        if (targetType.numBits() > typeOnStack.numBits())
945
657
          cleanHigherOrderBits(typeOnStack);
946
135k
        else if (_cleanupNeeded)
947
134k
          cleanHigherOrderBits(targetType);
948
136k
        if (chopSignBitsPending)
949
1.51k
        {
950
1.51k
          if (targetType.numBits() < 256)
951
1.51k
            m_context
952
1.51k
              << ((u256(1) << targetType.numBits()) - 1)
953
1.51k
              << Instruction::AND;
954
1.51k
          chopSignBitsPending = false;
955
1.51k
        }
956
136k
      }
957
374k
    }
958
374k
    break;
959
374k
  case Type::Category::StringLiteral:
960
24.6k
  {
961
24.6k
    auto const& literalType = dynamic_cast<StringLiteralType const&>(_typeOnStack);
962
24.6k
    std::string const& value = literalType.value();
963
24.6k
    bytesConstRef data(value);
964
24.6k
    if (targetTypeCategory == Type::Category::FixedBytes)
965
7.42k
    {
966
7.42k
      unsigned const numBytes = dynamic_cast<FixedBytesType const&>(_targetType).numBytes();
967
7.42k
      solAssert(data.size() <= 32);
968
7.42k
      m_context << (u256(h256(data, h256::AlignLeft)) & (~(u256(-1) >> (8 * numBytes))));
969
7.42k
    }
970
17.2k
    else if (targetTypeCategory == Type::Category::Array)
971
17.2k
    {
972
17.2k
      auto const& arrayType = dynamic_cast<ArrayType const&>(_targetType);
973
17.2k
      solAssert(arrayType.isByteArrayOrString());
974
17.2k
      size_t storageSize = 32 + ((data.size() + 31) / 32) * 32;
975
17.2k
      allocateMemory(storageSize);
976
      // stack: mempos
977
17.2k
      m_context << Instruction::DUP1 << u256(data.size());
978
17.2k
      storeInMemoryDynamic(*TypeProvider::uint256());
979
      // stack: mempos datapos
980
17.2k
      storeStringData(data);
981
17.2k
    }
982
0
    else
983
17.2k
      solAssert(
984
24.6k
        false,
985
24.6k
        "Invalid conversion from string literal to " + _targetType.toString(false) + " requested."
986
24.6k
      );
987
24.6k
    break;
988
24.6k
  }
989
24.6k
  case Type::Category::Array:
990
6.57k
  {
991
6.57k
    auto const& typeOnStack = dynamic_cast<ArrayType const&>(_typeOnStack);
992
6.57k
    if (_targetType.category() == Type::Category::FixedBytes)
993
12
    {
994
12
      solAssert(
995
12
        typeOnStack.isByteArray(),
996
12
        "Array types other than bytes not convertible to bytesNN."
997
12
      );
998
12
      solAssert(typeOnStack.isDynamicallySized());
999
1000
12
      bool fromCalldata = typeOnStack.dataStoredIn(DataLocation::CallData);
1001
12
      solAssert(typeOnStack.sizeOnStack() == (fromCalldata ? 2 : 1));
1002
12
      if (fromCalldata)
1003
2
        m_context << Instruction::SWAP1;
1004
1005
12
      m_context.callYulFunction(
1006
12
        m_context.utilFunctions().bytesToFixedBytesConversionFunction(
1007
12
          typeOnStack,
1008
12
          dynamic_cast<FixedBytesType const &>(_targetType)
1009
12
        ),
1010
12
        typeOnStack.sizeOnStack(),
1011
12
        1
1012
12
      );
1013
12
      break;
1014
12
    }
1015
6.56k
    solAssert(targetTypeCategory == stackTypeCategory);
1016
6.56k
    auto const& targetType = dynamic_cast<ArrayType const&>(_targetType);
1017
6.56k
    switch (targetType.location())
1018
6.56k
    {
1019
3.55k
    case DataLocation::Storage:
1020
      // Other cases are done explicitly in LValue::storeValue, and only possible by assignment.
1021
3.55k
      solAssert(
1022
3.55k
        (targetType.isPointer() || (typeOnStack.isByteArrayOrString() && targetType.isByteArrayOrString())) &&
1023
3.55k
        typeOnStack.location() == DataLocation::Storage,
1024
3.55k
        "Invalid conversion to storage type."
1025
3.55k
      );
1026
3.55k
      break;
1027
3.55k
    case DataLocation::Transient:
1028
0
      solUnimplemented("Transient data location is only supported for value types.");
1029
0
      break;
1030
2.99k
    case DataLocation::Memory:
1031
2.99k
    {
1032
      // Copy the array to a free position in memory, unless it is already in memory.
1033
2.99k
      if (typeOnStack.location() != DataLocation::Memory)
1034
2.92k
      {
1035
2.92k
        if (
1036
2.92k
          typeOnStack.dataStoredIn(DataLocation::CallData) &&
1037
2.92k
          typeOnStack.baseType()->isDynamicallyEncoded()
1038
2.92k
        )
1039
22
        {
1040
22
          solAssert(m_context.useABICoderV2());
1041
          // stack: offset length(optional in case of dynamically sized array)
1042
22
          solAssert(typeOnStack.sizeOnStack() == (typeOnStack.isDynamicallySized() ? 2 : 1));
1043
22
          if (typeOnStack.isDynamicallySized())
1044
5
            m_context << Instruction::SWAP1;
1045
1046
22
          m_context.callYulFunction(
1047
22
            m_context.utilFunctions().conversionFunction(typeOnStack, targetType),
1048
22
            typeOnStack.isDynamicallySized() ? 2 : 1,
1049
22
            1
1050
22
          );
1051
22
        }
1052
2.90k
        else
1053
2.90k
        {
1054
          // stack: <source ref> (variably sized)
1055
2.90k
          unsigned stackSize = typeOnStack.sizeOnStack();
1056
2.90k
          ArrayUtils(m_context).retrieveLength(typeOnStack);
1057
1058
          // allocate memory
1059
          // stack: <source ref> (variably sized) <length>
1060
2.90k
          m_context << Instruction::DUP1;
1061
2.90k
          ArrayUtils(m_context).convertLengthToSize(targetType, true);
1062
          // stack: <source ref> (variably sized) <length> <size>
1063
2.90k
          if (targetType.isDynamicallySized())
1064
2.55k
            m_context << u256(0x20) << Instruction::ADD;
1065
2.90k
          allocateMemory();
1066
          // stack: <source ref> (variably sized) <length> <mem start>
1067
2.90k
          m_context << Instruction::DUP1;
1068
2.90k
          moveIntoStack(2 + stackSize);
1069
2.90k
          if (targetType.isDynamicallySized())
1070
2.55k
          {
1071
2.55k
            m_context << Instruction::DUP2;
1072
2.55k
            storeInMemoryDynamic(*TypeProvider::uint256());
1073
2.55k
          }
1074
          // stack: <mem start> <source ref> (variably sized) <length> <mem data pos>
1075
2.90k
          if (targetType.baseType()->isValueType())
1076
2.37k
          {
1077
2.37k
            copyToStackTop(2 + stackSize, stackSize);
1078
2.37k
            ArrayUtils(m_context).copyArrayToMemory(typeOnStack);
1079
2.37k
          }
1080
530
          else
1081
530
          {
1082
530
            m_context << u256(0) << Instruction::SWAP1;
1083
            // stack: <mem start> <source ref> (variably sized) <length> <counter> <mem data pos>
1084
530
            auto repeat = m_context.newTag();
1085
530
            m_context << repeat;
1086
530
            m_context << Instruction::DUP3 << Instruction::DUP3;
1087
530
            m_context << Instruction::LT << Instruction::ISZERO;
1088
530
            auto loopEnd = m_context.appendConditionalJump();
1089
530
            copyToStackTop(3 + stackSize, stackSize);
1090
530
            copyToStackTop(2 + stackSize, 1);
1091
530
            ArrayUtils(m_context).accessIndex(typeOnStack, false);
1092
530
            if (typeOnStack.location() == DataLocation::Storage)
1093
524
              StorageItem(m_context, *typeOnStack.baseType()).retrieveValue(SourceLocation(), true);
1094
530
            convertType(*typeOnStack.baseType(), *targetType.baseType(), _cleanupNeeded);
1095
530
            storeInMemoryDynamic(*targetType.baseType(), true);
1096
530
            m_context << Instruction::SWAP1 << u256(1) << Instruction::ADD;
1097
530
            m_context << Instruction::SWAP1;
1098
530
            m_context.appendJumpTo(repeat);
1099
530
            m_context << loopEnd;
1100
530
            m_context << Instruction::POP;
1101
530
          }
1102
          // stack: <mem start> <source ref> (variably sized) <length> <mem data pos updated>
1103
2.90k
          popStackSlots(2 + stackSize);
1104
          // Stack: <mem start>
1105
2.90k
        }
1106
2.92k
      }
1107
2.99k
      break;
1108
2.99k
    }
1109
2.99k
    case DataLocation::CallData:
1110
17
      solAssert(
1111
17
        ((targetType.isByteArrayOrString() && typeOnStack.isByteArrayOrString()) || _typeOnStack == _targetType) &&
1112
17
        typeOnStack.location() == DataLocation::CallData,
1113
17
        "Invalid conversion to calldata type."
1114
17
      );
1115
17
      break;
1116
6.56k
    }
1117
6.56k
    break;
1118
6.56k
  }
1119
6.56k
  case Type::Category::ArraySlice:
1120
60
  {
1121
60
    auto& typeOnStack = dynamic_cast<ArraySliceType const&>(_typeOnStack);
1122
60
    if (_targetType.category() == Type::Category::FixedBytes)
1123
7
    {
1124
7
      solAssert(
1125
7
        typeOnStack.arrayType().isByteArray(),
1126
7
        "Array types other than bytes not convertible to bytesNN."
1127
7
      );
1128
7
      solAssert(typeOnStack.isDynamicallySized());
1129
7
      solAssert(typeOnStack.dataStoredIn(DataLocation::CallData));
1130
7
      solAssert(typeOnStack.sizeOnStack() == 2);
1131
1132
7
      m_context << Instruction::SWAP1;
1133
7
      m_context.callYulFunction(
1134
7
        m_context.utilFunctions().bytesToFixedBytesConversionFunction(
1135
7
          typeOnStack.arrayType(),
1136
7
          dynamic_cast<FixedBytesType const &>(_targetType)
1137
7
        ),
1138
7
        2,
1139
7
        1
1140
7
      );
1141
7
      break;
1142
7
    }
1143
1144
53
    solAssert(_targetType.category() == Type::Category::Array);
1145
53
    auto const& targetArrayType = dynamic_cast<ArrayType const&>(_targetType);
1146
53
    solAssert(
1147
53
      typeOnStack.arrayType().isImplicitlyConvertibleTo(targetArrayType) ||
1148
53
      (typeOnStack.arrayType().isByteArrayOrString() && targetArrayType.isByteArrayOrString())
1149
53
    );
1150
53
    solAssert(
1151
53
      typeOnStack.arrayType().dataStoredIn(DataLocation::CallData) &&
1152
53
      typeOnStack.arrayType().isDynamicallySized() &&
1153
53
      !typeOnStack.arrayType().baseType()->isDynamicallyEncoded()
1154
53
    );
1155
53
    if (!_targetType.dataStoredIn(DataLocation::CallData))
1156
2
      return convertType(typeOnStack.arrayType(), _targetType);
1157
51
    break;
1158
53
  }
1159
1.17k
  case Type::Category::Struct:
1160
1.17k
  {
1161
1.17k
    solAssert(targetTypeCategory == stackTypeCategory);
1162
1.17k
    auto& targetType = dynamic_cast<StructType const&>(_targetType);
1163
1.17k
    auto& typeOnStack = dynamic_cast<StructType const&>(_typeOnStack);
1164
1.17k
    switch (targetType.location())
1165
1.17k
    {
1166
74
    case DataLocation::Storage:
1167
      // Other cases are done explicitly in LValue::storeValue, and only possible by assignment.
1168
74
      solAssert(
1169
74
        targetType.isPointer() &&
1170
74
        typeOnStack.location() == DataLocation::Storage,
1171
74
        "Invalid conversion to storage type."
1172
74
      );
1173
74
      break;
1174
74
    case DataLocation::Transient:
1175
0
      solUnimplemented("Transient data location is only supported for value types.");
1176
0
      break;
1177
1.10k
    case DataLocation::Memory:
1178
      // Copy the array to a free position in memory, unless it is already in memory.
1179
1.10k
      switch (typeOnStack.location())
1180
1.10k
      {
1181
1.05k
      case DataLocation::Storage:
1182
1.05k
      {
1183
1.05k
        auto conversionImpl =
1184
1.05k
          [typeOnStack = &typeOnStack, targetType = &targetType](CompilerContext& _context)
1185
1.05k
        {
1186
1.05k
          CompilerUtils utils(_context);
1187
          // stack: <source ref>
1188
1.05k
          utils.allocateMemory(typeOnStack->memoryDataSize());
1189
1.05k
          _context << Instruction::SWAP1 << Instruction::DUP2;
1190
          // stack: <memory ptr> <source ref> <memory ptr>
1191
1.05k
          for (auto const& member: typeOnStack->members(nullptr))
1192
2.12k
          {
1193
2.12k
            solAssert(!member.type->containsNestedMapping());
1194
2.12k
            std::pair<u256, unsigned> const& offsets = typeOnStack->storageOffsetsOfMember(member.name);
1195
2.12k
            _context << offsets.first << Instruction::DUP3 << Instruction::ADD;
1196
2.12k
            _context << u256(offsets.second);
1197
2.12k
            StorageItem(_context, *member.type).retrieveValue(SourceLocation(), true);
1198
2.12k
            Type const* targetMemberType = targetType->memberType(member.name);
1199
2.12k
            solAssert(!!targetMemberType, "Member not found in target type.");
1200
2.12k
            utils.convertType(*member.type, *targetMemberType, true);
1201
2.12k
            utils.storeInMemoryDynamic(*targetMemberType, true);
1202
2.12k
          }
1203
1.05k
          _context << Instruction::POP << Instruction::POP;
1204
1.05k
        };
1205
1.05k
        if (typeOnStack.recursive())
1206
0
          m_context.callLowLevelFunction(
1207
0
            "$convertRecursiveArrayStorageToMemory_" + typeOnStack.identifier() + "_to_" + targetType.identifier(),
1208
0
            1,
1209
0
            1,
1210
0
            conversionImpl
1211
0
          );
1212
1.05k
        else
1213
1.05k
          conversionImpl(m_context);
1214
1.05k
        break;
1215
0
      }
1216
0
      case DataLocation::Transient:
1217
0
        solUnimplemented("Transient data location is only supported for value types.");
1218
0
        break;
1219
23
      case DataLocation::CallData:
1220
23
      {
1221
23
        if (typeOnStack.isDynamicallyEncoded())
1222
14
        {
1223
14
          solAssert(m_context.useABICoderV2());
1224
14
          m_context.callYulFunction(
1225
14
            m_context.utilFunctions().conversionFunction(typeOnStack, targetType),
1226
14
            1,
1227
14
            1
1228
14
          );
1229
14
        }
1230
9
        else
1231
9
        {
1232
9
          m_context << Instruction::DUP1;
1233
9
          m_context << Instruction::CALLDATASIZE;
1234
9
          m_context << Instruction::SUB;
1235
9
          abiDecode({&targetType}, false);
1236
9
        }
1237
23
        break;
1238
23
      }
1239
24
      case DataLocation::Memory:
1240
        // nothing to do
1241
24
        break;
1242
1.10k
      }
1243
1.10k
      break;
1244
1.10k
    case DataLocation::CallData:
1245
4
      solAssert(_typeOnStack == _targetType);
1246
      // nothing to do
1247
4
      break;
1248
1.17k
    }
1249
1.17k
    break;
1250
1.17k
  }
1251
1.17k
  case Type::Category::Tuple:
1252
1.01k
  {
1253
1.01k
    TupleType const& sourceTuple = dynamic_cast<TupleType const&>(_typeOnStack);
1254
1.01k
    TupleType const& targetTuple = dynamic_cast<TupleType const&>(_targetType);
1255
1.01k
    solAssert(targetTuple.components().size() == sourceTuple.components().size());
1256
1.01k
    unsigned depth = sourceTuple.sizeOnStack();
1257
3.16k
    for (size_t i = 0; i < sourceTuple.components().size(); ++i)
1258
2.14k
    {
1259
2.14k
      Type const* sourceType = sourceTuple.components()[i];
1260
2.14k
      Type const* targetType = targetTuple.components()[i];
1261
2.14k
      if (!sourceType)
1262
0
      {
1263
0
        solAssert(!targetType);
1264
0
        continue;
1265
0
      }
1266
2.14k
      unsigned sourceSize = sourceType->sizeOnStack();
1267
2.14k
      unsigned targetSize = targetType ? targetType->sizeOnStack() : 0;
1268
2.14k
      if (!targetType || *sourceType != *targetType || _cleanupNeeded)
1269
1.09k
      {
1270
1.09k
        if (targetType)
1271
742
        {
1272
742
          if (sourceSize > 0)
1273
714
            copyToStackTop(depth, sourceSize);
1274
742
          convertType(*sourceType, *targetType, _cleanupNeeded);
1275
742
        }
1276
1.09k
        if (sourceSize > 0 || targetSize > 0)
1277
1.09k
        {
1278
          // Move it back into its place.
1279
1.80k
          for (unsigned j = 0; j < std::min(sourceSize, targetSize); ++j)
1280
716
            m_context <<
1281
716
              swapInstruction(depth + targetSize - sourceSize) <<
1282
716
              Instruction::POP;
1283
          // Value shrank
1284
1.45k
          for (unsigned j = targetSize; j < sourceSize; ++j)
1285
360
          {
1286
360
            moveToStackTop(depth + targetSize - sourceSize, 1);
1287
360
            m_context << Instruction::POP;
1288
360
          }
1289
          // Value grew
1290
1.09k
          if (targetSize > sourceSize)
1291
28
            moveIntoStack(depth - sourceSize, targetSize - sourceSize);
1292
1.09k
        }
1293
1.09k
      }
1294
2.14k
      depth -= sourceSize;
1295
2.14k
    }
1296
1.01k
    break;
1297
1.01k
  }
1298
29.8k
  case Type::Category::Bool:
1299
29.8k
    solAssert(_targetType == _typeOnStack, "Invalid conversion for bool.");
1300
29.8k
    if (_cleanupNeeded)
1301
29.8k
      m_context << Instruction::ISZERO << Instruction::ISZERO;
1302
29.8k
    break;
1303
152
  default:
1304
    // we used to allow conversions from function to address
1305
152
    solAssert(!(stackTypeCategory == Type::Category::Function && targetTypeCategory == Type::Category::Address));
1306
152
    if (stackTypeCategory == Type::Category::Function && targetTypeCategory == Type::Category::Function)
1307
150
    {
1308
150
      FunctionType const& typeOnStack = dynamic_cast<FunctionType const&>(_typeOnStack);
1309
150
      FunctionType const& targetType = dynamic_cast<FunctionType const&>(_targetType);
1310
150
      solAssert(
1311
150
        typeOnStack.isImplicitlyConvertibleTo(targetType) &&
1312
150
        typeOnStack.sizeOnStack() == targetType.sizeOnStack() &&
1313
150
        (typeOnStack.kind() == FunctionType::Kind::Internal || typeOnStack.kind() == FunctionType::Kind::External) &&
1314
150
        typeOnStack.kind() == targetType.kind(),
1315
150
        "Invalid function type conversion requested."
1316
150
      );
1317
150
    }
1318
2
    else
1319
      // All other types should not be convertible to non-equal types.
1320
152
      solAssert(_typeOnStack == _targetType, "Invalid type conversion requested.");
1321
1322
152
    if (_cleanupNeeded && _targetType.canBeStored() && _targetType.storageBytes() < 32)
1323
118
      m_context
1324
118
        << ((u256(1) << (8 * _targetType.storageBytes())) - 1)
1325
118
        << Instruction::AND;
1326
152
    break;
1327
448k
  }
1328
1329
448k
  solAssert(!enumOverflowCheckPending, "enum overflow checking missing.");
1330
448k
  solAssert(!chopSignBitsPending, "forgot to chop the sign bits.");
1331
448k
}
1332
1333
void CompilerUtils::pushZeroValue(Type const& _type)
1334
24.8k
{
1335
24.8k
  if (auto const* funType = dynamic_cast<FunctionType const*>(&_type))
1336
310
  {
1337
310
    if (funType->kind() == FunctionType::Kind::Internal)
1338
158
    {
1339
158
      m_context << m_context.lowLevelFunctionTag("$invalidFunction", 0, 0, [](CompilerContext& _context) {
1340
83
        _context.appendPanic(util::PanicCode::InvalidInternalFunction);
1341
83
      });
1342
158
      if (CompilerContext* runCon = m_context.runtimeContext())
1343
39
      {
1344
39
        leftShiftNumberOnStack(32);
1345
39
        m_context << runCon->lowLevelFunctionTag("$invalidFunction", 0, 0, [](CompilerContext& _context) {
1346
26
          _context.appendPanic(util::PanicCode::InvalidInternalFunction);
1347
26
        }).toSubAssemblyTag(m_context.runtimeSub());
1348
39
        m_context << Instruction::OR;
1349
39
      }
1350
158
      return;
1351
158
    }
1352
310
  }
1353
24.7k
  auto const* referenceType = dynamic_cast<ReferenceType const*>(&_type);
1354
24.7k
  if (!referenceType || referenceType->location() == DataLocation::Storage)
1355
17.7k
  {
1356
35.6k
    for (size_t i = 0; i < _type.sizeOnStack(); ++i)
1357
17.9k
      m_context << u256(0);
1358
17.7k
    return;
1359
17.7k
  }
1360
6.98k
  if (referenceType->location() == DataLocation::CallData)
1361
45
  {
1362
45
    solAssert(referenceType->sizeOnStack() == 1 || referenceType->sizeOnStack() == 2);
1363
45
    m_context << Instruction::CALLDATASIZE;
1364
45
    if (referenceType->sizeOnStack() == 2)
1365
25
      m_context << 0;
1366
45
    return;
1367
45
  }
1368
1369
6.93k
  solAssert(referenceType->location() == DataLocation::Memory);
1370
6.93k
  if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
1371
3.56k
    if (arrayType->isDynamicallySized())
1372
2.94k
    {
1373
      // Push a memory location that is (hopefully) always zero.
1374
2.94k
      pushZeroPointer();
1375
2.94k
      return;
1376
2.94k
    }
1377
1378
3.99k
  Type const* type = &_type;
1379
3.99k
  m_context.callLowLevelFunction(
1380
3.99k
    "$pushZeroValue_" + referenceType->identifier(),
1381
3.99k
    0,
1382
3.99k
    1,
1383
3.99k
    [type](CompilerContext& _context) {
1384
3.57k
      CompilerUtils utils(_context);
1385
1386
3.57k
      utils.allocateMemory(std::max<u256>(32u, type->memoryDataSize()));
1387
3.57k
      _context << Instruction::DUP1;
1388
1389
3.57k
      if (auto structType = dynamic_cast<StructType const*>(type))
1390
3.06k
        for (auto const& member: structType->members(nullptr))
1391
7.07k
        {
1392
7.07k
          utils.pushZeroValue(*member.type);
1393
7.07k
          utils.storeInMemoryDynamic(*member.type);
1394
7.07k
        }
1395
503
      else if (auto arrayType = dynamic_cast<ArrayType const*>(type))
1396
503
      {
1397
503
        solAssert(!arrayType->isDynamicallySized());
1398
503
        if (arrayType->length() > 0)
1399
503
        {
1400
503
          _context << arrayType->length() << Instruction::SWAP1;
1401
          // stack: items_to_do memory_pos
1402
503
          utils.zeroInitialiseMemoryArray(*arrayType);
1403
          // stack: updated_memory_pos
1404
503
        }
1405
503
      }
1406
0
      else
1407
503
        solAssert(false, "Requested initialisation for unknown type: " + type->toString());
1408
1409
      // remove the updated memory pointer
1410
3.57k
      _context << Instruction::POP;
1411
3.57k
    }
1412
3.99k
  );
1413
3.99k
}
1414
1415
void CompilerUtils::pushZeroPointer()
1416
2.99k
{
1417
2.99k
  m_context << u256(zeroPointer);
1418
2.99k
}
1419
1420
void CompilerUtils::moveToStackVariable(VariableDeclaration const& _variable)
1421
61.2k
{
1422
61.2k
  unsigned const stackPosition = m_context.baseToCurrentStackOffset(m_context.baseStackOffsetOfVariable(_variable));
1423
61.2k
  unsigned const size = _variable.annotation().type->sizeOnStack();
1424
61.2k
  solAssert(stackPosition >= size, "Variable size and position mismatch.");
1425
  // move variable starting from its top end in the stack
1426
61.2k
  if (stackPosition - size + 1 > 16)
1427
0
    BOOST_THROW_EXCEPTION(
1428
61.2k
      StackTooDeepError() <<
1429
61.2k
      errinfo_sourceLocation(_variable.location()) <<
1430
61.2k
      util::errinfo_comment(util::stackTooDeepString)
1431
61.2k
    );
1432
122k
  for (unsigned i = 0; i < size; ++i)
1433
61.2k
    m_context << swapInstruction(stackPosition - size + 1) << Instruction::POP;
1434
61.2k
}
1435
1436
void CompilerUtils::copyToStackTop(unsigned _stackDepth, unsigned _itemSize)
1437
25.3k
{
1438
25.3k
  assertThrow(
1439
25.3k
    _stackDepth <= 16,
1440
25.3k
    StackTooDeepError,
1441
25.3k
    util::stackTooDeepString
1442
25.3k
  );
1443
52.4k
  for (unsigned i = 0; i < _itemSize; ++i)
1444
27.1k
    m_context << dupInstruction(_stackDepth);
1445
25.3k
}
1446
1447
void CompilerUtils::moveToStackTop(unsigned _stackDepth, unsigned _itemSize)
1448
14.1k
{
1449
14.1k
  moveIntoStack(_itemSize, _stackDepth);
1450
14.1k
}
1451
1452
void CompilerUtils::moveIntoStack(unsigned _stackDepth, unsigned _itemSize)
1453
232k
{
1454
232k
  if (_stackDepth <= _itemSize)
1455
199k
    for (unsigned i = 0; i < _stackDepth; ++i)
1456
23.9k
      rotateStackDown(_stackDepth + _itemSize);
1457
57.0k
  else
1458
113k
    for (unsigned i = 0; i < _itemSize; ++i)
1459
56.4k
      rotateStackUp(_stackDepth + _itemSize);
1460
232k
}
1461
1462
void CompilerUtils::rotateStackUp(unsigned _items)
1463
56.4k
{
1464
56.4k
  assertThrow(
1465
56.4k
    _items - 1 <= 16,
1466
56.4k
    StackTooDeepError,
1467
56.4k
    util::stackTooDeepString
1468
56.4k
  );
1469
177k
  for (unsigned i = 1; i < _items; ++i)
1470
121k
    m_context << swapInstruction(_items - i);
1471
56.4k
}
1472
1473
void CompilerUtils::rotateStackDown(unsigned _items)
1474
23.9k
{
1475
23.9k
  assertThrow(
1476
23.9k
    _items - 1 <= 16,
1477
23.9k
    StackTooDeepError,
1478
23.9k
    util::stackTooDeepString
1479
23.9k
  );
1480
49.3k
  for (unsigned i = 1; i < _items; ++i)
1481
25.4k
    m_context << swapInstruction(i);
1482
23.9k
}
1483
1484
void CompilerUtils::popStackElement(Type const& _type)
1485
49.1k
{
1486
49.1k
  popStackSlots(_type.sizeOnStack());
1487
49.1k
}
1488
1489
void CompilerUtils::popStackSlots(size_t _amount)
1490
144k
{
1491
237k
  for (size_t i = 0; i < _amount; ++i)
1492
93.2k
    m_context << Instruction::POP;
1493
144k
}
1494
1495
void CompilerUtils::popAndJump(unsigned _toHeight, evmasm::AssemblyItem const& _jumpTo)
1496
52.9k
{
1497
52.9k
  solAssert(m_context.stackHeight() >= _toHeight);
1498
52.9k
  unsigned amount = m_context.stackHeight() - _toHeight;
1499
52.9k
  popStackSlots(amount);
1500
52.9k
  m_context.appendJumpTo(_jumpTo);
1501
52.9k
  m_context.adjustStackOffset(static_cast<int>(amount));
1502
52.9k
}
1503
1504
unsigned CompilerUtils::sizeOnStack(std::vector<Type const*> const& _variableTypes)
1505
128k
{
1506
128k
  unsigned size = 0;
1507
128k
  for (Type const* type: _variableTypes)
1508
131k
    size += type->sizeOnStack();
1509
128k
  return size;
1510
128k
}
1511
1512
void CompilerUtils::computeHashStatic()
1513
35.5k
{
1514
35.5k
  storeInMemory(0);
1515
35.5k
  m_context << u256(32) << u256(0) << Instruction::KECCAK256;
1516
35.5k
}
1517
1518
void CompilerUtils::copyContractCodeToMemory(ContractDefinition const& contract, bool _creation)
1519
215
{
1520
215
  std::string which = _creation ? "Creation" : "Runtime";
1521
215
  m_context.callLowLevelFunction(
1522
215
    "$copyContract" + which + "CodeToMemory_" + contract.type()->identifier(),
1523
215
    1,
1524
215
    1,
1525
215
    [&contract, _creation](CompilerContext& _context)
1526
215
    {
1527
      // copy the contract's code into memory
1528
193
      std::shared_ptr<evmasm::Assembly> assembly =
1529
193
        _creation ?
1530
187
        _context.compiledContract(contract) :
1531
193
        _context.compiledContractRuntime(contract);
1532
      // pushes size
1533
193
      auto subroutine = _context.addSubroutine(assembly);
1534
193
      _context << Instruction::DUP1 << subroutine;
1535
193
      _context << Instruction::DUP4 << Instruction::CODECOPY;
1536
193
      _context << Instruction::ADD;
1537
193
    }
1538
215
  );
1539
215
}
1540
1541
void CompilerUtils::storeStringData(bytesConstRef _data)
1542
17.2k
{
1543
  //@todo provide both alternatives to the optimiser
1544
  // stack: mempos
1545
17.2k
  if (_data.size() <= 32)
1546
5.21k
  {
1547
10.3k
    for (unsigned i = 0; i < _data.size(); i += 32)
1548
5.11k
    {
1549
5.11k
      m_context << u256(h256(_data.cropped(i), h256::AlignLeft));
1550
5.11k
      storeInMemoryDynamic(*TypeProvider::uint256());
1551
5.11k
    }
1552
5.21k
    m_context << Instruction::POP;
1553
5.21k
  }
1554
12.0k
  else
1555
12.0k
  {
1556
    // stack: mempos mempos_data
1557
12.0k
    m_context.appendData(_data.toBytes());
1558
12.0k
    m_context << u256(_data.size()) << Instruction::SWAP2;
1559
12.0k
    m_context << Instruction::CODECOPY;
1560
12.0k
  }
1561
17.2k
}
1562
1563
unsigned CompilerUtils::loadFromMemoryHelper(Type const& _type, bool _fromCalldata, bool _padToWords)
1564
36.8k
{
1565
36.8k
  solAssert(_type.isValueType());
1566
36.8k
  Type const* type = &_type;
1567
36.8k
  if (auto const* userDefined = dynamic_cast<UserDefinedValueType const*>(type))
1568
0
    type = &userDefined->underlyingType();
1569
1570
36.8k
  unsigned numBytes = type->calldataEncodedSize(_padToWords);
1571
36.8k
  bool isExternalFunctionType = false;
1572
36.8k
  if (auto const* funType = dynamic_cast<FunctionType const*>(type))
1573
56
    if (funType->kind() == FunctionType::Kind::External)
1574
41
      isExternalFunctionType = true;
1575
36.8k
  if (numBytes == 0)
1576
0
  {
1577
0
    m_context << Instruction::POP << u256(0);
1578
0
    return numBytes;
1579
0
  }
1580
36.8k
  solAssert(numBytes <= 32, "Static memory load of more than 32 bytes requested.");
1581
36.8k
  m_context << (_fromCalldata ? Instruction::CALLDATALOAD : Instruction::MLOAD);
1582
36.8k
  bool cleanupNeeded = true;
1583
36.8k
  if (isExternalFunctionType)
1584
41
    splitExternalFunctionType(true);
1585
36.7k
  else if (numBytes != 32)
1586
10.0k
  {
1587
    // add leading or trailing zeros by dividing/multiplying depending on alignment
1588
10.0k
    unsigned shiftFactor = (32 - numBytes) * 8;
1589
10.0k
    rightShiftNumberOnStack(shiftFactor);
1590
10.0k
    if (type->leftAligned())
1591
1.22k
    {
1592
1.22k
      leftShiftNumberOnStack(shiftFactor);
1593
1.22k
      cleanupNeeded = false;
1594
1.22k
    }
1595
8.79k
    else if (IntegerType const* intType = dynamic_cast<IntegerType const*>(type))
1596
8.79k
      if (!intType->isSigned())
1597
8.79k
        cleanupNeeded = false;
1598
10.0k
  }
1599
36.8k
  if (_fromCalldata)
1600
9.32k
    convertType(_type, *type, cleanupNeeded, false, true);
1601
1602
36.8k
  return numBytes;
1603
36.8k
}
1604
1605
void CompilerUtils::cleanHigherOrderBits(IntegerType const& _typeOnStack)
1606
135k
{
1607
135k
  if (_typeOnStack.numBits() == 256)
1608
94.1k
    return;
1609
41.3k
  else if (_typeOnStack.isSigned())
1610
9.74k
    m_context << u256(_typeOnStack.numBits() / 8 - 1) << Instruction::SIGNEXTEND;
1611
31.6k
  else
1612
31.6k
    m_context << ((u256(1) << _typeOnStack.numBits()) - 1) << Instruction::AND;
1613
135k
}
1614
1615
void CompilerUtils::leftShiftNumberOnStack(unsigned _bits)
1616
6.21k
{
1617
6.21k
  solAssert(_bits < 256);
1618
6.21k
  if (m_context.evmVersion().hasBitwiseShifting())
1619
5.34k
    m_context << _bits << Instruction::SHL;
1620
875
  else
1621
875
    m_context << (u256(1) << _bits) << Instruction::MUL;
1622
6.21k
}
1623
1624
void CompilerUtils::rightShiftNumberOnStack(unsigned _bits)
1625
12.1k
{
1626
12.1k
  solAssert(_bits < 256);
1627
  // NOTE: If we add signed right shift, SAR rounds differently than SDIV
1628
12.1k
  if (m_context.evmVersion().hasBitwiseShifting())
1629
9.45k
    m_context << _bits << Instruction::SHR;
1630
2.68k
  else
1631
2.68k
    m_context << (u256(1) << _bits) << Instruction::SWAP1 << Instruction::DIV;
1632
12.1k
}
1633
1634
unsigned CompilerUtils::prepareMemoryStore(Type const& _type, bool _padToWords, bool _cleanup)
1635
83.7k
{
1636
83.7k
  solAssert(
1637
83.7k
    _type.sizeOnStack() == 1,
1638
83.7k
    "Memory store of types with stack size != 1 not allowed (Type: " + _type.toString(true) + ")."
1639
83.7k
  );
1640
1641
83.7k
  solAssert(!_type.isDynamicallyEncoded());
1642
1643
83.7k
  unsigned numBytes = _type.calldataEncodedSize(_padToWords);
1644
1645
83.7k
  solAssert(
1646
83.7k
    numBytes > 0,
1647
83.7k
    "Memory store of 0 bytes requested (Type: " + _type.toString(true) + ")."
1648
83.7k
  );
1649
1650
83.7k
  solAssert(
1651
83.7k
    numBytes <= 32,
1652
83.7k
    "Memory store of more than 32 bytes requested (Type: " + _type.toString(true) + ")."
1653
83.7k
  );
1654
1655
83.7k
  if (_cleanup)
1656
83.6k
    convertType(_type, _type, true);
1657
1658
83.7k
  if (numBytes != 32 && !_type.leftAligned() && !_padToWords)
1659
    // shift the value accordingly before storing
1660
2.52k
    leftShiftNumberOnStack((32 - numBytes) * 8);
1661
1662
83.7k
  return numBytes;
1663
83.7k
}