Coverage Report

Created: 2025-09-27 06:48

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/spirv-cross/spirv_parser.cpp
Line
Count
Source
1
/*
2
 * Copyright 2018-2021 Arm Limited
3
 * SPDX-License-Identifier: Apache-2.0 OR MIT
4
 *
5
 * Licensed under the Apache License, Version 2.0 (the "License");
6
 * you may not use this file except in compliance with the License.
7
 * You may obtain a copy of the License at
8
 *
9
 *     http://www.apache.org/licenses/LICENSE-2.0
10
 *
11
 * Unless required by applicable law or agreed to in writing, software
12
 * distributed under the License is distributed on an "AS IS" BASIS,
13
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
 * See the License for the specific language governing permissions and
15
 * limitations under the License.
16
 */
17
18
/*
19
 * At your option, you may choose to accept this material under either:
20
 *  1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
21
 *  2. The MIT License, found at <http://opensource.org/licenses/MIT>.
22
 */
23
24
#include "spirv_parser.hpp"
25
#include <assert.h>
26
27
using namespace std;
28
using namespace SPIRV_CROSS_SPV_HEADER_NAMESPACE;
29
30
namespace SPIRV_CROSS_NAMESPACE
31
{
32
Parser::Parser(vector<uint32_t> spirv)
33
891
{
34
891
  ir.spirv = std::move(spirv);
35
891
}
36
37
Parser::Parser(const uint32_t *spirv_data, size_t word_count)
38
0
{
39
0
  ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count);
40
0
}
41
42
static bool decoration_is_string(Decoration decoration)
43
48.3k
{
44
48.3k
  switch (decoration)
45
48.3k
  {
46
621
  case DecorationHlslSemanticGOOGLE:
47
621
    return true;
48
49
47.7k
  default:
50
47.7k
    return false;
51
48.3k
  }
52
48.3k
}
53
54
static inline uint32_t swap_endian(uint32_t v)
55
891
{
56
891
  return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
57
891
}
58
59
static bool is_valid_spirv_version(uint32_t version)
60
891
{
61
891
  switch (version)
62
891
  {
63
  // Allow v99 since it tends to just work.
64
0
  case 99:
65
0
  case 0x10000: // SPIR-V 1.0
66
0
  case 0x10100: // SPIR-V 1.1
67
0
  case 0x10200: // SPIR-V 1.2
68
0
  case 0x10300: // SPIR-V 1.3
69
0
  case 0x10400: // SPIR-V 1.4
70
0
  case 0x10500: // SPIR-V 1.5
71
891
  case 0x10600: // SPIR-V 1.6
72
891
    return true;
73
74
0
  default:
75
0
    return false;
76
891
  }
77
891
}
78
79
void Parser::parse()
80
891
{
81
891
  auto &spirv = ir.spirv;
82
83
891
  auto len = spirv.size();
84
891
  if (len < 5)
85
0
    SPIRV_CROSS_THROW("SPIRV file too small.");
86
87
891
  auto s = spirv.data();
88
89
  // Endian-swap if we need to.
90
891
  if (s[0] == swap_endian(MagicNumber))
91
0
    transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });
92
93
891
  if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
94
0
    SPIRV_CROSS_THROW("Invalid SPIRV format.");
95
96
891
  uint32_t bound = s[3];
97
98
891
  const uint32_t MaximumNumberOfIDs = 0x3fffff;
99
891
  if (bound > MaximumNumberOfIDs)
100
3
    SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n");
101
102
888
  ir.set_id_bounds(bound);
103
104
888
  uint32_t offset = 5;
105
106
888
  SmallVector<Instruction> instructions;
107
320k
  while (offset < len)
108
320k
  {
109
320k
    Instruction instr = {};
110
320k
    instr.op = spirv[offset] & 0xffff;
111
320k
    instr.count = (spirv[offset] >> 16) & 0xffff;
112
113
320k
    if (instr.count == 0)
114
13
      SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file.");
115
116
320k
    instr.offset = offset + 1;
117
320k
    instr.length = instr.count - 1;
118
119
320k
    offset += instr.count;
120
121
320k
    if (offset > spirv.size())
122
10
      SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds.");
123
124
320k
    instructions.push_back(instr);
125
320k
  }
126
127
865
  for (auto &i : instructions)
128
249k
    parse(i);
129
130
865
  for (auto &fixup : forward_pointer_fixups)
131
2.29k
  {
132
2.29k
    auto &target = get<SPIRType>(fixup.first);
133
2.29k
    auto &source = get<SPIRType>(fixup.second);
134
2.29k
    target.member_types = source.member_types;
135
2.29k
    target.basetype = source.basetype;
136
2.29k
    target.self = source.self;
137
2.29k
  }
138
865
  forward_pointer_fixups.clear();
139
140
865
  if (current_function)
141
139
    SPIRV_CROSS_THROW("Function was not terminated.");
142
726
  if (current_block)
143
0
    SPIRV_CROSS_THROW("Block was not terminated.");
144
726
  if (ir.default_entry_point == 0)
145
258
    SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module.");
146
726
}
147
148
const uint32_t *Parser::stream(const Instruction &instr) const
149
249k
{
150
  // If we're not going to use any arguments, just return nullptr.
151
  // We want to avoid case where we return an out of range pointer
152
  // that trips debug assertions on some platforms.
153
249k
  if (!instr.length)
154
131k
    return nullptr;
155
156
118k
  if (instr.offset + instr.length > ir.spirv.size())
157
0
    SPIRV_CROSS_THROW("Compiler::stream() out of range.");
158
118k
  return &ir.spirv[instr.offset];
159
118k
}
160
161
static string extract_string(const vector<uint32_t> &spirv, uint32_t offset)
162
141k
{
163
141k
  string ret;
164
151k
  for (uint32_t i = offset; i < spirv.size(); i++)
165
151k
  {
166
151k
    uint32_t w = spirv[i];
167
168
337k
    for (uint32_t j = 0; j < 4; j++, w >>= 8)
169
327k
    {
170
327k
      char c = w & 0xff;
171
327k
      if (c == '\0')
172
141k
        return ret;
173
185k
      ret += c;
174
185k
    }
175
151k
  }
176
177
141k
  SPIRV_CROSS_THROW("String was not terminated before EOF");
178
141k
}
179
180
void Parser::parse(const Instruction &instruction)
181
249k
{
182
249k
  auto *ops = stream(instruction);
183
249k
  auto op = static_cast<Op>(instruction.op);
184
249k
  uint32_t length = instruction.length;
185
186
  // HACK for glslang that might emit OpEmitMeshTasksEXT followed by return / branch.
187
  // Instead of failing hard, just ignore it.
188
249k
  if (ignore_trailing_block_opcodes)
189
0
  {
190
0
    ignore_trailing_block_opcodes = false;
191
0
    if (op == OpReturn || op == OpBranch || op == OpUnreachable)
192
0
      return;
193
0
  }
194
195
249k
  switch (op)
196
249k
  {
197
26
  case OpSourceContinued:
198
290
  case OpSourceExtension:
199
466
  case OpNop:
200
468
  case OpModuleProcessed:
201
468
    break;
202
203
452
  case OpString:
204
452
  {
205
452
    set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1));
206
452
    break;
207
466
  }
208
209
1.85k
  case OpMemoryModel:
210
1.85k
    ir.addressing_model = static_cast<AddressingModel>(ops[0]);
211
1.85k
    ir.memory_model = static_cast<MemoryModel>(ops[1]);
212
1.85k
    break;
213
214
284
  case OpSource:
215
284
  {
216
284
    ir.source.lang = static_cast<SourceLanguage>(ops[0]);
217
284
    switch (ir.source.lang)
218
284
    {
219
205
    case SourceLanguageESSL:
220
205
      ir.source.es = true;
221
205
      ir.source.version = ops[1];
222
205
      ir.source.known = true;
223
205
      ir.source.hlsl = false;
224
205
      break;
225
226
53
    case SourceLanguageGLSL:
227
53
      ir.source.es = false;
228
53
      ir.source.version = ops[1];
229
53
      ir.source.known = true;
230
53
      ir.source.hlsl = false;
231
53
      break;
232
233
3
    case SourceLanguageHLSL:
234
      // For purposes of cross-compiling, this is GLSL 450.
235
3
      ir.source.es = false;
236
3
      ir.source.version = 450;
237
3
      ir.source.known = true;
238
3
      ir.source.hlsl = true;
239
3
      break;
240
241
23
    default:
242
23
      ir.source.known = false;
243
23
      break;
244
284
    }
245
284
    break;
246
284
  }
247
248
608
  case OpUndef:
249
608
  {
250
608
    uint32_t result_type = ops[0];
251
608
    uint32_t id = ops[1];
252
608
    set<SPIRUndef>(id, result_type);
253
608
    if (current_block)
254
2
      current_block->ops.push_back(instruction);
255
608
    break;
256
284
  }
257
258
2.67k
  case OpCapability:
259
2.67k
  {
260
2.67k
    uint32_t cap = ops[0];
261
2.67k
    if (cap == CapabilityKernel)
262
0
      SPIRV_CROSS_THROW("Kernel capability not supported.");
263
264
2.67k
    ir.declared_capabilities.push_back(static_cast<Capability>(ops[0]));
265
2.67k
    break;
266
2.67k
  }
267
268
128k
  case OpExtension:
269
128k
  {
270
128k
    auto ext = extract_string(ir.spirv, instruction.offset);
271
128k
    ir.declared_extensions.push_back(std::move(ext));
272
128k
    break;
273
2.67k
  }
274
275
596
  case OpExtInstImport:
276
596
  {
277
596
    uint32_t id = ops[0];
278
279
596
    SPIRExtension::Extension spirv_ext = SPIRExtension::Unsupported;
280
281
596
    auto ext = extract_string(ir.spirv, instruction.offset + 1);
282
596
    if (ext == "GLSL.std.450")
283
190
      spirv_ext = SPIRExtension::GLSL;
284
406
    else if (ext == "DebugInfo")
285
0
      spirv_ext = SPIRExtension::SPV_debug_info;
286
406
    else if (ext == "SPV_AMD_shader_ballot")
287
0
      spirv_ext = SPIRExtension::SPV_AMD_shader_ballot;
288
406
    else if (ext == "SPV_AMD_shader_explicit_vertex_parameter")
289
0
      spirv_ext = SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter;
290
406
    else if (ext == "SPV_AMD_shader_trinary_minmax")
291
0
      spirv_ext = SPIRExtension::SPV_AMD_shader_trinary_minmax;
292
406
    else if (ext == "SPV_AMD_gcn_shader")
293
0
      spirv_ext = SPIRExtension::SPV_AMD_gcn_shader;
294
406
    else if (ext == "NonSemantic.DebugPrintf")
295
0
      spirv_ext = SPIRExtension::NonSemanticDebugPrintf;
296
406
    else if (ext == "NonSemantic.Shader.DebugInfo.100")
297
0
      spirv_ext = SPIRExtension::NonSemanticShaderDebugInfo;
298
406
    else if (ext.find("NonSemantic.") == 0)
299
0
      spirv_ext = SPIRExtension::NonSemanticGeneric;
300
301
596
    set<SPIRExtension>(id, spirv_ext);
302
    // Other SPIR-V extensions which have ExtInstrs are currently not supported.
303
304
596
    break;
305
2.67k
  }
306
307
1.03k
  case OpExtInst:
308
1.03k
  case OpExtInstWithForwardRefsKHR:
309
1.03k
  {
310
    // The SPIR-V debug information extended instructions might come at global scope.
311
1.03k
    if (current_block)
312
1.03k
    {
313
1.03k
      current_block->ops.push_back(instruction);
314
1.03k
      if (length >= 2)
315
1.02k
      {
316
1.02k
        const auto *type = maybe_get<SPIRType>(ops[0]);
317
1.02k
        if (type)
318
991
          ir.load_type_width.insert({ ops[1], type->width });
319
1.02k
      }
320
1.03k
    }
321
1
    else if (op == OpExtInst)
322
1
    {
323
      // Don't want to deal with ForwardRefs here.
324
325
1
      auto &ext = get<SPIRExtension>(ops[2]);
326
1
      if (ext.ext == SPIRExtension::NonSemanticShaderDebugInfo)
327
0
      {
328
        // Parse global ShaderDebugInfo we care about.
329
        // Just forward the string information.
330
0
        if (ops[3] == SPIRExtension::DebugSource)
331
0
          set<SPIRString>(ops[1], get<SPIRString>(ops[4]).str);
332
0
      }
333
1
    }
334
1.03k
    break;
335
1.03k
  }
336
337
5.42k
  case OpEntryPoint:
338
5.42k
  {
339
5.42k
    auto itr =
340
5.42k
        ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]),
341
5.42k
                                                                extract_string(ir.spirv, instruction.offset + 2))));
342
5.42k
    auto &e = itr.first->second;
343
344
    // Strings need nul-terminator and consume the whole word.
345
5.42k
    uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2);
346
347
19.3k
    for (uint32_t i = strlen_words + 2; i < instruction.length; i++)
348
13.9k
      e.interface_variables.push_back(ops[i]);
349
350
    // Set the name of the entry point in case OpName is not provided later.
351
5.42k
    ir.set_name(ops[1], e.name);
352
353
    // If we don't have an entry, make the first one our "default".
354
5.42k
    if (!ir.default_entry_point)
355
526
      ir.default_entry_point = ops[1];
356
5.42k
    break;
357
1.03k
  }
358
359
1.50k
  case OpExecutionMode:
360
1.50k
  {
361
1.50k
    auto &execution = ir.entry_points[ops[0]];
362
1.50k
    auto mode = static_cast<ExecutionMode>(ops[1]);
363
1.50k
    execution.flags.set(mode);
364
365
1.50k
    switch (mode)
366
1.50k
    {
367
97
    case ExecutionModeInvocations:
368
97
      execution.invocations = ops[2];
369
97
      break;
370
371
114
    case ExecutionModeLocalSize:
372
114
      execution.workgroup_size.x = ops[2];
373
114
      execution.workgroup_size.y = ops[3];
374
114
      execution.workgroup_size.z = ops[4];
375
114
      break;
376
377
16
    case ExecutionModeOutputVertices:
378
16
      execution.output_vertices = ops[2];
379
16
      break;
380
381
0
    case ExecutionModeOutputPrimitivesEXT:
382
0
      execution.output_primitives = ops[2];
383
0
      break;
384
385
0
    case ExecutionModeSignedZeroInfNanPreserve:
386
0
      switch (ops[2])
387
0
      {
388
0
      case 8:
389
0
        execution.signed_zero_inf_nan_preserve_8 = true;
390
0
        break;
391
392
0
      case 16:
393
0
        execution.signed_zero_inf_nan_preserve_16 = true;
394
0
        break;
395
396
0
      case 32:
397
0
        execution.signed_zero_inf_nan_preserve_32 = true;
398
0
        break;
399
400
0
      case 64:
401
0
        execution.signed_zero_inf_nan_preserve_64 = true;
402
0
        break;
403
404
0
      default:
405
0
        SPIRV_CROSS_THROW("Invalid bit-width for SignedZeroInfNanPreserve.");
406
0
      }
407
0
      break;
408
409
1.27k
    default:
410
1.27k
      break;
411
1.50k
    }
412
1.50k
    break;
413
1.50k
  }
414
415
1.50k
  case OpExecutionModeId:
416
1
  {
417
1
    auto &execution = ir.entry_points[ops[0]];
418
1
    auto mode = static_cast<ExecutionMode>(ops[1]);
419
1
    execution.flags.set(mode);
420
421
1
    switch (mode)
422
1
    {
423
0
    case ExecutionModeLocalSizeId:
424
0
      execution.workgroup_size.id_x = ops[2];
425
0
      execution.workgroup_size.id_y = ops[3];
426
0
      execution.workgroup_size.id_z = ops[4];
427
0
      break;
428
429
0
    case ExecutionModeFPFastMathDefault:
430
0
      execution.fp_fast_math_defaults[ops[2]] = ops[3];
431
0
      break;
432
433
1
    default:
434
1
      break;
435
1
    }
436
1
    break;
437
1
  }
438
439
5.89k
  case OpName:
440
5.89k
  {
441
5.89k
    uint32_t id = ops[0];
442
5.89k
    ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1));
443
5.89k
    break;
444
1
  }
445
446
847
  case OpMemberName:
447
847
  {
448
847
    uint32_t id = ops[0];
449
847
    uint32_t member = ops[1];
450
847
    ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2));
451
847
    break;
452
1
  }
453
454
456
  case OpDecorationGroup:
455
456
  {
456
    // Noop, this simply means an ID should be a collector of decorations.
457
    // The meta array is already a flat array of decorations which will contain the relevant decorations.
458
456
    break;
459
1
  }
460
461
3.20k
  case OpGroupDecorate:
462
3.20k
  {
463
3.20k
    uint32_t group_id = ops[0];
464
3.20k
    auto &decorations = ir.meta[group_id].decoration;
465
3.20k
    auto &flags = decorations.decoration_flags;
466
467
    // Copies decorations from one ID to another. Only copy decorations which are set in the group,
468
    // i.e., we cannot just copy the meta structure directly.
469
13.0k
    for (uint32_t i = 1; i < length; i++)
470
9.87k
    {
471
9.87k
      uint32_t target = ops[i];
472
34.6k
      flags.for_each_bit([&](uint32_t bit) {
473
34.6k
        auto decoration = static_cast<Decoration>(bit);
474
475
34.6k
        if (decoration_is_string(decoration))
476
414
        {
477
414
          ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration));
478
414
        }
479
34.2k
        else
480
34.2k
        {
481
34.2k
          ir.meta[target].decoration_word_offset[decoration] =
482
34.2k
              ir.meta[group_id].decoration_word_offset[decoration];
483
34.2k
          ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration));
484
34.2k
        }
485
34.6k
      });
486
9.87k
    }
487
3.20k
    break;
488
1
  }
489
490
4.63k
  case OpGroupMemberDecorate:
491
4.63k
  {
492
4.63k
    uint32_t group_id = ops[0];
493
4.63k
    auto &flags = ir.meta[group_id].decoration.decoration_flags;
494
495
    // Copies decorations from one ID to another. Only copy decorations which are set in the group,
496
    // i.e., we cannot just copy the meta structure directly.
497
10.5k
    for (uint32_t i = 1; i + 1 < length; i += 2)
498
5.88k
    {
499
5.88k
      uint32_t target = ops[i + 0];
500
5.88k
      uint32_t index = ops[i + 1];
501
13.7k
      flags.for_each_bit([&](uint32_t bit) {
502
13.7k
        auto decoration = static_cast<Decoration>(bit);
503
504
13.7k
        if (decoration_is_string(decoration))
505
207
          ir.set_member_decoration_string(target, index, decoration,
506
207
                                          ir.get_decoration_string(group_id, decoration));
507
13.4k
        else
508
13.4k
          ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration));
509
13.7k
      });
510
5.88k
    }
511
4.63k
    break;
512
1
  }
513
514
8.38k
  case OpDecorate:
515
8.38k
  case OpDecorateId:
516
8.38k
  {
517
    // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint,
518
    // so merge decorate and decorate-id here.
519
8.38k
    uint32_t id = ops[0];
520
521
8.38k
    auto decoration = static_cast<Decoration>(ops[1]);
522
8.38k
    if (length >= 3)
523
3.03k
    {
524
3.03k
      ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data());
525
3.03k
      ir.set_decoration(id, decoration, ops[2]);
526
3.03k
    }
527
5.34k
    else
528
5.34k
      ir.set_decoration(id, decoration);
529
530
8.38k
    break;
531
8.38k
  }
532
533
52
  case OpDecorateStringGOOGLE:
534
52
  {
535
52
    uint32_t id = ops[0];
536
52
    auto decoration = static_cast<Decoration>(ops[1]);
537
52
    ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2));
538
52
    break;
539
8.38k
  }
540
541
302
  case OpMemberDecorate:
542
302
  {
543
302
    uint32_t id = ops[0];
544
302
    uint32_t member = ops[1];
545
302
    auto decoration = static_cast<Decoration>(ops[2]);
546
302
    if (length >= 4)
547
266
      ir.set_member_decoration(id, member, decoration, ops[3]);
548
36
    else
549
36
      ir.set_member_decoration(id, member, decoration);
550
302
    break;
551
8.38k
  }
552
553
68
  case OpMemberDecorateStringGOOGLE:
554
68
  {
555
68
    uint32_t id = ops[0];
556
68
    uint32_t member = ops[1];
557
68
    auto decoration = static_cast<Decoration>(ops[2]);
558
68
    ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3));
559
68
    break;
560
8.38k
  }
561
562
  // Build up basic types.
563
330
  case OpTypeVoid:
564
330
  {
565
330
    uint32_t id = ops[0];
566
330
    auto &type = set<SPIRType>(id, op);
567
330
    type.basetype = SPIRType::Void;
568
330
    break;
569
8.38k
  }
570
571
196
  case OpTypeBool:
572
196
  {
573
196
    uint32_t id = ops[0];
574
196
    auto &type = set<SPIRType>(id, op);
575
196
    type.basetype = SPIRType::Boolean;
576
196
    type.width = 1;
577
196
    break;
578
8.38k
  }
579
580
395
  case OpTypeFloat:
581
395
  {
582
395
    uint32_t id = ops[0];
583
395
    uint32_t width = ops[1];
584
395
    auto &type = set<SPIRType>(id, op);
585
586
395
    if (width != 16 && width != 8 && length > 2)
587
0
      SPIRV_CROSS_THROW("Unrecognized FP encoding mode for OpTypeFloat.");
588
589
395
    if (width == 64)
590
12
      type.basetype = SPIRType::Double;
591
383
    else if (width == 32)
592
345
      type.basetype = SPIRType::Float;
593
38
    else if (width == 16)
594
32
    {
595
32
      if (length > 2)
596
0
      {
597
0
        if (ops[2] == FPEncodingBFloat16KHR)
598
0
          type.basetype = SPIRType::BFloat16;
599
0
        else
600
0
          SPIRV_CROSS_THROW("Unrecognized encoding for OpTypeFloat 16.");
601
0
      }
602
32
      else
603
32
        type.basetype = SPIRType::Half;
604
32
    }
605
6
    else if (width == 8)
606
0
    {
607
0
      if (length < 2)
608
0
        SPIRV_CROSS_THROW("Missing encoding for OpTypeFloat 8.");
609
0
      else if (ops[2] == FPEncodingFloat8E4M3EXT)
610
0
        type.basetype = SPIRType::FloatE4M3;
611
0
      else if (ops[2] == FPEncodingFloat8E5M2EXT)
612
0
        type.basetype = SPIRType::FloatE5M2;
613
0
      else
614
0
        SPIRV_CROSS_THROW("Invalid encoding for OpTypeFloat 8.");
615
0
    }
616
6
    else
617
6
      SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type.");
618
389
    type.width = width;
619
389
    break;
620
395
  }
621
622
1.59k
  case OpTypeInt:
623
1.59k
  {
624
1.59k
    uint32_t id = ops[0];
625
1.59k
    uint32_t width = ops[1];
626
1.59k
    bool signedness = ops[2] != 0;
627
1.59k
    auto &type = set<SPIRType>(id, op);
628
1.59k
    type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width);
629
1.59k
    type.width = width;
630
1.59k
    break;
631
395
  }
632
633
  // Build composite types by "inheriting".
634
  // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing
635
  // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc.
636
602
  case OpTypeVector:
637
602
  {
638
602
    uint32_t id = ops[0];
639
602
    uint32_t vecsize = ops[2];
640
641
602
    auto &base = get<SPIRType>(ops[1]);
642
602
    auto &vecbase = set<SPIRType>(id, base);
643
644
602
    vecbase.op = op;
645
602
    vecbase.vecsize = vecsize;
646
602
    vecbase.self = id;
647
602
    vecbase.parent_type = ops[1];
648
602
    break;
649
395
  }
650
651
225
  case OpTypeMatrix:
652
225
  {
653
225
    uint32_t id = ops[0];
654
225
    uint32_t colcount = ops[2];
655
656
225
    auto &base = get<SPIRType>(ops[1]);
657
225
    auto &matrixbase = set<SPIRType>(id, base);
658
659
225
    matrixbase.op = op;
660
225
    matrixbase.columns = colcount;
661
225
    matrixbase.self = id;
662
225
    matrixbase.parent_type = ops[1];
663
225
    break;
664
395
  }
665
666
0
  case OpTypeCooperativeMatrixKHR:
667
0
  {
668
0
    uint32_t id = ops[0];
669
0
    auto &base = get<SPIRType>(ops[1]);
670
0
    auto &matrixbase = set<SPIRType>(id, base);
671
672
0
    matrixbase.op = op;
673
0
    matrixbase.ext.cooperative.scope_id = ops[2];
674
0
    matrixbase.ext.cooperative.rows_id = ops[3];
675
0
    matrixbase.ext.cooperative.columns_id = ops[4];
676
0
    matrixbase.ext.cooperative.use_id = ops[5];
677
0
    matrixbase.self = id;
678
0
    matrixbase.parent_type = ops[1];
679
0
    break;
680
395
  }
681
682
0
  case OpTypeCooperativeVectorNV:
683
0
  {
684
0
    uint32_t id = ops[0];
685
0
    auto &type = set<SPIRType>(id, op);
686
687
0
    type.basetype = SPIRType::CoopVecNV;
688
0
    type.op = op;
689
0
    type.ext.coopVecNV.component_type_id = ops[1];
690
0
    type.ext.coopVecNV.component_count_id = ops[2];
691
0
    type.parent_type = ops[1];
692
693
    // CoopVec-NV can be used with integer operations like SMax
694
    // where spirv-opt does explicit checks on integer bitwidth
695
0
    auto component_type = get<SPIRType>(type.ext.coopVecNV.component_type_id);
696
0
    type.width = component_type.width;
697
0
    break;
698
395
  }
699
700
1.30k
  case OpTypeArray:
701
1.30k
  {
702
1.30k
    uint32_t id = ops[0];
703
1.30k
    uint32_t tid = ops[1];
704
1.30k
    auto &base = get<SPIRType>(tid);
705
1.30k
    auto &arraybase = set<SPIRType>(id, base);
706
707
1.30k
    arraybase.op = op;
708
1.30k
    arraybase.parent_type = tid;
709
710
1.30k
    uint32_t cid = ops[2];
711
1.30k
    ir.mark_used_as_array_length(cid);
712
1.30k
    auto *c = maybe_get<SPIRConstant>(cid);
713
1.30k
    bool literal = c && !c->specialization;
714
715
    // We're copying type information into Array types, so we'll need a fixup for any physical pointer
716
    // references.
717
1.30k
    if (base.forward_pointer)
718
0
      forward_pointer_fixups.push_back({ id, tid });
719
720
1.30k
    arraybase.array_size_literal.push_back(literal);
721
1.30k
    arraybase.array.push_back(literal ? c->scalar() : cid);
722
723
    // .self resolves down to non-array/non-pointer type.
724
1.30k
    arraybase.self = base.self;
725
1.30k
    break;
726
395
  }
727
728
4.35k
  case OpTypeRuntimeArray:
729
4.35k
  {
730
4.35k
    uint32_t id = ops[0];
731
732
4.35k
    auto &base = get<SPIRType>(ops[1]);
733
4.35k
    auto &arraybase = set<SPIRType>(id, base);
734
735
    // We're copying type information into Array types, so we'll need a fixup for any physical pointer
736
    // references.
737
4.35k
    if (base.forward_pointer)
738
2.29k
      forward_pointer_fixups.push_back({ id, ops[1] });
739
740
4.35k
    arraybase.op = op;
741
4.35k
    arraybase.array.push_back(0);
742
4.35k
    arraybase.array_size_literal.push_back(true);
743
4.35k
    arraybase.parent_type = ops[1];
744
745
    // .self resolves down to non-array/non-pointer type.
746
4.35k
    arraybase.self = base.self;
747
4.35k
    break;
748
395
  }
749
750
75
  case OpTypeImage:
751
75
  {
752
75
    uint32_t id = ops[0];
753
75
    auto &type = set<SPIRType>(id, op);
754
75
    type.basetype = SPIRType::Image;
755
75
    type.image.type = ops[1];
756
75
    type.image.dim = static_cast<Dim>(ops[2]);
757
75
    type.image.depth = ops[3] == 1;
758
75
    type.image.arrayed = ops[4] != 0;
759
75
    type.image.ms = ops[5] != 0;
760
75
    type.image.sampled = ops[6];
761
75
    type.image.format = static_cast<ImageFormat>(ops[7]);
762
75
    type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax;
763
75
    break;
764
395
  }
765
766
40
  case OpTypeSampledImage:
767
40
  {
768
40
    uint32_t id = ops[0];
769
40
    uint32_t imagetype = ops[1];
770
40
    auto &type = set<SPIRType>(id, op);
771
40
    type = get<SPIRType>(imagetype);
772
40
    type.basetype = SPIRType::SampledImage;
773
40
    type.self = id;
774
40
    break;
775
395
  }
776
777
4
  case OpTypeSampler:
778
4
  {
779
4
    uint32_t id = ops[0];
780
4
    auto &type = set<SPIRType>(id, op);
781
4
    type.basetype = SPIRType::Sampler;
782
4
    break;
783
395
  }
784
785
4.54k
  case OpTypePointer:
786
4.54k
  {
787
4.54k
    uint32_t id = ops[0];
788
789
    // Very rarely, we might receive a FunctionPrototype here.
790
    // We won't be able to compile it, but we shouldn't crash when parsing.
791
    // We should be able to reflect.
792
4.54k
    auto *base = maybe_get<SPIRType>(ops[2]);
793
4.54k
    auto &ptrbase = set<SPIRType>(id, op);
794
795
4.54k
    if (base)
796
2.86k
    {
797
2.86k
      ptrbase = *base;
798
2.86k
      ptrbase.op = op;
799
2.86k
    }
800
801
4.54k
    ptrbase.pointer = true;
802
4.54k
    ptrbase.pointer_depth++;
803
4.54k
    ptrbase.storage = static_cast<StorageClass>(ops[1]);
804
805
4.54k
    if (ptrbase.storage == StorageClassAtomicCounter)
806
24
      ptrbase.basetype = SPIRType::AtomicCounter;
807
808
4.54k
    if (base && base->forward_pointer)
809
56
      forward_pointer_fixups.push_back({ id, ops[2] });
810
811
4.54k
    ptrbase.parent_type = ops[2];
812
813
    // Do NOT set ptrbase.self!
814
4.54k
    break;
815
395
  }
816
817
93
  case OpTypeForwardPointer:
818
93
  {
819
93
    uint32_t id = ops[0];
820
93
    auto &ptrbase = set<SPIRType>(id, op);
821
93
    ptrbase.pointer = true;
822
93
    ptrbase.pointer_depth++;
823
93
    ptrbase.storage = static_cast<StorageClass>(ops[1]);
824
93
    ptrbase.forward_pointer = true;
825
826
93
    if (ptrbase.storage == StorageClassAtomicCounter)
827
0
      ptrbase.basetype = SPIRType::AtomicCounter;
828
829
93
    break;
830
395
  }
831
832
3.62k
  case OpTypeStruct:
833
3.62k
  {
834
3.62k
    uint32_t id = ops[0];
835
3.62k
    auto &type = set<SPIRType>(id, op);
836
3.62k
    type.basetype = SPIRType::Struct;
837
7.61k
    for (uint32_t i = 1; i < length; i++)
838
3.98k
      type.member_types.push_back(ops[i]);
839
840
    // Check if we have seen this struct type before, with just different
841
    // decorations.
842
    //
843
    // Add workaround for issue #17 as well by looking at OpName for the struct
844
    // types, which we shouldn't normally do.
845
    // We should not normally have to consider type aliases like this to begin with
846
    // however ... glslang issues #304, #307 cover this.
847
848
    // For stripped names, never consider struct type aliasing.
849
    // We risk declaring the same struct multiple times, but type-punning is not allowed
850
    // so this is safe.
851
3.62k
    bool consider_aliasing = !ir.get_name(type.self).empty();
852
3.62k
    if (consider_aliasing)
853
1.91k
    {
854
1.91k
      for (auto &other : global_struct_cache)
855
2.89k
      {
856
2.89k
        if (ir.get_name(type.self) == ir.get_name(other) &&
857
1.68k
            types_are_logically_equivalent(type, get<SPIRType>(other)))
858
1.68k
        {
859
1.68k
          type.type_alias = other;
860
1.68k
          break;
861
1.68k
        }
862
2.89k
      }
863
864
1.91k
      if (type.type_alias == TypeID(0))
865
228
        global_struct_cache.push_back(id);
866
1.91k
    }
867
3.62k
    break;
868
395
  }
869
870
3.84k
  case OpTypeFunction:
871
3.84k
  {
872
3.84k
    uint32_t id = ops[0];
873
3.84k
    uint32_t ret = ops[1];
874
875
3.84k
    auto &func = set<SPIRFunctionPrototype>(id, ret);
876
10.0k
    for (uint32_t i = 2; i < length; i++)
877
6.15k
      func.parameter_types.push_back(ops[i]);
878
3.84k
    break;
879
395
  }
880
881
0
  case OpTypeAccelerationStructureKHR:
882
0
  {
883
0
    uint32_t id = ops[0];
884
0
    auto &type = set<SPIRType>(id, op);
885
0
    type.basetype = SPIRType::AccelerationStructure;
886
0
    break;
887
395
  }
888
889
0
  case OpTypeRayQueryKHR:
890
0
  {
891
0
    uint32_t id = ops[0];
892
0
    auto &type = set<SPIRType>(id, op);
893
0
    type.basetype = SPIRType::RayQuery;
894
0
    break;
895
395
  }
896
897
0
  case OpTypeTensorARM:
898
0
  {
899
0
    uint32_t id = ops[0];
900
0
    auto &type = set<SPIRType>(id, op);
901
0
    type.basetype = SPIRType::Tensor;
902
0
    type.ext.tensor = {};
903
0
    type.ext.tensor.type = ops[1];
904
0
    if (length >= 3)
905
0
      type.ext.tensor.rank = ops[2];
906
0
    if (length >= 4)
907
0
      type.ext.tensor.shape = ops[3];
908
0
    break;
909
395
  }
910
911
  // Variable declaration
912
  // All variables are essentially pointers with a storage qualifier.
913
3.82k
  case OpVariable:
914
3.82k
  {
915
3.82k
    uint32_t type = ops[0];
916
3.82k
    uint32_t id = ops[1];
917
3.82k
    auto storage = static_cast<StorageClass>(ops[2]);
918
3.82k
    uint32_t initializer = length == 4 ? ops[3] : 0;
919
920
3.82k
    if (storage == StorageClassFunction)
921
1.36k
    {
922
1.36k
      if (!current_function)
923
1
        SPIRV_CROSS_THROW("No function currently in scope");
924
1.36k
      current_function->add_local_variable(id);
925
1.36k
    }
926
927
3.81k
    set<SPIRVariable>(id, type, storage, initializer);
928
3.81k
    break;
929
3.82k
  }
930
931
  // OpPhi
932
  // OpPhi is a fairly magical opcode.
933
  // It selects temporary variables based on which parent block we *came from*.
934
  // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local
935
  // variable to emulate SSA Phi.
936
1.06k
  case OpPhi:
937
1.06k
  {
938
1.06k
    if (!current_function)
939
0
      SPIRV_CROSS_THROW("No function currently in scope");
940
1.06k
    if (!current_block)
941
1
      SPIRV_CROSS_THROW("No block currently in scope");
942
943
1.06k
    uint32_t result_type = ops[0];
944
1.06k
    uint32_t id = ops[1];
945
946
    // Instead of a temporary, create a new function-wide temporary with this ID instead.
947
1.06k
    auto &var = set<SPIRVariable>(id, result_type, StorageClassFunction);
948
1.06k
    var.phi_variable = true;
949
950
1.06k
    current_function->add_local_variable(id);
951
952
5.04k
    for (uint32_t i = 2; i + 2 <= length; i += 2)
953
3.98k
      current_block->phi_variables.push_back({ ops[i], ops[i + 1], id });
954
1.06k
    break;
955
1.06k
  }
956
957
  // Constants
958
26
  case OpSpecConstant:
959
4.64k
  case OpConstant:
960
4.64k
  case OpConstantCompositeReplicateEXT:
961
4.64k
  case OpSpecConstantCompositeReplicateEXT:
962
4.64k
  {
963
4.64k
    uint32_t id = ops[1];
964
4.64k
    auto &type = get<SPIRType>(ops[0]);
965
4.64k
    if (op == OpConstantCompositeReplicateEXT || op == OpSpecConstantCompositeReplicateEXT)
966
0
    {
967
0
      auto subconstant = uint32_t(ops[2]);
968
0
      set<SPIRConstant>(id, ops[0], &subconstant, 1, op == OpSpecConstantCompositeReplicateEXT, true);
969
0
    }
970
4.64k
    else
971
4.64k
    {
972
973
4.64k
      if (type.width > 32)
974
82
        set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant);
975
4.55k
      else
976
4.55k
        set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant);
977
4.64k
    }
978
4.64k
    break;
979
4.64k
  }
980
981
389
  case OpSpecConstantFalse:
982
761
  case OpConstantFalse:
983
761
  {
984
761
    uint32_t id = ops[1];
985
761
    set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse);
986
761
    break;
987
389
  }
988
989
218
  case OpSpecConstantTrue:
990
562
  case OpConstantTrue:
991
562
  {
992
562
    uint32_t id = ops[1];
993
562
    set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue);
994
562
    break;
995
218
  }
996
997
1.61k
  case OpConstantNull:
998
1.61k
  {
999
1.61k
    uint32_t id = ops[1];
1000
1.61k
    uint32_t type = ops[0];
1001
1.61k
    ir.make_constant_null(id, type, true);
1002
1.61k
    break;
1003
218
  }
1004
1005
345
  case OpSpecConstantComposite:
1006
1.02k
  case OpConstantComposite:
1007
1.02k
  {
1008
1.02k
    uint32_t id = ops[1];
1009
1.02k
    uint32_t type = ops[0];
1010
1011
1.02k
    auto &ctype = get<SPIRType>(type);
1012
1013
    // We can have constants which are structs and arrays.
1014
    // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we
1015
    // can refer to.
1016
1.02k
    if (ctype.basetype == SPIRType::Struct || !ctype.array.empty())
1017
329
    {
1018
329
      set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite);
1019
329
    }
1020
700
    else
1021
700
    {
1022
700
      uint32_t elements = length - 2;
1023
700
      if (elements > 4)
1024
0
        SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements.");
1025
1026
700
      SPIRConstant remapped_constant_ops[4];
1027
700
      const SPIRConstant *c[4];
1028
2.57k
      for (uint32_t i = 0; i < elements; i++)
1029
1.87k
      {
1030
        // Specialization constants operations can also be part of this.
1031
        // We do not know their value, so any attempt to query SPIRConstant later
1032
        // will fail. We can only propagate the ID of the expression and use to_expression on it.
1033
1.87k
        auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]);
1034
1.87k
        auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]);
1035
1.87k
        if (constant_op)
1036
35
        {
1037
35
          if (op == OpConstantComposite)
1038
0
            SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite.");
1039
1040
35
          remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype));
1041
35
          remapped_constant_ops[i].self = constant_op->self;
1042
35
          remapped_constant_ops[i].constant_type = constant_op->basetype;
1043
35
          remapped_constant_ops[i].specialization = true;
1044
35
          c[i] = &remapped_constant_ops[i];
1045
35
        }
1046
1.84k
        else if (undef_op)
1047
55
        {
1048
          // Undefined, just pick 0.
1049
55
          remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype));
1050
55
          remapped_constant_ops[i].constant_type = undef_op->basetype;
1051
55
          c[i] = &remapped_constant_ops[i];
1052
55
        }
1053
1.78k
        else
1054
1.78k
          c[i] = &get<SPIRConstant>(ops[2 + i]);
1055
1.87k
      }
1056
700
      set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite);
1057
700
    }
1058
1.02k
    break;
1059
1.02k
  }
1060
1061
  // Functions
1062
1.02k
  case OpFunction:
1063
491
  {
1064
491
    uint32_t res = ops[0];
1065
491
    uint32_t id = ops[1];
1066
    // Control
1067
491
    uint32_t type = ops[3];
1068
1069
491
    if (current_function)
1070
1
      SPIRV_CROSS_THROW("Must end a function before starting a new one!");
1071
1072
490
    current_function = &set<SPIRFunction>(id, res, type);
1073
490
    break;
1074
491
  }
1075
1076
149
  case OpFunctionParameter:
1077
149
  {
1078
149
    uint32_t type = ops[0];
1079
149
    uint32_t id = ops[1];
1080
1081
149
    if (!current_function)
1082
4
      SPIRV_CROSS_THROW("Must be in a function!");
1083
1084
145
    current_function->add_parameter(type, id);
1085
145
    set<SPIRVariable>(id, type, StorageClassFunction);
1086
145
    break;
1087
149
  }
1088
1089
2.08k
  case OpFunctionEnd:
1090
2.08k
  {
1091
2.08k
    if (current_block)
1092
1
    {
1093
      // Very specific error message, but seems to come up quite often.
1094
1
      SPIRV_CROSS_THROW(
1095
1
          "Cannot end a function before ending the current block.\n"
1096
1
          "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid.");
1097
1
    }
1098
2.08k
    current_function = nullptr;
1099
2.08k
    break;
1100
2.08k
  }
1101
1102
  // Blocks
1103
9.25k
  case OpLabel:
1104
9.25k
  {
1105
    // OpLabel always starts a block.
1106
9.25k
    if (!current_function)
1107
4
      SPIRV_CROSS_THROW("Blocks cannot exist outside functions!");
1108
1109
9.25k
    uint32_t id = ops[0];
1110
1111
9.25k
    current_function->blocks.push_back(id);
1112
9.25k
    if (!current_function->entry_block)
1113
486
      current_function->entry_block = id;
1114
1115
9.25k
    if (current_block)
1116
9
      SPIRV_CROSS_THROW("Cannot start a block before ending the current block.");
1117
1118
9.24k
    current_block = &set<SPIRBlock>(id);
1119
9.24k
    break;
1120
9.25k
  }
1121
1122
  // Branch instructions end blocks.
1123
6.15k
  case OpBranch:
1124
6.15k
  {
1125
6.15k
    if (!current_block)
1126
3
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1127
1128
6.15k
    uint32_t target = ops[0];
1129
6.15k
    current_block->terminator = SPIRBlock::Direct;
1130
6.15k
    current_block->next_block = target;
1131
6.15k
    current_block = nullptr;
1132
6.15k
    break;
1133
6.15k
  }
1134
1135
1.68k
  case OpBranchConditional:
1136
1.68k
  {
1137
1.68k
    if (!current_block)
1138
2
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1139
1140
1.68k
    current_block->condition = ops[0];
1141
1.68k
    current_block->true_block = ops[1];
1142
1.68k
    current_block->false_block = ops[2];
1143
1144
1.68k
    current_block->terminator = SPIRBlock::Select;
1145
1146
1.68k
    if (current_block->true_block == current_block->false_block)
1147
81
    {
1148
      // Bogus conditional, translate to a direct branch.
1149
      // Avoids some ugly edge cases later when analyzing CFGs.
1150
1151
      // There are some super jank cases where the merge block is different from the true/false,
1152
      // and later branches can "break" out of the selection construct this way.
1153
      // This is complete nonsense, but CTS hits this case.
1154
      // In this scenario, we should see the selection construct as more of a Switch with one default case.
1155
      // The problem here is that this breaks any attempt to break out of outer switch statements,
1156
      // but it's theoretically solvable if this ever comes up using the ladder breaking system ...
1157
1158
81
      if (current_block->true_block != current_block->next_block &&
1159
68
          current_block->merge == SPIRBlock::MergeSelection)
1160
14
      {
1161
14
        uint32_t ids = ir.increase_bound_by(2);
1162
1163
14
        auto &type = set<SPIRType>(ids, OpTypeInt);
1164
14
        type.basetype = SPIRType::Int;
1165
14
        type.width = 32;
1166
14
        auto &c = set<SPIRConstant>(ids + 1, ids);
1167
1168
14
        current_block->condition = c.self;
1169
14
        current_block->default_block = current_block->true_block;
1170
14
        current_block->terminator = SPIRBlock::MultiSelect;
1171
14
        ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
1172
14
        ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
1173
14
      }
1174
67
      else
1175
67
      {
1176
        // Collapse loops if we have to.
1177
67
        bool collapsed_loop = current_block->true_block == current_block->merge_block &&
1178
57
                              current_block->merge == SPIRBlock::MergeLoop;
1179
1180
67
        if (collapsed_loop)
1181
45
        {
1182
45
          ir.block_meta[current_block->merge_block] &= ~ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
1183
45
          ir.block_meta[current_block->continue_block] &= ~ParsedIR::BLOCK_META_CONTINUE_BIT;
1184
45
        }
1185
1186
67
        current_block->next_block = current_block->true_block;
1187
67
        current_block->condition = 0;
1188
67
        current_block->true_block = 0;
1189
67
        current_block->false_block = 0;
1190
67
        current_block->merge_block = 0;
1191
67
        current_block->merge = SPIRBlock::MergeNone;
1192
67
        current_block->terminator = SPIRBlock::Direct;
1193
67
      }
1194
81
    }
1195
1196
1.68k
    current_block = nullptr;
1197
1.68k
    break;
1198
1.68k
  }
1199
1200
445
  case OpSwitch:
1201
445
  {
1202
445
    if (!current_block)
1203
0
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1204
1205
445
    current_block->terminator = SPIRBlock::MultiSelect;
1206
1207
445
    current_block->condition = ops[0];
1208
445
    current_block->default_block = ops[1];
1209
1210
445
    uint32_t remaining_ops = length - 2;
1211
445
    if ((remaining_ops % 2) == 0)
1212
430
    {
1213
2.48k
      for (uint32_t i = 2; i + 2 <= length; i += 2)
1214
2.05k
        current_block->cases_32bit.push_back({ ops[i], ops[i + 1] });
1215
430
    }
1216
1217
445
    if ((remaining_ops % 3) == 0)
1218
259
    {
1219
433
      for (uint32_t i = 2; i + 3 <= length; i += 3)
1220
174
      {
1221
174
        uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i];
1222
174
        current_block->cases_64bit.push_back({ value, ops[i + 2] });
1223
174
      }
1224
259
    }
1225
1226
    // If we jump to next block, make it break instead since we're inside a switch case block at that point.
1227
445
    ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT;
1228
1229
445
    current_block = nullptr;
1230
445
    break;
1231
445
  }
1232
1233
38
  case OpKill:
1234
38
  case OpTerminateInvocation:
1235
38
  {
1236
38
    if (!current_block)
1237
0
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1238
38
    current_block->terminator = SPIRBlock::Kill;
1239
38
    current_block = nullptr;
1240
38
    break;
1241
38
  }
1242
1243
0
  case OpTerminateRayKHR:
1244
    // NV variant is not a terminator.
1245
0
    if (!current_block)
1246
0
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1247
0
    current_block->terminator = SPIRBlock::TerminateRay;
1248
0
    current_block = nullptr;
1249
0
    break;
1250
1251
0
  case OpIgnoreIntersectionKHR:
1252
    // NV variant is not a terminator.
1253
0
    if (!current_block)
1254
0
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1255
0
    current_block->terminator = SPIRBlock::IgnoreIntersection;
1256
0
    current_block = nullptr;
1257
0
    break;
1258
1259
0
  case OpEmitMeshTasksEXT:
1260
0
    if (!current_block)
1261
0
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1262
0
    current_block->terminator = SPIRBlock::EmitMeshTasks;
1263
0
    for (uint32_t i = 0; i < 3; i++)
1264
0
      current_block->mesh.groups[i] = ops[i];
1265
0
    current_block->mesh.payload = length >= 4 ? ops[3] : 0;
1266
0
    current_block = nullptr;
1267
    // Currently glslang is bugged and does not treat EmitMeshTasksEXT as a terminator.
1268
0
    ignore_trailing_block_opcodes = true;
1269
0
    break;
1270
1271
283
  case OpReturn:
1272
283
  {
1273
283
    if (!current_block)
1274
8
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1275
275
    current_block->terminator = SPIRBlock::Return;
1276
275
    current_block = nullptr;
1277
275
    break;
1278
283
  }
1279
1280
141
  case OpReturnValue:
1281
141
  {
1282
141
    if (!current_block)
1283
3
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1284
138
    current_block->terminator = SPIRBlock::Return;
1285
138
    current_block->return_value = ops[0];
1286
138
    current_block = nullptr;
1287
138
    break;
1288
141
  }
1289
1290
381
  case OpUnreachable:
1291
381
  {
1292
381
    if (!current_block)
1293
1
      SPIRV_CROSS_THROW("Trying to end a non-existing block.");
1294
380
    current_block->terminator = SPIRBlock::Unreachable;
1295
380
    current_block = nullptr;
1296
380
    break;
1297
381
  }
1298
1299
1.41k
  case OpSelectionMerge:
1300
1.41k
  {
1301
1.41k
    if (!current_block)
1302
3
      SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
1303
1304
1.40k
    current_block->next_block = ops[0];
1305
1.40k
    current_block->merge = SPIRBlock::MergeSelection;
1306
1.40k
    ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT;
1307
1308
1.40k
    if (length >= 2)
1309
1.40k
    {
1310
1.40k
      if (ops[1] & SelectionControlFlattenMask)
1311
18
        current_block->hint = SPIRBlock::HintFlatten;
1312
1.38k
      else if (ops[1] & SelectionControlDontFlattenMask)
1313
7
        current_block->hint = SPIRBlock::HintDontFlatten;
1314
1.40k
    }
1315
1.40k
    break;
1316
1.41k
  }
1317
1318
1.45k
  case OpLoopMerge:
1319
1.45k
  {
1320
1.45k
    if (!current_block)
1321
0
      SPIRV_CROSS_THROW("Trying to modify a non-existing block.");
1322
1323
1.45k
    current_block->merge_block = ops[0];
1324
1.45k
    current_block->continue_block = ops[1];
1325
1.45k
    current_block->merge = SPIRBlock::MergeLoop;
1326
1327
1.45k
    ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT;
1328
1.45k
    ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT;
1329
1330
1.45k
    ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self);
1331
1332
    // Don't add loop headers to continue blocks,
1333
    // which would make it impossible to branch into the loop header since
1334
    // they are treated as continues.
1335
1.45k
    if (current_block->continue_block != BlockID(current_block->self))
1336
1.41k
      ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT;
1337
1338
1.45k
    if (length >= 3)
1339
1.44k
    {
1340
1.44k
      if (ops[2] & LoopControlUnrollMask)
1341
512
        current_block->hint = SPIRBlock::HintUnroll;
1342
929
      else if (ops[2] & LoopControlDontUnrollMask)
1343
205
        current_block->hint = SPIRBlock::HintDontUnroll;
1344
1.44k
    }
1345
1.45k
    break;
1346
1.45k
  }
1347
1348
2.06k
  case OpSpecConstantOp:
1349
2.06k
  {
1350
2.06k
    if (length < 3)
1351
2
      SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments.");
1352
1353
2.06k
    uint32_t result_type = ops[0];
1354
2.06k
    uint32_t id = ops[1];
1355
2.06k
    auto spec_op = static_cast<Op>(ops[2]);
1356
1357
2.06k
    set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3);
1358
2.06k
    break;
1359
2.06k
  }
1360
1361
116
  case OpLine:
1362
116
  {
1363
    // OpLine might come at global scope, but we don't care about those since they will not be declared in any
1364
    // meaningful correct order.
1365
    // Ignore all OpLine directives which live outside a function.
1366
116
    if (current_block)
1367
49
      current_block->ops.push_back(instruction);
1368
1369
    // Line directives may arrive before first OpLabel.
1370
    // Treat this as the line of the function declaration,
1371
    // so warnings for arguments can propagate properly.
1372
116
    if (current_function)
1373
49
    {
1374
      // Store the first one we find and emit it before creating the function prototype.
1375
49
      if (current_function->entry_line.file_id == 0)
1376
10
      {
1377
10
        current_function->entry_line.file_id = ops[0];
1378
10
        current_function->entry_line.line_literal = ops[1];
1379
10
      }
1380
49
    }
1381
116
    break;
1382
2.06k
  }
1383
1384
397
  case OpNoLine:
1385
397
  {
1386
    // OpNoLine might come at global scope.
1387
397
    if (current_block)
1388
0
      current_block->ops.push_back(instruction);
1389
397
    break;
1390
2.06k
  }
1391
1392
  // Actual opcodes.
1393
21.5k
  default:
1394
21.5k
  {
1395
21.5k
    if (length >= 2)
1396
21.5k
    {
1397
21.5k
      const auto *type = maybe_get<SPIRType>(ops[0]);
1398
21.5k
      if (type)
1399
18.1k
        ir.load_type_width.insert({ ops[1], type->width });
1400
21.5k
    }
1401
1402
21.5k
    if (!current_block)
1403
60
      SPIRV_CROSS_THROW("Currently no block to insert opcode.");
1404
1405
21.4k
    current_block->ops.push_back(instruction);
1406
21.4k
    break;
1407
21.5k
  }
1408
249k
  }
1409
249k
}
1410
1411
bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
1412
3.46k
{
1413
3.46k
  if (a.basetype != b.basetype)
1414
0
    return false;
1415
3.46k
  if (a.width != b.width)
1416
0
    return false;
1417
3.46k
  if (a.vecsize != b.vecsize)
1418
0
    return false;
1419
3.46k
  if (a.columns != b.columns)
1420
0
    return false;
1421
3.46k
  if (a.array.size() != b.array.size())
1422
0
    return false;
1423
1424
3.46k
  size_t array_count = a.array.size();
1425
3.46k
  if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
1426
0
    return false;
1427
1428
3.46k
  if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
1429
6
  {
1430
6
    if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
1431
0
      return false;
1432
6
  }
1433
1434
3.46k
  if (a.member_types.size() != b.member_types.size())
1435
0
    return false;
1436
1437
3.46k
  size_t member_types = a.member_types.size();
1438
5.24k
  for (size_t i = 0; i < member_types; i++)
1439
1.77k
  {
1440
1.77k
    if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
1441
0
      return false;
1442
1.77k
  }
1443
1444
3.46k
  return true;
1445
3.46k
}
1446
1447
bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const
1448
0
{
1449
0
  auto &type = get<SPIRType>(v.basetype);
1450
1451
0
  auto *type_meta = ir.find_meta(type.self);
1452
1453
0
  bool ssbo = v.storage == StorageClassStorageBuffer ||
1454
0
              (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock));
1455
0
  bool image = type.basetype == SPIRType::Image;
1456
0
  bool counter = type.basetype == SPIRType::AtomicCounter;
1457
1458
0
  bool is_restrict;
1459
0
  if (ssbo)
1460
0
    is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
1461
0
  else
1462
0
    is_restrict = ir.has_decoration(v.self, DecorationRestrict);
1463
1464
0
  return !is_restrict && (ssbo || image || counter);
1465
0
}
1466
} // namespace SPIRV_CROSS_NAMESPACE