Coverage Report

Created: 2025-07-15 06:22

/src/keystone/llvm/lib/MC/MCAssembler.cpp
Line
Count
Source
1
//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
2
//
3
//                     The LLVM Compiler Infrastructure
4
//
5
// This file is distributed under the University of Illinois Open Source
6
// License. See LICENSE.TXT for details.
7
//
8
//===----------------------------------------------------------------------===//
9
10
#include "llvm/MC/MCAssembler.h"
11
#include "llvm/ADT/StringExtras.h"
12
#include "llvm/ADT/Twine.h"
13
#include "llvm/MC/MCAsmBackend.h"
14
#include "llvm/MC/MCAsmInfo.h"
15
#include "llvm/MC/MCAsmLayout.h"
16
#include "llvm/MC/MCCodeEmitter.h"
17
#include "llvm/MC/MCContext.h"
18
#include "llvm/MC/MCDwarf.h"
19
#include "llvm/MC/MCExpr.h"
20
#include "llvm/MC/MCFixupKindInfo.h"
21
#include "llvm/MC/MCObjectWriter.h"
22
#include "llvm/MC/MCSection.h"
23
#include "llvm/MC/MCSectionELF.h"
24
#include "llvm/MC/MCSymbol.h"
25
#include "llvm/MC/MCValue.h"
26
#include "llvm/Support/Debug.h"
27
#include "llvm/Support/ErrorHandling.h"
28
#include "llvm/Support/LEB128.h"
29
#include "llvm/Support/TargetRegistry.h"
30
#include "llvm/Support/raw_ostream.h"
31
#include <tuple>
32
33
#include "keystone/keystone.h"
34
35
using namespace llvm_ks;
36
37
#define DEBUG_TYPE "assembler"
38
39
// FIXME FIXME FIXME: There are a number of places in this file where we convert
40
// what is a 64-bit assembler value used for computation into a value in the
41
// object file, which may truncate it. We should detect that truncation where
42
// invalid and report errors back.
43
44
/* *** */
45
46
MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
47
                         MCCodeEmitter &Emitter_, MCObjectWriter &Writer_)
48
130k
    : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_),
49
130k
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
50
130k
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
51
130k
  VersionMinInfo.Major = 0; // Major version == 0 for "none specified"
52
130k
}
53
54
130k
MCAssembler::~MCAssembler() {
55
130k
}
56
57
0
void MCAssembler::reset() {
58
0
  Sections.clear();
59
0
  Symbols.clear();
60
0
  IndirectSymbols.clear();
61
0
  DataRegions.clear();
62
0
  LinkerOptions.clear();
63
0
  FileNames.clear();
64
0
  ThumbFuncs.clear();
65
0
  BundleAlignSize = 0;
66
0
  RelaxAll = false;
67
0
  SubsectionsViaSymbols = false;
68
0
  IncrementalLinkerCompatible = false;
69
0
  ELFHeaderEFlags = 0;
70
0
  LOHContainer.reset();
71
0
  VersionMinInfo.Major = 0;
72
73
  // reset objects owned by us
74
0
  getBackend().reset();
75
0
  getEmitter().reset();
76
0
  getWriter().reset();
77
0
  getLOHContainer().reset();
78
0
}
79
80
139k
bool MCAssembler::registerSection(MCSection &Section) {
81
139k
  if (Section.isRegistered())
82
6.05k
    return false;
83
133k
  Sections.push_back(&Section);
84
133k
  Section.setIsRegistered(true);
85
133k
  return true;
86
139k
}
87
88
304k
bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
89
304k
  if (ThumbFuncs.count(Symbol))
90
0
    return true;
91
92
304k
  if (!Symbol->isVariable())
93
297k
    return false;
94
95
  // FIXME: It looks like gas supports some cases of the form "foo + 2". It
96
  // is not clear if that is a bug or a feature.
97
7.60k
  const MCExpr *Expr = Symbol->getVariableValue();
98
7.60k
  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr);
99
7.60k
  if (!Ref)
100
2.89k
    return false;
101
102
4.70k
  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
103
0
    return false;
104
105
4.70k
  const MCSymbol &Sym = Ref->getSymbol();
106
4.70k
  if (!isThumbFunc(&Sym))
107
4.70k
    return false;
108
109
0
  ThumbFuncs.insert(Symbol); // Cache it.
110
0
  return true;
111
4.70k
}
112
113
0
bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
114
  // Non-temporary labels should always be visible to the linker.
115
0
  if (!Symbol.isTemporary())
116
0
    return true;
117
118
  // Absolute temporary labels are never visible.
119
0
  if (!Symbol.isInSection())
120
0
    return false;
121
122
0
  if (Symbol.isUsedInReloc())
123
0
    return true;
124
125
0
  return false;
126
0
}
127
128
0
const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
129
  // Linker visible symbols define atoms.
130
0
  if (isSymbolLinkerVisible(S))
131
0
    return &S;
132
133
  // Absolute and undefined symbols have no defining atom.
134
0
  if (!S.isInSection())
135
0
    return nullptr;
136
137
  // Non-linker visible symbols in sections which can't be atomized have no
138
  // defining atom.
139
0
  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
140
0
          *S.getFragment()->getParent()))
141
0
    return nullptr;
142
143
  // Otherwise, return the atom for the containing fragment.
144
0
  return S.getFragment()->getAtom();
145
0
}
146
147
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
148
                                const MCFixup &Fixup, const MCFragment *DF,
149
                                MCValue &Target, uint64_t &Value, unsigned int &KsError) const
150
218k
{
151
218k
  KsError = 0;
152
153
  // FIXME: This code has some duplication with recordRelocation. We should
154
  // probably merge the two into a single callback that tries to evaluate a
155
  // fixup and records a relocation if one is needed.
156
218k
  const MCExpr *Expr = Fixup.getValue();
157
218k
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
158
    // getContext().reportError(Fixup.getLoc(), "expected relocatable expression");
159
    // Claim to have completely evaluated the fixup, to prevent any further
160
    // processing from being done.
161
    // return true;
162
2.83k
    Value = 0;
163
2.83k
    KsError = KS_ERR_ASM_INVALIDOPERAND;
164
2.83k
    return false;
165
2.83k
  }
166
167
216k
  bool IsPCRel = Backend.getFixupKindInfo(
168
216k
    Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;
169
170
216k
  bool IsResolved;
171
216k
  if (IsPCRel) {
172
52.3k
    if (Target.getSymB()) {
173
8.58k
      IsResolved = false;
174
43.7k
    } else if (!Target.getSymA()) {
175
14.6k
      if (getBackend().getArch() == KS_ARCH_X86)
176
11.9k
          IsResolved = true;
177
2.64k
      else
178
2.64k
          IsResolved = false;
179
29.1k
    } else {
180
29.1k
      const MCSymbolRefExpr *A = Target.getSymA();
181
29.1k
      const MCSymbol &SA = A->getSymbol();
182
29.1k
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
183
6.18k
        IsResolved = false;
184
22.9k
      } else {
185
22.9k
        IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
186
22.9k
            *this, SA, *DF, false, true);
187
22.9k
      }
188
29.1k
    }
189
163k
  } else {
190
163k
    IsResolved = Target.isAbsolute();
191
163k
  }
192
193
216k
  Value = Target.getConstant();
194
195
216k
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
196
171k
    const MCSymbol &Sym = A->getSymbol();
197
171k
    bool valid;
198
171k
    if (Sym.isDefined()) {
199
163k
      Value += Layout.getSymbolOffset(Sym, valid);
200
163k
      if (!valid) {
201
136
        KsError = KS_ERR_ASM_FIXUP_INVALID;
202
136
        return false;
203
136
      }
204
163k
    } else {
205
        // A missing symbol: is there any resolver registered?
206
7.59k
        if (KsSymResolver) {
207
0
            uint64_t imm;
208
0
            ks_sym_resolver resolver = (ks_sym_resolver)KsSymResolver;
209
0
            if (resolver(Sym.getName().str().c_str(), &imm)) {
210
                // resolver handled this symbol
211
0
                Value = imm;
212
0
                IsResolved = true;
213
0
            } else {
214
                // resolver did not handle this symbol
215
0
                KsError = KS_ERR_ASM_SYMBOL_MISSING;
216
0
                return false;
217
0
            }
218
7.59k
        } else {
219
            // no resolver registered
220
7.59k
            KsError = KS_ERR_ASM_SYMBOL_MISSING;
221
7.59k
            return false;
222
7.59k
        }
223
7.59k
    }
224
171k
  }
225
226
208k
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
227
78.7k
    const MCSymbol &Sym = B->getSymbol();
228
78.7k
    bool valid;
229
78.7k
    if (Sym.isDefined()) {
230
7.41k
      Value -= Layout.getSymbolOffset(Sym, valid);
231
7.41k
      if (!valid) {
232
20
        KsError = KS_ERR_ASM_FIXUP_INVALID;
233
20
        return false;
234
20
      }
235
7.41k
    }
236
78.7k
  }
237
238
208k
  bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
239
208k
                         MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
240
208k
  assert((ShouldAlignPC ? IsPCRel : true) &&
241
208k
    "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");
242
243
208k
  if (IsPCRel) {
244
45.8k
    bool valid;
245
45.8k
    uint64_t Offset = Layout.getFragmentOffset(DF, valid) + Fixup.getOffset();
246
45.8k
    if (!valid) {
247
824
        KsError = KS_ERR_ASM_FRAGMENT_INVALID;
248
824
        return false;
249
824
    }
250
251
    // A number of ARM fixups in Thumb mode require that the effective PC
252
    // address be determined as the 32-bit aligned version of the actual offset.
253
45.0k
    if (ShouldAlignPC) Offset &= ~0x3;
254
45.0k
    Value -= Offset;
255
45.0k
  }
256
257
  // Let the backend adjust the fixup value if necessary, including whether
258
  // we need a relocation.
259
207k
  Backend.processFixupValue(*this, Layout, Fixup, DF, Target, Value,
260
207k
                            IsResolved);
261
262
207k
  return IsResolved;
263
208k
}
264
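Note: the KsSymResolver fallback above is a Keystone-specific extension; when a fixup references a symbol the assembler cannot find, a user-supplied callback gets a chance to supply a value before the fixup fails with KS_ERR_ASM_SYMBOL_MISSING. A minimal sketch of how a client could register such a resolver through the public C API (the callback name and the symbol it handles are illustrative, not part of this file):

    #include <string.h>
    #include <keystone/keystone.h>

    // Illustrative resolver: supply an address for the otherwise-undefined symbol "my_func".
    static bool sym_resolver(const char *symbol, uint64_t *value) {
      if (strcmp(symbol, "my_func") == 0) {
        *value = 0x1000;   // evaluateFixup() uses this value and treats the fixup as resolved
        return true;
      }
      return false;        // unhandled: assembly fails with KS_ERR_ASM_SYMBOL_MISSING
    }

    // After ks_open(), assuming the KS_OPT_SYM_RESOLVER option:
    //   ks_option(ks, KS_OPT_SYM_RESOLVER, (size_t)sym_resolver);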
265
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
266
                                          const MCFragment &F, bool &valid) const
267
698k
{
268
698k
  valid = true;
269
698k
  switch (F.getKind()) {
270
85.9k
  case MCFragment::FT_Data:
271
85.9k
    return cast<MCDataFragment>(F).getContents().size();
272
41.2k
  case MCFragment::FT_Relaxable:
273
41.2k
    return cast<MCRelaxableFragment>(F).getContents().size();
274
0
  case MCFragment::FT_CompactEncodedInst:
275
0
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
276
78.2k
  case MCFragment::FT_Fill:
277
78.2k
    return cast<MCFillFragment>(F).getSize();
278
279
0
  case MCFragment::FT_LEB:
280
0
    return cast<MCLEBFragment>(F).getContents().size();
281
282
0
  case MCFragment::FT_SafeSEH:
283
0
    return 4;
284
285
146k
  case MCFragment::FT_Align: {
286
146k
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
287
146k
    unsigned Offset = Layout.getFragmentOffset(&AF, valid);
288
146k
    if (!valid) {
289
0
        return 0;
290
0
    }
291
146k
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
292
    // If we are padding with nops, force the padding to be larger than the
293
    // minimum nop size.
294
146k
    if (Size > 0 && AF.hasEmitNops()) {
295
10.6k
      while (Size % getBackend().getMinimumNopSize())
296
0
        Size += AF.getAlignment();
297
10.6k
    }
298
146k
    if (Size > AF.getMaxBytesToEmit())
299
2.42k
      return 0;
300
144k
    return Size;
301
146k
  }
302
303
346k
  case MCFragment::FT_Org: {
304
346k
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
305
346k
    MCValue Value;
306
346k
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
307
      //report_fatal_error("expected assembly-time absolute expression");
308
10.5k
      valid = false;
309
10.5k
      return 0;
310
10.5k
    }
311
312
    // FIXME: We need a way to communicate this error.
313
336k
    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF, valid);
314
336k
    if (!valid) {
315
15
      return 0;
316
15
    }
317
336k
    int64_t TargetLocation = Value.getConstant();
318
336k
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
319
22.4k
      uint64_t Val;
320
22.4k
      if (!Layout.getSymbolOffset(A->getSymbol(), Val, valid)) {
321
        //report_fatal_error("expected absolute expression");
322
9.47k
        valid = false;
323
9.47k
        return 0;
324
9.47k
      }
325
12.9k
      TargetLocation += Val;
326
12.9k
    }
327
326k
    int64_t Size = TargetLocation - FragmentOffset;
328
326k
    if (Size < 0 || Size >= 0x40000000) {
329
      //report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
330
      //                   "' (at offset '" + Twine(FragmentOffset) + "')");
331
32.2k
      valid = false;
332
32.2k
      return 0;
333
32.2k
    }
334
294k
    return Size;
335
326k
  }
336
337
0
  case MCFragment::FT_Dwarf:
338
0
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
339
0
  case MCFragment::FT_DwarfFrame:
340
0
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
341
0
  case MCFragment::FT_Dummy:
342
0
    llvm_unreachable("Should not have been added");
343
698k
  }
344
345
698k
  llvm_unreachable("invalid fragment kind");
346
698k
}
347
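The FT_Align case above leans on OffsetToAlignment (from llvm/Support/MathExtras.h) to decide how many padding bytes bring the fragment's start offset up to the requested alignment. A minimal sketch of the equivalent computation, with a worked example:

    // Bytes to add to Offset so that it becomes a multiple of Align.
    static uint64_t offsetToAlignment(uint64_t Offset, uint64_t Align) {
      return (Align - (Offset % Align)) % Align;
    }
    // e.g. offsetToAlignment(0x7, 4) == 1 and offsetToAlignment(0x8, 4) == 0,
    // so an align fragment starting at offset 0x8 with .align 4 contributes zero bytes.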
348
bool MCAsmLayout::layoutFragment(MCFragment *F)
349
487k
{
350
487k
  MCFragment *Prev = F->getPrevNode();
351
352
  // We should never try to recompute something which is valid.
353
  //assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
354
487k
  if (isFragmentValid(F))
355
0
      return true;
356
357
  // We should never try to compute the fragment layout if its predecessor
358
  // isn't valid.
359
  //assert((!Prev || isFragmentValid(Prev)) &&
360
  //       "Attempt to compute fragment before its predecessor!");
361
487k
  if (Prev && !isFragmentValid(Prev))
362
37.6k
      return true;
363
364
450k
  bool valid = true;
365
  // Compute fragment offset and size.
366
450k
  if (Prev)
367
390k
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev, valid);
368
59.3k
  else
369
59.3k
    F->Offset = getAssembler().getContext().getBaseAddress();
370
450k
  if (!valid) {
371
49.6k
      return false;
372
49.6k
  }
373
400k
  LastValidFragment[F->getParent()] = F;
374
375
  // If bundling is enabled and this fragment has instructions in it, it has to
376
  // obey the bundling restrictions. With padding, we'll have:
377
  //
378
  //
379
  //        BundlePadding
380
  //             |||
381
  // -------------------------------------
382
  //   Prev  |##########|       F        |
383
  // -------------------------------------
384
  //                    ^
385
  //                    |
386
  //                    F->Offset
387
  //
388
  // The fragment's offset will point to after the padding, and its computed
389
  // size won't include the padding.
390
  //
391
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
392
  // padding directly into fragments when the instructions are emitted inside
393
  // the streamer. When the fragment is larger than the bundle size, we need to
394
  // ensure that it's bundle aligned. This means that if we end up with
395
  // multiple fragments, we must emit bundle padding between fragments.
396
  //
397
  // ".align N" is an example of a directive that introduces multiple
398
  // fragments. We could add a special case to handle ".align N" by emitting
399
  // within-fragment padding (which would produce less padding when N is less
400
  // than the bundle size), but for now we don't.
401
  //
402
400k
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
403
0
    assert(isa<MCEncodedFragment>(F) &&
404
0
           "Only MCEncodedFragment implementations have instructions");
405
0
    if (!isa<MCEncodedFragment>(F))
406
0
        return true;
407
408
0
    bool valid;
409
0
    uint64_t FSize = Assembler.computeFragmentSize(*this, *F, valid);
410
0
    if (!valid)
411
0
        return true;
412
413
0
    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
414
      //report_fatal_error("Fragment can't be larger than a bundle size");
415
0
      return true;
416
417
0
    uint64_t RequiredBundlePadding = computeBundlePadding(Assembler, F,
418
0
                                                          F->Offset, FSize);
419
0
    if (RequiredBundlePadding > UINT8_MAX)
420
      //report_fatal_error("Padding cannot exceed 255 bytes");
421
0
      return true;
422
423
0
    F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
424
0
    F->Offset += RequiredBundlePadding;
425
0
  }
426
427
400k
  return false;
428
400k
}
429
430
3.19M
void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
431
3.19M
  bool New = !Symbol.isRegistered();
432
3.19M
  if (Created)
433
0
    *Created = New;
434
3.19M
  if (New) {
435
968k
    Symbol.setIsRegistered(true);
436
968k
    Symbols.push_back(&Symbol);
437
968k
  }
438
3.19M
}
439
440
void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
441
305k
                                       MCObjectWriter *OW) const {
442
  // Should NOP padding be written out before this fragment?
443
305k
  unsigned BundlePadding = F.getBundlePadding();
444
305k
  if (BundlePadding > 0) {
445
0
    assert(isBundlingEnabled() &&
446
0
           "Writing bundle padding with disabled bundling");
447
0
    assert(F.hasInstructions() &&
448
0
           "Writing bundle padding for a fragment without instructions");
449
450
0
    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
451
0
    if (F.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
452
      // If the padding itself crosses a bundle boundary, it must be emitted
453
      // in 2 pieces, since even nop instructions must not cross boundaries.
454
      //             v--------------v   <- BundleAlignSize
455
      //        v---------v             <- BundlePadding
456
      // ----------------------------
457
      // | Prev |####|####|    F    |
458
      // ----------------------------
459
      //        ^-------------------^   <- TotalLength
460
0
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
461
0
      if (!getBackend().writeNopData(DistanceToBoundary, OW))
462
0
          report_fatal_error("unable to write NOP sequence of " +
463
0
                             Twine(DistanceToBoundary) + " bytes");
464
0
      BundlePadding -= DistanceToBoundary;
465
0
    }
466
0
    if (!getBackend().writeNopData(BundlePadding, OW))
467
0
      report_fatal_error("unable to write NOP sequence of " +
468
0
                         Twine(BundlePadding) + " bytes");
469
0
  }
470
305k
}
471
472
/// \brief Write the fragment \p F to the output file.
473
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
474
                          const MCFragment &F)
475
452k
{
476
452k
  if (Asm.getError())
477
143k
      return;
478
479
308k
  MCObjectWriter *OW = &Asm.getWriter();
480
481
308k
  bool valid;
482
  // FIXME: Embed in fragments instead?
483
308k
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F, valid);
484
308k
  if (!valid) {
485
2.56k
      Asm.setError(KS_ERR_ASM_FRAGMENT_INVALID);
486
2.56k
      return;
487
2.56k
  }
488
489
305k
  Asm.writeFragmentPadding(F, FragmentSize, OW);
490
491
  // This variable (and its dummy usage) is to participate in the assert at
492
  // the end of the function.
493
305k
  uint64_t Start = OW->getStream().tell();
494
305k
  (void) Start;
495
496
305k
  switch (F.getKind()) {
497
59.7k
  case MCFragment::FT_Align: {
498
59.7k
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
499
59.7k
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");
500
501
59.7k
    uint64_t Count = FragmentSize / AF.getValueSize();
502
503
    // FIXME: This error shouldn't actually occur (the front end should emit
504
    // multiple .align directives to enforce the semantics it wants), but is
505
    // severe enough that we want to report it. How to handle this?
506
59.7k
    if (Count * AF.getValueSize() != FragmentSize)
507
0
      report_fatal_error("undefined .align directive, value size '" +
508
0
                        Twine(AF.getValueSize()) +
509
0
                        "' is not a divisor of padding size '" +
510
0
                        Twine(FragmentSize) + "'");
511
512
    // See if we are aligning with nops, and if so do that first to try to fill
513
    // the Count bytes.  Then if that did not fill any bytes or there are any
514
    // bytes left to fill use the Value and ValueSize to fill the rest.
515
    // If we are aligning with nops, ask that target to emit the right data.
516
59.7k
    if (AF.hasEmitNops()) {
517
21.5k
      if (!Asm.getBackend().writeNopData(Count, OW))
518
0
        report_fatal_error("unable to write nop sequence of " +
519
0
                          Twine(Count) + " bytes");
520
21.5k
      break;
521
21.5k
    }
522
523
    // Otherwise, write out in multiples of the value size.
524
121M
    for (uint64_t i = 0; i != Count; ++i) {
525
121M
      switch (AF.getValueSize()) {
526
0
      default: llvm_unreachable("Invalid size!");
527
121M
      case 1: OW->write8 (uint8_t (AF.getValue())); break;
528
0
      case 2: OW->write16(uint16_t(AF.getValue())); break;
529
18
      case 4: OW->write32(uint32_t(AF.getValue())); break;
530
0
      case 8: OW->write64(uint64_t(AF.getValue())); break;
531
121M
      }
532
121M
    }
533
38.2k
    break;
534
38.2k
  }
535
536
52.6k
  case MCFragment::FT_Data: 
537
52.6k
    OW->writeBytes(cast<MCDataFragment>(F).getContents());
538
52.6k
    break;
539
540
9.25k
  case MCFragment::FT_Relaxable:
541
9.25k
    OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
542
9.25k
    break;
543
544
0
  case MCFragment::FT_CompactEncodedInst:
545
0
    OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
546
0
    break;
547
548
38.6k
  case MCFragment::FT_Fill: {
549
38.6k
    const MCFillFragment &FF = cast<MCFillFragment>(F);
550
38.6k
    uint8_t V = FF.getValue();
551
38.6k
    const unsigned MaxChunkSize = 16;
552
38.6k
    char Data[MaxChunkSize];
553
38.6k
    memcpy(Data, &V, 1);
554
617k
    for (unsigned I = 1; I < MaxChunkSize; ++I)
555
579k
      Data[I] = Data[0];
556
557
38.6k
    uint64_t Size = FF.getSize();
558
231k
    for (unsigned ChunkSize = MaxChunkSize; ChunkSize; ChunkSize /= 2) {
559
193k
      StringRef Ref(Data, ChunkSize);
560
26.2M
      for (uint64_t I = 0, E = Size / ChunkSize; I != E; ++I)
561
26.0M
        OW->writeBytes(Ref);
562
193k
      Size = Size % ChunkSize;
563
193k
    }
564
38.6k
    break;
565
38.2k
  }
566
567
0
  case MCFragment::FT_LEB: {
568
0
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
569
0
    OW->writeBytes(LF.getContents());
570
0
    break;
571
38.2k
  }
572
573
0
  case MCFragment::FT_SafeSEH: {
574
0
    const MCSafeSEHFragment &SF = cast<MCSafeSEHFragment>(F);
575
0
    OW->write32(SF.getSymbol()->getIndex());
576
0
    break;
577
38.2k
  }
578
579
145k
  case MCFragment::FT_Org: {
580
145k
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
581
582
1.18G
    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
583
1.18G
      OW->write8(uint8_t(OF.getValue()));
584
585
145k
    break;
586
38.2k
  }
587
588
0
  case MCFragment::FT_Dwarf: {
589
0
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
590
0
    OW->writeBytes(OF.getContents());
591
0
    break;
592
38.2k
  }
593
0
  case MCFragment::FT_DwarfFrame: {
594
0
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
595
0
    OW->writeBytes(CF.getContents());
596
0
    break;
597
38.2k
  }
598
0
  case MCFragment::FT_Dummy:
599
0
    llvm_unreachable("Should not have been added");
600
305k
  }
601
602
305k
  assert(OW->getStream().tell() - Start == FragmentSize &&
603
305k
         "The stream should advance by fragment size");
604
305k
}
605
606
void MCAssembler::writeSectionData(const MCSection *Sec,
607
                                   const MCAsmLayout &Layout) const
608
53.6k
{
609
  // Ignore virtual sections.
610
53.6k
  if (Sec->isVirtualSection()) {
611
181
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");
612
613
    // Check that contents are only things legal inside a virtual section.
614
754
    for (const MCFragment &F : *Sec) {
615
754
      switch (F.getKind()) {
616
0
      default: llvm_unreachable("Invalid fragment in virtual section!");
617
0
      case MCFragment::FT_Data: {
618
        // Check that we aren't trying to write a non-zero contents (or fixups)
619
        // into a virtual section. This is to support clients which use standard
620
        // directives to fill the contents of virtual sections.
621
0
        const MCDataFragment &DF = cast<MCDataFragment>(F);
622
0
        assert(DF.fixup_begin() == DF.fixup_end() &&
623
0
               "Cannot have fixups in virtual section!");
624
0
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
625
0
          if (DF.getContents()[i]) {
626
0
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
627
0
              report_fatal_error("non-zero initializer found in section '" +
628
0
                  ELFSec->getSectionName() + "'");
629
0
            else
630
0
              report_fatal_error("non-zero initializer found in virtual section");
631
0
          }
632
0
        break;
633
0
      }
634
377
      case MCFragment::FT_Align:
635
        // Check that we aren't trying to write a non-zero value into a virtual
636
        // section.
637
377
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
638
377
                cast<MCAlignFragment>(F).getValue() == 0) &&
639
377
               "Invalid align in virtual section!");
640
377
        break;
641
377
      case MCFragment::FT_Fill:
642
377
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
643
377
               "Invalid fill in virtual section!");
644
377
        break;
645
754
      }
646
754
    }
647
648
181
    return;
649
181
  }
650
651
53.4k
  uint64_t Start = getWriter().getStream().tell();
652
53.4k
  (void)Start;
653
654
53.4k
  setError(0);
655
53.4k
  for (const MCFragment &F : *Sec)
656
452k
    writeFragment(*this, Layout, F);
657
658
  //assert(getWriter().getStream().tell() - Start ==
659
  //       Layout.getSectionAddressSize(Sec));
660
53.4k
}
661
662
std::pair<uint64_t, bool> MCAssembler::handleFixup(const MCAsmLayout &Layout,
663
                                                   MCFragment &F,
664
193k
                                                   const MCFixup &Fixup, unsigned int &KsError) {
665
  // Evaluate the fixup.
666
193k
  MCValue Target;
667
193k
  uint64_t FixedValue;
668
193k
  bool IsPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
669
193k
                 MCFixupKindInfo::FKF_IsPCRel;
670
193k
  if (!evaluateFixup(Layout, Fixup, &F, Target, FixedValue, KsError)) {
671
176k
    if (KsError) {
672
        // return a dummy value
673
5.06k
        return std::make_pair(0, false);
674
5.06k
    }
675
    // The fixup was unresolved, we need a relocation. Inform the object
676
    // writer of the relocation, and give it an opportunity to adjust the
677
    // fixup value if need be.
678
171k
    if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
679
78.2k
        if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
680
32
            KsError = KS_ERR_ASM_FIXUP_INVALID;
681
            // return a dummy value
682
32
            return std::make_pair(0, false);
683
32
        }
684
78.2k
    }
685
171k
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, IsPCRel,
686
171k
                                 FixedValue);
687
171k
  }
688
689
188k
  return std::make_pair(FixedValue, IsPCRel);
690
193k
}
691
692
void MCAssembler::layout(MCAsmLayout &Layout, unsigned int &KsError)
693
57.0k
{
694
57.0k
  DEBUG_WITH_TYPE("mc-dump", {
695
57.0k
      llvm_ks::errs() << "assembler backend - pre-layout\n--\n";
696
57.0k
      dump(); });
697
698
  // Create dummy fragments and assign section ordinals.
699
57.0k
  unsigned SectionIndex = 0;
700
58.9k
  for (MCSection &Sec : *this) {
701
    // Create dummy fragments to eliminate any empty sections, this simplifies
702
    // layout.
703
58.9k
    if (Sec.getFragmentList().empty())
704
0
      new MCDataFragment(&Sec);
705
706
58.9k
    Sec.setOrdinal(SectionIndex++);
707
58.9k
  }
708
709
  // Assign layout order indices to sections and fragments.
710
115k
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
711
58.9k
    MCSection *Sec = Layout.getSectionOrder()[i];
712
58.9k
    Sec->setLayoutOrder(i);
713
714
58.9k
    unsigned FragmentIndex = 0;
715
58.9k
    for (MCFragment &Frag : *Sec)
716
599k
      Frag.setLayoutOrder(FragmentIndex++);
717
58.9k
  }
718
719
  // Layout until everything fits.
720
57.5k
  while (layoutOnce(Layout))
721
520
    continue;
722
723
57.0k
  DEBUG_WITH_TYPE("mc-dump", {
724
57.0k
      llvm_ks::errs() << "assembler backend - post-relaxation\n--\n";
725
57.0k
      dump(); });
726
727
  // Finalize the layout, including fragment lowering.
728
57.0k
  finishLayout(Layout);
729
730
57.0k
  DEBUG_WITH_TYPE("mc-dump", {
731
57.0k
      llvm_ks::errs() << "assembler backend - final-layout\n--\n";
732
57.0k
      dump(); });
733
734
  // Allow the object writer a chance to perform post-layout binding (for
735
  // example, to set the index fields in the symbol data).
736
57.0k
  getWriter().executePostLayoutBinding(*this, Layout);
737
738
  // Evaluate and apply the fixups, generating relocation entries as necessary.
739
58.9k
  for (MCSection &Sec : *this) {
740
461k
    for (MCFragment &Frag : Sec) {
741
461k
      MCEncodedFragment *F = dyn_cast<MCEncodedFragment>(&Frag);
742
      // Data and relaxable fragments both have fixups.  So only process
743
      // those here.
744
      // FIXME: Is there a better way to do this?  MCEncodedFragmentWithFixups
745
      // being templated makes this tricky.
746
461k
      if (!F || isa<MCCompactEncodedInstFragment>(F))
747
381k
        continue;
748
79.5k
      ArrayRef<MCFixup> Fixups;
749
79.5k
      MutableArrayRef<char> Contents;
750
79.5k
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(F)) {
751
68.5k
        Fixups = FragWithFixups->getFixups();
752
68.5k
        Contents = FragWithFixups->getContents();
753
68.5k
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(F)) {
754
11.0k
        Fixups = FragWithFixups->getFixups();
755
11.0k
        Contents = FragWithFixups->getContents();
756
11.0k
      } else
757
0
        llvm_unreachable("Unknown fragment with fixups!");
758
193k
      for (const MCFixup &Fixup : Fixups) {
759
193k
        uint64_t FixedValue;
760
193k
        bool IsPCRel;
761
193k
        std::tie(FixedValue, IsPCRel) = handleFixup(Layout, *F, Fixup, KsError);
762
193k
        if (KsError)
763
5.09k
            return;
764
188k
        getBackend().applyFixup(Fixup, Contents.data(),
765
188k
                                Contents.size(), FixedValue, IsPCRel, KsError);
766
188k
        if (KsError)
767
68
            return;
768
188k
      }
769
79.5k
    }
770
58.9k
  }
771
57.0k
}
772
773
57.0k
void MCAssembler::Finish(unsigned int &KsError) {
774
  // Create the layout object.
775
57.0k
  MCAsmLayout Layout(*this);
776
57.0k
  layout(Layout, KsError);
777
778
  // Write the object file.
779
57.0k
  if (!KsError) {
780
51.8k
      getWriter().writeObject(*this, Layout);
781
51.8k
      KsError = getError();
782
51.8k
  }
783
57.0k
}
784
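For context, the hit counts in this listing come from driving the MC layer through Keystone's public API; each successful ks_asm() call ultimately reaches MCAssembler::Finish() above. A minimal usage sketch (architecture, mode, and input string are arbitrary examples):

    #include <stdio.h>
    #include <keystone/keystone.h>

    int main() {
      ks_engine *ks;
      unsigned char *encode;
      size_t size, count;

      if (ks_open(KS_ARCH_X86, KS_MODE_64, &ks) != KS_ERR_OK)
        return 1;
      // ks_asm() parses and lays out the input, applies fixups, then writes the encoding.
      if (ks_asm(ks, "inc rax; ret", 0, &encode, &size, &count) != 0) {
        printf("ks_asm failed: error %u\n", (unsigned)ks_errno(ks));
      } else {
        printf("assembled %zu bytes from %zu statements\n", size, count);
        ks_free(encode);
      }
      ks_close(ks);
      return 0;
    }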
785
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
786
                                       const MCRelaxableFragment *DF,
787
                                       const MCAsmLayout &Layout, unsigned &KsError) const
788
25.3k
{
789
25.3k
  MCValue Target;
790
25.3k
  uint64_t Value;
791
25.3k
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value, KsError);
792
25.3k
  if (KsError) {
793
6.34k
      KsError = KS_ERR_ASM_FIXUP_INVALID;
794
      // return a dummy value
795
6.34k
      return false;
796
6.34k
  }
797
18.9k
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
798
18.9k
                                                   Layout);
799
25.3k
}
800
801
bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
802
                                          const MCAsmLayout &Layout, unsigned &KsError) const
803
45.8k
{
804
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
805
  // are intentionally pushing out inst fragments, or because we relaxed a
806
  // previous instruction to one that doesn't need relaxation.
807
45.8k
  if (!getBackend().mayNeedRelaxation(F->getInst()))
808
17.5k
    return false;
809
810
28.2k
  for (const MCFixup &Fixup : F->getFixups())
811
25.3k
    if (fixupNeedsRelaxation(Fixup, F, Layout, KsError))
812
8.79k
      return true;
813
814
19.4k
  return false;
815
28.2k
}
816
817
bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
818
                                   MCRelaxableFragment &F)
819
45.8k
{
820
45.8k
  unsigned KsError = 0;
821
45.8k
  if (!fragmentNeedsRelaxation(&F, Layout, KsError))
822
37.0k
    return false;
823
824
  // FIXME-PERF: We could immediately lower out instructions if we can tell
825
  // they are fully resolved, to avoid retesting on later passes.
826
827
  // Relax the fragment.
828
829
8.79k
  MCInst Relaxed;
830
8.79k
  getBackend().relaxInstruction(F.getInst(), Relaxed);
831
832
  // Encode the new instruction.
833
  //
834
  // FIXME-PERF: If it matters, we could let the target do this. It can
835
  // probably do so more efficiently in many cases.
836
8.79k
  SmallVector<MCFixup, 4> Fixups;
837
8.79k
  SmallString<256> Code;
838
8.79k
  raw_svector_ostream VecOS(Code);
839
8.79k
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, F.getSubtargetInfo(), KsError);
840
841
  // Update the fragment.
842
8.79k
  F.setInst(Relaxed);
843
8.79k
  F.getContents() = Code;
844
8.79k
  F.getFixups() = Fixups;
845
846
8.79k
  return true;
847
45.8k
}
848
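To make relaxInstruction() concrete: the classic case in the x86 backend is widening a short jump whose displacement no longer fits in 8 bits. The encodings below are only an illustration; the actual rewrite is delegated to the target's relaxInstruction hook:

    // before relaxation:  EB <rel8>    jmp short label   (2 bytes)
    // after  relaxation:  E9 <rel32>   jmp near label    (5 bytes)
    //
    // The fragment is re-encoded with new contents and fixups, the offsets of the
    // fragments that follow are invalidated, and layout() runs another pass.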
849
0
bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
850
0
  uint64_t OldSize = LF.getContents().size();
851
0
  int64_t Value;
852
0
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
853
0
  if (!Abs)
854
0
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
855
0
  SmallString<8> &Data = LF.getContents();
856
0
  Data.clear();
857
0
  raw_svector_ostream OSE(Data);
858
0
  if (LF.isSigned())
859
0
    encodeSLEB128(Value, OSE);
860
0
  else
861
0
    encodeULEB128(Value, OSE);
862
0
  return OldSize != LF.getContents().size();
863
0
}
864
865
bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
866
0
                                     MCDwarfLineAddrFragment &DF) {
867
0
  return false;
868
0
}
869
870
bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
871
0
                                              MCDwarfCallFrameFragment &DF) {
872
0
  return false;
873
0
}
874
875
bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec)
876
60.0k
{
877
  // Holds the first fragment which needed relaxing during this layout. It will
878
  // remain NULL if none were relaxed.
879
  // When a fragment is relaxed, all the fragments following it should get
880
  // invalidated because their offset is going to change.
881
60.0k
  MCFragment *FirstRelaxedFragment = nullptr;
882
883
  // Attempt to relax all the fragments in the section.
884
687k
  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
885
    // Check if this is a fragment that needs relaxation.
886
627k
    bool RelaxedFrag = false;
887
627k
    switch(I->getKind()) {
888
581k
    default:
889
581k
      break;
890
581k
    case MCFragment::FT_Relaxable:
891
45.8k
      assert(!getRelaxAll() &&
892
45.8k
             "Did not expect a MCRelaxableFragment in RelaxAll mode");
893
45.8k
      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
894
45.8k
      break;
895
0
    case MCFragment::FT_Dwarf:
896
0
      RelaxedFrag = relaxDwarfLineAddr(Layout,
897
0
                                       *cast<MCDwarfLineAddrFragment>(I));
898
0
      break;
899
0
    case MCFragment::FT_DwarfFrame:
900
0
      RelaxedFrag =
901
0
        relaxDwarfCallFrameFragment(Layout,
902
0
                                    *cast<MCDwarfCallFrameFragment>(I));
903
0
      break;
904
0
    case MCFragment::FT_LEB:
905
0
      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
906
0
      break;
907
627k
    }
908
627k
    if (RelaxedFrag && !FirstRelaxedFragment)
909
520
      FirstRelaxedFragment = &*I;
910
627k
  }
911
60.0k
  if (FirstRelaxedFragment) {
912
520
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
913
520
    return true;
914
520
  }
915
59.5k
  return false;
916
60.0k
}
917
918
bool MCAssembler::layoutOnce(MCAsmLayout &Layout)
919
57.5k
{
920
57.5k
  bool WasRelaxed = false;
921
117k
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
922
59.5k
    MCSection &Sec = *it;
923
60.0k
    while (layoutSectionOnce(Layout, Sec))
924
520
      WasRelaxed = true;
925
59.5k
  }
926
927
57.5k
  return WasRelaxed;
928
57.5k
}
929
930
57.0k
void MCAssembler::finishLayout(MCAsmLayout &Layout) {
931
  // The layout is done. Mark every fragment as valid.
932
115k
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
933
58.9k
    bool valid;
934
58.9k
    Layout.getFragmentOffset(&*Layout.getSectionOrder()[i]->rbegin(), valid);
935
58.9k
  }
936
57.0k
}