Coverage Report

Created: 2025-07-14 06:17

/src/keystone/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
Line
Count
Source (jump to first uncovered line)
1
//===-- RISCVAsmBackend.cpp - RISCV Assembler Backend ---------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
9
#include "RISCVAsmBackend.h"
10
#include "RISCVMCExpr.h"
11
#include "llvm/ADT/APInt.h"
12
#include "llvm/MC/MCAssembler.h"
13
#include "llvm/MC/MCContext.h"
14
#include "llvm/MC/MCDirectives.h"
15
#include "llvm/MC/MCELFObjectWriter.h"
16
#include "llvm/MC/MCExpr.h"
17
#include "llvm/MC/MCObjectWriter.h"
18
#include "llvm/MC/MCSymbol.h"
19
#include "llvm/MC/MCValue.h"
20
#include "llvm/Support/ErrorHandling.h"
21
#include "llvm/Support/raw_ostream.h"
22
23
using namespace llvm_ks;
24
25
// If linker relaxation is enabled, or the relax option had previously been
26
// enabled, always emit relocations even if the fixup can be resolved. This is
27
// necessary for correctness as offsets may change during relaxation.
28
bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
29
                                            const MCFixup &Fixup,
30
0
                                            const MCValue &Target) {
31
0
  bool ShouldForce = false;
32
33
0
  switch ((unsigned)Fixup.getKind()) {
34
0
  default:
35
0
    break;
36
0
  case RISCV::fixup_riscv_got_hi20:
37
0
  case RISCV::fixup_riscv_tls_got_hi20:
38
0
  case RISCV::fixup_riscv_tls_gd_hi20:
39
0
    return true;
40
0
  case RISCV::fixup_riscv_pcrel_lo12_i:
41
0
  case RISCV::fixup_riscv_pcrel_lo12_s:
42
    // For pcrel_lo12, force a relocation if the target of the corresponding
43
    // pcrel_hi20 is not in the same fragment.
44
0
    const MCFixup *T = cast<RISCVMCExpr>(Fixup.getValue())->getPCRelHiFixup();
45
0
    if (!T) {
46
0
      Asm.getContext().reportError(Fixup.getLoc(),
47
0
                                   "could not find corresponding %pcrel_hi");
48
0
      return false;
49
0
    }
50
51
0
    switch ((unsigned)T->getKind()) {
52
0
    default:
53
0
      llvm_unreachable("Unexpected fixup kind for pcrel_lo12");
54
0
      break;
55
0
    case RISCV::fixup_riscv_got_hi20:
56
0
    case RISCV::fixup_riscv_tls_got_hi20:
57
0
    case RISCV::fixup_riscv_tls_gd_hi20:
58
0
      ShouldForce = true;
59
0
      break;
60
0
    case RISCV::fixup_riscv_pcrel_hi20:
61
0
      ShouldForce = T->getValue()->findAssociatedFragment() !=
62
0
                    Fixup.getValue()->findAssociatedFragment();
63
0
      break;
64
0
    }
65
0
    break;
66
0
  }
67
68
0
  return ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] ||
69
0
         ForceRelocs;
70
0
}
71
72
bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
73
                                                   bool Resolved,
74
                                                   uint64_t Value,
75
                                                   const MCRelaxableFragment *DF,
76
54
                                                   const MCAsmLayout &Layout) const {
77
  // Return true if the symbol is actually unresolved.
78
  // Resolved could be always false when shouldForceRelocation return true.
79
  // ~We use !WasForced to indicate that the symbol is unresolved and not forced
80
  // by shouldForceRelocation.~ - removed for backport compatibility
81
54
  if (!Resolved)
82
0
    return true;
83
84
54
  int64_t Offset = int64_t(Value);
85
54
  switch ((unsigned)Fixup.getKind()) {
86
0
  default:
87
0
    return false;
88
0
  case RISCV::fixup_riscv_rvc_branch:
89
    // For compressed branch instructions the immediate must be
90
    // in the range [-256, 254].
91
0
    return Offset > 254 || Offset < -256;
92
54
  case RISCV::fixup_riscv_rvc_jump:
93
    // For compressed jump instructions the immediate must be
94
    // in the range [-2048, 2046].
95
54
    return Offset > 2046 || Offset < -2048;
96
54
  }
97
54
}
98
99
void RISCVAsmBackend::relaxInstruction(const MCInst &Inst,
100
0
                                       MCInst &Res) const {
101
  // TODO: replace this with call to auto generated uncompressinstr() function.
102
0
  switch (Inst.getOpcode()) {
103
0
  default:
104
0
    llvm_unreachable("Opcode not expected!");
105
0
  case RISCV::C_BEQZ:
106
    // c.beqz $rs1, $imm -> beq $rs1, X0, $imm.
107
0
    Res.setOpcode(RISCV::BEQ);
108
0
    Res.addOperand(Inst.getOperand(0));
109
0
    Res.addOperand(MCOperand::createReg(RISCV::X0));
110
0
    Res.addOperand(Inst.getOperand(1));
111
0
    break;
112
0
  case RISCV::C_BNEZ:
113
    // c.bnez $rs1, $imm -> bne $rs1, X0, $imm.
114
0
    Res.setOpcode(RISCV::BNE);
115
0
    Res.addOperand(Inst.getOperand(0));
116
0
    Res.addOperand(MCOperand::createReg(RISCV::X0));
117
0
    Res.addOperand(Inst.getOperand(1));
118
0
    break;
119
0
  case RISCV::C_J:
120
    // c.j $imm -> jal X0, $imm.
121
0
    Res.setOpcode(RISCV::JAL);
122
0
    Res.addOperand(MCOperand::createReg(RISCV::X0));
123
0
    Res.addOperand(Inst.getOperand(0));
124
0
    break;
125
0
  case RISCV::C_JAL:
126
    // c.jal $imm -> jal X1, $imm.
127
0
    Res.setOpcode(RISCV::JAL);
128
0
    Res.addOperand(MCOperand::createReg(RISCV::X1));
129
0
    Res.addOperand(Inst.getOperand(0));
130
0
    break;
131
0
  }
132
0
}
133
134
// Given a compressed control flow instruction this function returns
135
// the expanded instruction.
136
3.07k
unsigned RISCVAsmBackend::getRelaxedOpcode(unsigned Op) const {
137
3.07k
  switch (Op) {
138
1.17k
  default:
139
1.17k
    return Op;
140
0
  case RISCV::C_BEQZ:
141
0
    return RISCV::BEQ;
142
0
  case RISCV::C_BNEZ:
143
0
    return RISCV::BNE;
144
1.89k
  case RISCV::C_J:
145
1.89k
  case RISCV::C_JAL: // fall through.
146
1.89k
    return RISCV::JAL;
147
3.07k
  }
148
3.07k
}
149
150
3.07k
bool RISCVAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
151
3.07k
  return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
152
3.07k
}
153
154
4.52k
bool RISCVAsmBackend::writeNopData(uint64_t Count, MCObjectWriter * OW) const {
155
4.52k
  bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
156
4.52k
  unsigned MinNopLen = HasStdExtC ? 2 : 4;
157
158
4.52k
  if ((Count % MinNopLen) != 0)
159
0
    return false;
160
161
  // The canonical nop on RISC-V is addi x0, x0, 0.
162
14.6M
  for (; Count >= 4; Count -= 4)
163
14.6M
    OW->write32(0x00000013);
164
165
  // The canonical nop on RVC is c.nop.
166
4.52k
  if (Count && HasStdExtC)
167
2
    OW->write16(0x0001);
168
169
4.52k
  return true;
170
4.52k
}
171
172
17.4k
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, unsigned int KsError) {
173
                                  
174
17.4k
  unsigned Kind = Fixup.getKind();
175
17.4k
  switch (Kind) {
176
0
  default:
177
0
    llvm_unreachable("Unknown fixup kind!");
178
0
  case RISCV::fixup_riscv_got_hi20:
179
0
  case RISCV::fixup_riscv_tls_got_hi20:
180
0
  case RISCV::fixup_riscv_tls_gd_hi20:
181
0
    llvm_unreachable("Relocation should be unconditionally forced\n");
182
65
  case FK_Data_1:
183
72
  case FK_Data_2:
184
17.3k
  case FK_Data_4:
185
17.3k
  case FK_Data_8:
186
17.3k
    return Value;
187
0
  case RISCV::fixup_riscv_lo12_i:
188
0
  case RISCV::fixup_riscv_pcrel_lo12_i:
189
0
  case RISCV::fixup_riscv_tprel_lo12_i:
190
0
    return Value & 0xfff;
191
0
  case RISCV::fixup_riscv_lo12_s:
192
0
  case RISCV::fixup_riscv_pcrel_lo12_s:
193
0
  case RISCV::fixup_riscv_tprel_lo12_s:
194
0
    return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
195
0
  case RISCV::fixup_riscv_hi20:
196
0
  case RISCV::fixup_riscv_pcrel_hi20:
197
0
  case RISCV::fixup_riscv_tprel_hi20:
198
    // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
199
0
    return ((Value + 0x800) >> 12) & 0xfffff;
200
8
  case RISCV::fixup_riscv_jal: {
201
8
    if (!isInt<21>(Value))
202
      //Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
203
      // FIXME: report a more specific error to keystone
204
0
      KsError = KS_ERR_ASM_FIXUP_INVALID;
205
8
      return -1;
206
0
    if (Value & 0x1)
207
      //Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
208
      // FIXME: report a more specific error to keystone
209
0
      KsError = KS_ERR_ASM_FIXUP_INVALID;
210
0
      return -1;
211
    // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
212
0
    unsigned Sbit = (Value >> 20) & 0x1;
213
0
    unsigned Hi8 = (Value >> 12) & 0xff;
214
0
    unsigned Mid1 = (Value >> 11) & 0x1;
215
0
    unsigned Lo10 = (Value >> 1) & 0x3ff;
216
    // Inst{31} = Sbit;
217
    // Inst{30-21} = Lo10;
218
    // Inst{20} = Mid1;
219
    // Inst{19-12} = Hi8;
220
0
    Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
221
0
    return Value;
222
0
  }
223
0
  case RISCV::fixup_riscv_branch: {
224
0
    if (!isInt<13>(Value))
225
      //Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
226
0
      KsError = KS_ERR_ASM_FIXUP_INVALID;
227
0
      return -1;
228
0
    if (Value & 0x1)
229
      //Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
230
0
      KsError = KS_ERR_ASM_FIXUP_INVALID;
231
0
      return -1;
232
    // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
233
    // Value.
234
0
    unsigned Sbit = (Value >> 12) & 0x1;
235
0
    unsigned Hi1 = (Value >> 11) & 0x1;
236
0
    unsigned Mid6 = (Value >> 5) & 0x3f;
237
0
    unsigned Lo4 = (Value >> 1) & 0xf;
238
    // Inst{31} = Sbit;
239
    // Inst{30-25} = Mid6;
240
    // Inst{11-8} = Lo4;
241
    // Inst{7} = Hi1;
242
0
    Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
243
0
    return Value;
244
0
  }
245
0
  case RISCV::fixup_riscv_call:
246
0
  case RISCV::fixup_riscv_call_plt: {
247
    // Jalr will add UpperImm with the sign-extended 12-bit LowerImm,
248
    // we need to add 0x800ULL before extract upper bits to reflect the
249
    // effect of the sign extension.
250
0
    uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
251
0
    uint64_t LowerImm = Value & 0xfffULL;
252
0
    return UpperImm | ((LowerImm << 20) << 32);
253
0
  }
254
11
  case RISCV::fixup_riscv_rvc_jump: {
255
    // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
256
11
    unsigned Bit11  = (Value >> 11) & 0x1;
257
11
    unsigned Bit4   = (Value >> 4) & 0x1;
258
11
    unsigned Bit9_8 = (Value >> 8) & 0x3;
259
11
    unsigned Bit10  = (Value >> 10) & 0x1;
260
11
    unsigned Bit6   = (Value >> 6) & 0x1;
261
11
    unsigned Bit7   = (Value >> 7) & 0x1;
262
11
    unsigned Bit3_1 = (Value >> 1) & 0x7;
263
11
    unsigned Bit5   = (Value >> 5) & 0x1;
264
11
    Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
265
11
            (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
266
11
    return Value;
267
0
  }
268
0
  case RISCV::fixup_riscv_rvc_branch: {
269
    // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
270
0
    unsigned Bit8   = (Value >> 8) & 0x1;
271
0
    unsigned Bit7_6 = (Value >> 6) & 0x3;
272
0
    unsigned Bit5   = (Value >> 5) & 0x1;
273
0
    unsigned Bit4_3 = (Value >> 3) & 0x3;
274
0
    unsigned Bit2_1 = (Value >> 1) & 0x3;
275
0
    Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
276
0
            (Bit5 << 2);
277
0
    return Value;
278
0
  }
279
280
17.4k
  }
281
17.4k
}
282
283
void RISCVAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
284
21.1k
                          uint64_t Value, bool IsPCRel, unsigned int &KsError) const {
285
286
21.1k
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
287
21.1k
  if (!Value)
288
3.72k
    return; // Doesn't change encoding.
289
  // Apply any target-specific value adjustments.
290
17.4k
  Value = adjustFixupValue(Fixup, Value, KsError);
291
292
  // Shift the value into position.
293
17.4k
  Value <<= Info.TargetOffset;
294
295
17.4k
  unsigned Offset = Fixup.getOffset();
296
17.4k
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
297
298
17.4k
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
299
300
  // For each byte of the fragment that the fixup touches, mask in the
301
  // bits from the fixup value.
302
86.9k
  for (unsigned i = 0; i != NumBytes; ++i) {
303
69.5k
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
304
69.5k
  }
305
17.4k
}
306
307
// Linker relaxation may change code size. We have to insert Nops
308
// for .align directive when linker relaxation enabled. So then Linker
309
// could satisfy alignment by removing Nops.
310
// The function return the total Nops Size we need to insert.
311
bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
312
0
    const MCAlignFragment &AF, unsigned &Size) {
313
  // Calculate Nops Size only when linker relaxation enabled.
314
0
  if (!STI.getFeatureBits()[RISCV::FeatureRelax])
315
0
    return false;
316
317
0
  bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
318
0
  unsigned MinNopLen = HasStdExtC ? 2 : 4;
319
320
0
  if (AF.getAlignment() <= MinNopLen) {
321
0
    return false;
322
0
  } else {
323
0
    Size = AF.getAlignment() - MinNopLen;
324
0
    return true;
325
0
  }
326
0
}
327
328
// We need to insert R_RISCV_ALIGN relocation type to indicate the
329
// position of Nops and the total bytes of the Nops have been inserted
330
// when linker relaxation enabled.
331
// The function insert fixup_riscv_align fixup which eventually will
332
// transfer to R_RISCV_ALIGN relocation type.
333
bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
334
                                                    const MCAsmLayout &Layout,
335
0
                                                    MCAlignFragment &AF) {
336
  // Insert the fixup only when linker relaxation enabled.
337
0
  if (!STI.getFeatureBits()[RISCV::FeatureRelax])
338
0
    return false;
339
340
  // Calculate total Nops we need to insert. If there are none to insert
341
  // then simply return.
342
0
  unsigned Count;
343
0
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count) || (Count == 0))
344
0
    return false;
345
346
0
  MCContext &Ctx = Asm.getContext();
347
0
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
348
  // Create fixup_riscv_align fixup.
349
0
  MCFixup Fixup =
350
0
      MCFixup::create(0, Dummy, MCFixupKind(RISCV::fixup_riscv_align), SMLoc());
351
352
0
  uint64_t FixedValue = 0;
353
0
  MCValue NopBytes = MCValue::get(Count);
354
0
  MCAsmBackend &Backend = Asm.getBackend();
355
0
  bool isPCRel = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
356
0
                 MCFixupKindInfo::FKF_IsPCRel;
357
0
  Asm.getWriter().recordRelocation(Asm, Layout, &AF, Fixup, NopBytes, isPCRel,
358
0
                                   FixedValue);
359
360
0
  return true;
361
0
}
362
363
25.0k
MCObjectWriter *
RISCVAsmBackend::createObjectWriter(raw_pwrite_stream &OS) const {
  // Derive the ELF OSABI byte from the configured target OS.
  const uint8_t OSABI =
      MCELFObjectTargetWriter::getOSABI(STI.getTargetTriple().getOS());
  return createRISCVELFObjectWriter(OS, OSABI, Is64Bit);
}
367
368
MCAsmBackend *llvm_ks::createRISCVAsmBackend(const Target &T,
                                             const MCRegisterInfo &MRI,
                                             const Triple &TT, StringRef CPU,
                                             const MCSubtargetInfo &STI,
                                             const MCTargetOptions &Options) {
  // RISC-V is always little-endian; bitness comes from the triple.
  return new RISCVAsmBackend(T, TT.getOS(), /*IsLittle*/ true,
                             /*Is64Bit*/ TT.isArch64Bit(), STI, Options);
}