/src/keystone/llvm/lib/Target/X86/AsmParser/X86Operand.h
Line | Count | Source |
1 | | //===-- X86Operand.h - Parsed X86 machine instruction --------------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | |
10 | | #ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H |
11 | | #define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H |
12 | | |
13 | | #include "X86AsmParserCommon.h" |
14 | | #include "llvm/MC/MCExpr.h" |
15 | | #include "llvm/MC/MCInst.h" |
16 | | #include "llvm/MC/MCRegisterInfo.h" |
17 | | #include "llvm/MC/MCParser/MCParsedAsmOperand.h" |
18 | | #include "llvm/ADT/STLExtras.h" |
19 | | #include "MCTargetDesc/X86MCTargetDesc.h" |
20 | | |
21 | | namespace llvm_ks { |
22 | | |
23 | | /// X86Operand - Instances of this class represent a parsed X86 machine |
24 | | /// instruction. |
25 | | struct X86Operand : public MCParsedAsmOperand { |
26 | | enum KindTy { |
27 | | Token, |
28 | | Register, |
29 | | Immediate, |
30 | | Memory |
31 | | } Kind; |
32 | | |
33 | | SMLoc StartLoc, EndLoc; |
34 | | SMLoc OffsetOfLoc; |
35 | | StringRef SymName; |
36 | | void *OpDecl; |
37 | | bool AddressOf; |
38 | | |
39 | | struct TokOp { |
40 | | const char *Data; |
41 | | unsigned Length; |
42 | | }; |
43 | | |
44 | | struct RegOp { |
45 | | unsigned RegNo; |
46 | | }; |
47 | | |
48 | | struct ImmOp { |
49 | | const MCExpr *Val; |
50 | | }; |
51 | | |
52 | | struct MemOp { |
53 | | unsigned SegReg; |
54 | | const MCExpr *Disp; |
55 | | unsigned BaseReg; |
56 | | unsigned IndexReg; |
57 | | unsigned Scale; |
58 | | unsigned Size; |
59 | | unsigned ModeSize; |
60 | | }; |
61 | | |
62 | | union { |
63 | | struct TokOp Tok; |
64 | | struct RegOp Reg; |
65 | | struct ImmOp Imm; |
66 | | struct MemOp Mem; |
67 | | }; |
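 | | // The anonymous union above is discriminated by Kind: exactly one of Tok, |
 | | // Reg, Imm or Mem is active, and every accessor below asserts the matching |
 | | // Kind before reading its member. |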
68 | | |
69 | | X86Operand(KindTy K, SMLoc Start, SMLoc End) |
70 | 208k | : Kind(K), StartLoc(Start), EndLoc(End) {} |
71 | | |
72 | 0 | StringRef getSymName() override { return SymName; } |
73 | 0 | void *getOpDecl() override { return OpDecl; } |
74 | | |
75 | | /// getStartLoc - Get the location of the first token of this operand. |
76 | 135 | SMLoc getStartLoc() const override { return StartLoc; } |
77 | | /// getEndLoc - Get the location of the last token of this operand. |
78 | 0 | SMLoc getEndLoc() const override { return EndLoc; } |
79 | | /// getLocRange - Get the range between the first and last token of this |
80 | | /// operand. |
81 | 0 | SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } |
82 | | /// getOffsetOfLoc - Get the location of the offset operator. |
83 | 0 | SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; } |
84 | | |
85 | 0 | void print(raw_ostream &OS) const override {} |
86 | | |
87 | 329k | StringRef getToken() const { |
88 | 329k | assert(Kind == Token && "Invalid access!"); |
89 | 329k | return StringRef(Tok.Data, Tok.Length); |
90 | 329k | } |
91 | 5.62k | void setTokenValue(StringRef Value) { |
92 | 5.62k | assert(Kind == Token && "Invalid access!"); |
93 | 5.62k | Tok.Data = Value.data(); |
94 | 5.62k | Tok.Length = Value.size(); |
95 | 5.62k | } |
96 | | |
97 | 179k | unsigned getReg() const override { |
98 | 179k | assert(Kind == Register && "Invalid access!"); |
99 | 179k | return Reg.RegNo; |
100 | 179k | } |
101 | | |
102 | 1.53k | const MCExpr *getImm() const { |
103 | 1.53k | assert(Kind == Immediate && "Invalid access!"); |
104 | 1.53k | return Imm.Val; |
105 | 1.53k | } |
106 | | |
107 | 198k | const MCExpr *getMemDisp() const { |
108 | 198k | assert(Kind == Memory && "Invalid access!"); |
109 | 198k | return Mem.Disp; |
110 | 198k | } |
111 | 194k | unsigned getMemSegReg() const { |
112 | 194k | assert(Kind == Memory && "Invalid access!"); |
113 | 194k | return Mem.SegReg; |
114 | 194k | } |
115 | 249k | unsigned getMemBaseReg() const { |
116 | 249k | assert(Kind == Memory && "Invalid access!"); |
117 | 249k | return Mem.BaseReg; |
118 | 249k | } |
119 | 199k | unsigned getMemIndexReg() const { |
120 | 199k | assert(Kind == Memory && "Invalid access!"); |
121 | 199k | return Mem.IndexReg; |
122 | 199k | } |
123 | 199k | unsigned getMemScale() const { |
124 | 199k | assert(Kind == Memory && "Invalid access!"); |
125 | 199k | return Mem.Scale; |
126 | 199k | } |
127 | 0 | unsigned getMemModeSize() const { |
128 | 0 | assert(Kind == Memory && "Invalid access!"); |
129 | 0 | return Mem.ModeSize; |
130 | 0 | } |
131 | | |
132 | 652k | bool isToken() const override { return Kind == Token; } |
133 | | |
134 | 12.5k | bool isImm() const override { return Kind == Immediate; } |
135 | | |
136 | 658 | bool isImmSExti16i8() const { |
137 | 658 | if (!isImm()) |
138 | 375 | return false; |
139 | | |
140 | | // If this isn't a constant expr, just assume it fits and let relaxation |
141 | | // handle it. |
142 | 283 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
143 | 283 | if (!CE) |
144 | 107 | return true; |
145 | | |
146 | | // Otherwise, check the value is in a range that makes sense for this |
147 | | // extension. |
148 | 176 | return isImmSExti16i8Value(CE->getValue()); |
149 | 283 | } |
150 | 621 | bool isImmSExti32i8() const { |
151 | 621 | if (!isImm()) |
152 | 455 | return false; |
153 | | |
154 | | // If this isn't a constant expr, just assume it fits and let relaxation |
155 | | // handle it. |
156 | 166 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
157 | 166 | if (!CE) |
158 | 53 | return true; |
159 | | |
160 | | // Otherwise, check the value is in a range that makes sense for this |
161 | | // extension. |
162 | 113 | return isImmSExti32i8Value(CE->getValue()); |
163 | 166 | } |
164 | 494 | bool isImmSExti64i8() const { |
165 | 494 | if (!isImm()) |
166 | 277 | return false; |
167 | | |
168 | | // If this isn't a constant expr, just assume it fits and let relaxation |
169 | | // handle it. |
170 | 217 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
171 | 217 | if (!CE) |
172 | 53 | return true; |
173 | | |
174 | | // Otherwise, check the value is in a range that makes sense for this |
175 | | // extension. |
176 | 164 | return isImmSExti64i8Value(CE->getValue()); |
177 | 217 | } |
178 | 434 | bool isImmSExti64i32() const { |
179 | 434 | if (!isImm()) |
180 | 298 | return false; |
181 | | |
182 | | // If this isn't a constant expr, just assume it fits and let relaxation |
183 | | // handle it. |
184 | 136 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
185 | 136 | if (!CE) |
186 | 24 | return true; |
187 | | |
188 | | // Otherwise, check the value is in a range that makes sense for this |
189 | | // extension. |
190 | 112 | return isImmSExti64i32Value(CE->getValue()); |
191 | 136 | } |
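 | | // The isImmSExti* predicates above gate the sign-extended immediate forms: |
 | | // non-constant expressions are accepted optimistically and left to |
 | | // relaxation, while constants must fit the narrower encoding once |
 | | // sign-extended to the destination width. |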
192 | | |
193 | 487 | bool isImmUnsignedi8() const { |
194 | 487 | if (!isImm()) return false; |
195 | 289 | const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); |
196 | 289 | if (!CE) return false; |
197 | 240 | return isImmUnsignedi8Value(CE->getValue()); |
198 | 289 | } |
199 | | |
200 | 0 | bool isOffsetOf() const override { |
201 | 0 | return OffsetOfLoc.getPointer(); |
202 | 0 | } |
203 | | |
204 | 0 | bool needAddressOf() const override { |
205 | 0 | return AddressOf; |
206 | 0 | } |
207 | | |
208 | 2.58k | bool isMem() const override { return Kind == Memory; } |
209 | 147k | bool isMemUnsized() const { |
210 | 147k | return Kind == Memory && Mem.Size == 0; |
211 | 147k | } |
212 | 28.4k | bool isMem8() const { |
213 | 28.4k | return Kind == Memory && (!Mem.Size || Mem.Size == 8); |
214 | 28.4k | } |
215 | 55.3k | bool isMem16() const { |
216 | 55.3k | return Kind == Memory && (!Mem.Size || Mem.Size == 16); |
217 | 55.3k | } |
218 | 55.7k | bool isMem32() const { |
219 | 55.7k | return Kind == Memory && (!Mem.Size || Mem.Size == 32); |
220 | 55.7k | } |
221 | 48.2k | bool isMem64() const { |
222 | 48.2k | return Kind == Memory && (!Mem.Size || Mem.Size == 64); |
223 | 48.2k | } |
224 | 16 | bool isMem80() const { |
225 | 16 | return Kind == Memory && (!Mem.Size || Mem.Size == 80); |
226 | 16 | } |
227 | 24 | bool isMem128() const { |
228 | 24 | return Kind == Memory && (!Mem.Size || Mem.Size == 128); |
229 | 24 | } |
230 | 22 | bool isMem256() const { |
231 | 22 | return Kind == Memory && (!Mem.Size || Mem.Size == 256); |
232 | 22 | } |
233 | 10 | bool isMem512() const { |
234 | 10 | return Kind == Memory && (!Mem.Size || Mem.Size == 512); |
235 | 10 | } |
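 | | // A memory operand with no explicit size (Mem.Size == 0) matches every |
 | | // isMemN predicate above and is disambiguated later by the matcher. |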
236 | | |
237 | 0 | bool isMemVX32() const { |
238 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 32) && |
239 | 0 | getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; |
240 | 0 | } |
241 | 0 | bool isMemVX32X() const { |
242 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 32) && |
243 | 0 | getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31; |
244 | 0 | } |
245 | 0 | bool isMemVY32() const { |
246 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 32) && |
247 | 0 | getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; |
248 | 0 | } |
249 | 0 | bool isMemVY32X() const { |
250 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 32) && |
251 | 0 | getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31; |
252 | 0 | } |
253 | 0 | bool isMemVX64() const { |
254 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 64) && |
255 | 0 | getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM15; |
256 | 0 | } |
257 | 0 | bool isMemVX64X() const { |
258 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 64) && |
259 | 0 | getMemIndexReg() >= X86::XMM0 && getMemIndexReg() <= X86::XMM31; |
260 | 0 | } |
261 | 0 | bool isMemVY64() const { |
262 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 64) && |
263 | 0 | getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM15; |
264 | 0 | } |
265 | 0 | bool isMemVY64X() const { |
266 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 64) && |
267 | 0 | getMemIndexReg() >= X86::YMM0 && getMemIndexReg() <= X86::YMM31; |
268 | 0 | } |
269 | 0 | bool isMemVZ32() const { |
270 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 32) && |
271 | 0 | getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; |
272 | 0 | } |
273 | 0 | bool isMemVZ64() const { |
274 | 0 | return Kind == Memory && (!Mem.Size || Mem.Size == 64) && |
275 | 0 | getMemIndexReg() >= X86::ZMM0 && getMemIndexReg() <= X86::ZMM31; |
276 | 0 | } |
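 | | // The isMemV{X,Y,Z}* predicates above recognize vector-indexed (VSIB) |
 | | // memory operands, i.e. the index register is an XMM/YMM/ZMM register as |
 | | // used by the gather/scatter instructions. |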
277 | | |
278 | 144k | bool isAbsMem() const { |
279 | 144k | return (Kind == Memory) && !getMemSegReg() && !getMemBaseReg() && |
280 | 144k | !getMemIndexReg() && (getMemScale() == 1) && (Mem.ModeSize == 0 || Mem.Size == 0); |
281 | 144k | } |
282 | 3 | bool isAVX512RC() const { |
283 | 3 | return isImm(); |
284 | 3 | } |
285 | | |
286 | 0 | bool isAbsMem16() const { |
287 | 0 | return isAbsMem() && Mem.ModeSize == 16; |
288 | 0 | } |
289 | | |
290 | 12.5k | bool isSrcIdx() const { |
291 | 12.5k | return !getMemIndexReg() && getMemScale() == 1 && |
292 | 12.5k | (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI || |
293 | 12.5k | getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) && |
294 | 12.5k | cast<MCConstantExpr>(getMemDisp())->getValue() == 0; |
295 | 12.5k | } |
296 | 19.3k | bool isSrcIdx8() const { |
297 | 19.3k | return isMem8() && isSrcIdx(); |
298 | 19.3k | } |
299 | 1.22k | bool isSrcIdx16() const { |
300 | 1.22k | return isMem16() && isSrcIdx(); |
301 | 1.22k | } |
302 | 29.0k | bool isSrcIdx32() const { |
303 | 29.0k | return isMem32() && isSrcIdx(); |
304 | 29.0k | } |
305 | 20.6k | bool isSrcIdx64() const { |
306 | 20.6k | return isMem64() && isSrcIdx(); |
307 | 20.6k | } |
308 | | |
309 | 21.6k | bool isDstIdx() const { |
310 | 21.6k | return !getMemIndexReg() && getMemScale() == 1 && |
311 | 21.6k | (getMemSegReg() == 0 || getMemSegReg() == X86::ES) && |
312 | 21.6k | (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI || |
313 | 21.6k | getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) && |
314 | 21.6k | cast<MCConstantExpr>(getMemDisp())->getValue() == 0; |
315 | 21.6k | } |
316 | 1.15k | bool isDstIdx8() const { |
317 | 1.15k | return isMem8() && isDstIdx(); |
318 | 1.15k | } |
319 | 3.87k | bool isDstIdx16() const { |
320 | 3.87k | return isMem16() && isDstIdx(); |
321 | 3.87k | } |
322 | 3.71k | bool isDstIdx32() const { |
323 | 3.71k | return isMem32() && isDstIdx(); |
324 | 3.71k | } |
325 | 20.9k | bool isDstIdx64() const { |
326 | 20.9k | return isMem64() && isDstIdx(); |
327 | 20.9k | } |
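 | | // isSrcIdx*/isDstIdx* match the implicit [SI]/[DI]-based operands of the |
 | | // string instructions; the destination form only allows ES as a segment |
 | | // override, as the architecture requires. |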
328 | | |
329 | 24.1k | bool isMemOffs() const { |
330 | 24.1k | return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() && |
331 | 24.1k | getMemScale() == 1; |
332 | 24.1k | } |
333 | | |
334 | 2.10k | bool isMemOffs16_8() const { |
335 | 2.10k | return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8); |
336 | 2.10k | } |
337 | 2.72k | bool isMemOffs16_16() const { |
338 | 2.72k | return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16); |
339 | 2.72k | } |
340 | 1.97k | bool isMemOffs16_32() const { |
341 | 1.97k | return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32); |
342 | 1.97k | } |
343 | 2.09k | bool isMemOffs32_8() const { |
344 | 2.09k | return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8); |
345 | 2.09k | } |
346 | 2.63k | bool isMemOffs32_16() const { |
347 | 2.63k | return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16); |
348 | 2.63k | } |
349 | 1.97k | bool isMemOffs32_32() const { |
350 | 1.97k | return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32); |
351 | 1.97k | } |
352 | 1.97k | bool isMemOffs32_64() const { |
353 | 1.97k | return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64); |
354 | 1.97k | } |
355 | 2.09k | bool isMemOffs64_8() const { |
356 | 2.09k | return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8); |
357 | 2.09k | } |
358 | 2.63k | bool isMemOffs64_16() const { |
359 | 2.63k | return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16); |
360 | 2.63k | } |
361 | 1.97k | bool isMemOffs64_32() const { |
362 | 1.97k | return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32); |
363 | 1.97k | } |
364 | 1.97k | bool isMemOffs64_64() const { |
365 | 1.97k | return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64); |
366 | 1.97k | } |
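 | | // isMemOffsM_N matches a bare displacement (moffs) operand with address |
 | | // size M and operand size N, as used by the accumulator forms of MOV. |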
367 | | |
368 | 437k | bool isReg() const override { return Kind == Register; } |
369 | | |
370 | 0 | bool isGR32orGR64() const { |
371 | 0 | return Kind == Register && |
372 | 0 | (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) || |
373 | 0 | X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg())); |
374 | 0 | } |
375 | | |
376 | 19.6k | void addExpr(MCInst &Inst, const MCExpr *Expr) const { |
377 | | // Add as immediates when possible. |
378 | 19.6k | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) |
379 | 1.95k | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
380 | 17.6k | else |
381 | 17.6k | Inst.addOperand(MCOperand::createExpr(Expr)); |
382 | 19.6k | } |
383 | | |
384 | 306 | void addRegOperands(MCInst &Inst, unsigned N) const { |
385 | 306 | assert(N == 1 && "Invalid number of operands!"); |
386 | 306 | Inst.addOperand(MCOperand::createReg(getReg())); |
387 | 306 | } |
388 | | |
389 | 0 | static unsigned getGR32FromGR64(unsigned RegNo) { |
390 | 0 | switch (RegNo) { |
391 | 0 | default: llvm_unreachable("Unexpected register"); |
392 | 0 | case X86::RAX: return X86::EAX; |
393 | 0 | case X86::RCX: return X86::ECX; |
394 | 0 | case X86::RDX: return X86::EDX; |
395 | 0 | case X86::RBX: return X86::EBX; |
396 | 0 | case X86::RBP: return X86::EBP; |
397 | 0 | case X86::RSP: return X86::ESP; |
398 | 0 | case X86::RSI: return X86::ESI; |
399 | 0 | case X86::RDI: return X86::EDI; |
400 | 0 | case X86::R8: return X86::R8D; |
401 | 0 | case X86::R9: return X86::R9D; |
402 | 0 | case X86::R10: return X86::R10D; |
403 | 0 | case X86::R11: return X86::R11D; |
404 | 0 | case X86::R12: return X86::R12D; |
405 | 0 | case X86::R13: return X86::R13D; |
406 | 0 | case X86::R14: return X86::R14D; |
407 | 0 | case X86::R15: return X86::R15D; |
408 | 0 | case X86::RIP: return X86::EIP; |
409 | 0 | } |
410 | 0 | } |
411 | | |
412 | 0 | void addGR32orGR64Operands(MCInst &Inst, unsigned N) const { |
413 | 0 | assert(N == 1 && "Invalid number of operands!"); |
414 | 0 | unsigned RegNo = getReg(); |
415 | 0 | if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo)) |
416 | 0 | RegNo = getGR32FromGR64(RegNo); |
417 | 0 | Inst.addOperand(MCOperand::createReg(RegNo)); |
418 | 0 | } |
419 | 0 | void addAVX512RCOperands(MCInst &Inst, unsigned N) const { |
420 | 0 | assert(N == 1 && "Invalid number of operands!"); |
421 | 0 | addExpr(Inst, getImm()); |
422 | 0 | } |
423 | 377 | void addImmOperands(MCInst &Inst, unsigned N) const { |
424 | 377 | assert(N == 1 && "Invalid number of operands!"); |
425 | 377 | addExpr(Inst, getImm()); |
426 | 377 | } |
427 | | |
428 | 19.2k | void addMemOperands(MCInst &Inst, unsigned N) const { |
429 | 19.2k | assert((N == 5) && "Invalid number of operands!"); |
430 | 19.2k | Inst.addOperand(MCOperand::createReg(getMemBaseReg())); |
431 | 19.2k | Inst.addOperand(MCOperand::createImm(getMemScale())); |
432 | 19.2k | Inst.addOperand(MCOperand::createReg(getMemIndexReg())); |
433 | 19.2k | addExpr(Inst, getMemDisp()); |
434 | 19.2k | Inst.addOperand(MCOperand::createReg(getMemSegReg())); |
435 | 19.2k | } |
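 | | // The five sub-operands are emitted in the order the X86 MCInst layout |
 | | // expects for a memory operand: base, scale, index, displacement, segment. |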
436 | | |
437 | 96.7k | void addAbsMemOperands(MCInst &Inst, unsigned N) const { |
438 | 96.7k | assert((N == 1) && "Invalid number of operands!"); |
439 | | // Add as immediates when possible. |
440 | 96.7k | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp())) |
441 | 82.1k | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
442 | 14.5k | else |
443 | 14.5k | Inst.addOperand(MCOperand::createExpr(getMemDisp())); |
444 | 96.7k | } |
445 | | |
446 | 8.96k | void addSrcIdxOperands(MCInst &Inst, unsigned N) const { |
447 | 8.96k | assert((N == 2) && "Invalid number of operands!"); |
448 | 8.96k | Inst.addOperand(MCOperand::createReg(getMemBaseReg())); |
449 | 8.96k | Inst.addOperand(MCOperand::createReg(getMemSegReg())); |
450 | 8.96k | } |
451 | 3.63k | void addDstIdxOperands(MCInst &Inst, unsigned N) const { |
452 | 3.63k | assert((N == 1) && "Invalid number of operands!"); |
453 | 3.63k | Inst.addOperand(MCOperand::createReg(getMemBaseReg())); |
454 | 3.63k | } |
455 | | |
456 | 103 | void addMemOffsOperands(MCInst &Inst, unsigned N) const { |
457 | 103 | assert((N == 2) && "Invalid number of operands!"); |
458 | | // Add as immediates when possible. |
459 | 103 | if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp())) |
460 | 1 | Inst.addOperand(MCOperand::createImm(CE->getValue())); |
461 | 102 | else |
462 | 102 | Inst.addOperand(MCOperand::createExpr(getMemDisp())); |
463 | 103 | Inst.addOperand(MCOperand::createReg(getMemSegReg())); |
464 | 103 | } |
465 | | |
466 | 132k | static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) { |
467 | 132k | SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size()); |
468 | 132k | auto Res = llvm_ks::make_unique<X86Operand>(Token, Loc, EndLoc); |
469 | 132k | Res->Tok.Data = Str.data(); |
470 | 132k | Res->Tok.Length = Str.size(); |
471 | 132k | return Res; |
472 | 132k | } |
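 | | // Note that CreateToken stores a pointer into the caller's string rather |
 | | // than copying it, so Str must outlive the returned operand. |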
473 | | |
474 | | static std::unique_ptr<X86Operand> |
475 | | CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, |
476 | | bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(), |
477 | 7.03k | StringRef SymName = StringRef(), void *OpDecl = nullptr) { |
478 | 7.03k | auto Res = llvm_ks::make_unique<X86Operand>(Register, StartLoc, EndLoc); |
479 | 7.03k | Res->Reg.RegNo = RegNo; |
480 | 7.03k | Res->AddressOf = AddressOf; |
481 | 7.03k | Res->OffsetOfLoc = OffsetOfLoc; |
482 | 7.03k | Res->SymName = SymName; |
483 | 7.03k | Res->OpDecl = OpDecl; |
484 | 7.03k | return Res; |
485 | 7.03k | } |
486 | | |
487 | | static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val, |
488 | 17.5k | SMLoc StartLoc, SMLoc EndLoc) { |
489 | 17.5k | auto Res = llvm_ks::make_unique<X86Operand>(Immediate, StartLoc, EndLoc); |
490 | 17.5k | Res->Imm.Val = Val; |
491 | 17.5k | return Res; |
492 | 17.5k | } |
493 | | |
494 | | /// Create an absolute memory operand. |
495 | | static std::unique_ptr<X86Operand> |
496 | | CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, |
497 | | unsigned Size = 0, StringRef SymName = StringRef(), |
498 | 26.7k | void *OpDecl = nullptr) { |
499 | 26.7k | auto Res = llvm_ks::make_unique<X86Operand>(Memory, StartLoc, EndLoc); |
500 | 26.7k | Res->Mem.SegReg = 0; |
501 | 26.7k | Res->Mem.Disp = Disp; |
502 | 26.7k | Res->Mem.BaseReg = 0; |
503 | 26.7k | Res->Mem.IndexReg = 0; |
504 | 26.7k | Res->Mem.Scale = 1; |
505 | 26.7k | Res->Mem.Size = Size; |
506 | 26.7k | Res->Mem.ModeSize = ModeSize; |
507 | 26.7k | Res->SymName = SymName; |
508 | 26.7k | Res->OpDecl = OpDecl; |
509 | 26.7k | Res->AddressOf = false; |
510 | 26.7k | return Res; |
511 | 26.7k | } |
512 | | |
513 | | /// Create a generalized memory operand. |
514 | | static std::unique_ptr<X86Operand> |
515 | | CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp, |
516 | | unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc, |
517 | | SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(), |
518 | 24.7k | void *OpDecl = nullptr) { |
519 | | // We should never just have a displacement, that should be parsed as an |
520 | | // absolute memory operand. |
521 | | // assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); |
522 | | |
523 | | // The scale should always be one of {1,2,4,8}. |
524 | 24.7k | assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) && |
525 | 24.7k | "Invalid scale!"); |
526 | 24.7k | auto Res = llvm_ks::make_unique<X86Operand>(Memory, StartLoc, EndLoc); |
527 | 24.7k | Res->Mem.SegReg = SegReg; |
528 | 24.7k | Res->Mem.Disp = Disp; |
529 | 24.7k | Res->Mem.BaseReg = BaseReg; |
530 | 24.7k | Res->Mem.IndexReg = IndexReg; |
531 | 24.7k | Res->Mem.Scale = Scale; |
532 | 24.7k | Res->Mem.Size = Size; |
533 | 24.7k | Res->Mem.ModeSize = ModeSize; |
534 | 24.7k | Res->SymName = SymName; |
535 | 24.7k | Res->OpDecl = OpDecl; |
536 | 24.7k | Res->AddressOf = false; |
537 | 24.7k | return Res; |
538 | 24.7k | } |
539 | | }; |
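 | | // A minimal sketch of how a client of this class might build operands with |
 | | // the factories above; Ctx, Start and End stand for an MCContext and source |
 | | // locations assumed to be in scope: |
 | | // |
 | | //   // dword ptr [eax + ebx*4 + 16] in 32-bit mode: |
 | | //   const MCExpr *Disp = MCConstantExpr::create(16, Ctx); |
 | | //   auto Mem = X86Operand::CreateMem(/*ModeSize=*/32, /*SegReg=*/0, Disp, |
 | | //                                    /*BaseReg=*/X86::EAX, /*IndexReg=*/X86::EBX, |
 | | //                                    /*Scale=*/4, Start, End, /*Size=*/32); |
 | | //   // the immediate $42: |
 | | //   auto Imm = X86Operand::CreateImm(MCConstantExpr::create(42, Ctx), Start, End); |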
540 | | |
541 | | } // End of namespace llvm_ks |
542 | | |
543 | | #endif |