// /src/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
// (coverage listing: "Line | Count | Source" prefixes removed below)
1 | | //===- Hexagon.cpp --------------------------------------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | |
9 | | #include "ABIInfoImpl.h" |
10 | | #include "TargetInfo.h" |
11 | | |
12 | | using namespace clang; |
13 | | using namespace clang::CodeGen; |
14 | | |
15 | | //===----------------------------------------------------------------------===// |
16 | | // Hexagon ABI Implementation |
17 | | //===----------------------------------------------------------------------===// |
18 | | |
19 | | namespace { |
20 | | |
// ABI classification for Hexagon.  Scalar arguments up to 64 bits travel in
// the six argument registers tracked by `RegsLeft` (see computeInfo); larger
// values and big aggregates go indirect.
class HexagonABIInfo : public DefaultABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  // Overload that decrements *RegsLeft as arguments consume registers.
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  // Dispatches to the Musl/Linux or bare va_list scheme (see EmitVAArg body).
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
                                   QualType Ty) const;
};
41 | | |
42 | | class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { |
43 | | public: |
44 | | HexagonTargetCodeGenInfo(CodeGenTypes &CGT) |
45 | 0 | : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {} |
46 | | |
47 | 0 | int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { |
48 | 0 | return 29; |
49 | 0 | } |
50 | | |
51 | | void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, |
52 | 0 | CodeGen::CodeGenModule &GCM) const override { |
53 | 0 | if (GV->isDeclaration()) |
54 | 0 | return; |
55 | 0 | const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); |
56 | 0 | if (!FD) |
57 | 0 | return; |
58 | 0 | } |
59 | | }; |
60 | | |
61 | | } // namespace |
62 | | |
63 | 0 | void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { |
64 | 0 | unsigned RegsLeft = 6; |
65 | 0 | if (!getCXXABI().classifyReturnType(FI)) |
66 | 0 | FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); |
67 | 0 | for (auto &I : FI.arguments()) |
68 | 0 | I.info = classifyArgumentType(I.type, &RegsLeft); |
69 | 0 | } |
70 | | |
71 | 0 | static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) { |
72 | 0 | assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits" |
73 | 0 | " through registers"); |
74 | | |
75 | 0 | if (*RegsLeft == 0) |
76 | 0 | return false; |
77 | | |
78 | 0 | if (Size <= 32) { |
79 | 0 | (*RegsLeft)--; |
80 | 0 | return true; |
81 | 0 | } |
82 | | |
83 | 0 | if (2 <= (*RegsLeft & (~1U))) { |
84 | 0 | *RegsLeft = (*RegsLeft & (~1U)) - 2; |
85 | 0 | return true; |
86 | 0 | } |
87 | | |
88 | | // Next available register was r5 but candidate was greater than 32-bits so it |
89 | | // has to go on the stack. However we still consume r5 |
90 | 0 | if (*RegsLeft == 1) |
91 | 0 | *RegsLeft = 0; |
92 | |
|
93 | 0 | return false; |
94 | 0 | } |
95 | | |
96 | | ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty, |
97 | 0 | unsigned *RegsLeft) const { |
98 | 0 | if (!isAggregateTypeForABI(Ty)) { |
99 | | // Treat an enum type as its underlying type. |
100 | 0 | if (const EnumType *EnumTy = Ty->getAs<EnumType>()) |
101 | 0 | Ty = EnumTy->getDecl()->getIntegerType(); |
102 | |
|
103 | 0 | uint64_t Size = getContext().getTypeSize(Ty); |
104 | 0 | if (Size <= 64) |
105 | 0 | HexagonAdjustRegsLeft(Size, RegsLeft); |
106 | |
|
107 | 0 | if (Size > 64 && Ty->isBitIntType()) |
108 | 0 | return getNaturalAlignIndirect(Ty, /*ByVal=*/true); |
109 | | |
110 | 0 | return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) |
111 | 0 | : ABIArgInfo::getDirect(); |
112 | 0 | } |
113 | | |
114 | 0 | if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) |
115 | 0 | return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); |
116 | | |
117 | | // Ignore empty records. |
118 | 0 | if (isEmptyRecord(getContext(), Ty, true)) |
119 | 0 | return ABIArgInfo::getIgnore(); |
120 | | |
121 | 0 | uint64_t Size = getContext().getTypeSize(Ty); |
122 | 0 | unsigned Align = getContext().getTypeAlign(Ty); |
123 | |
|
124 | 0 | if (Size > 64) |
125 | 0 | return getNaturalAlignIndirect(Ty, /*ByVal=*/true); |
126 | | |
127 | 0 | if (HexagonAdjustRegsLeft(Size, RegsLeft)) |
128 | 0 | Align = Size <= 32 ? 32 : 64; |
129 | 0 | if (Size <= Align) { |
130 | | // Pass in the smallest viable integer type. |
131 | 0 | Size = llvm::bit_ceil(Size); |
132 | 0 | return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); |
133 | 0 | } |
134 | 0 | return DefaultABIInfo::classifyArgumentType(Ty); |
135 | 0 | } |
136 | | |
137 | 0 | ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { |
138 | 0 | if (RetTy->isVoidType()) |
139 | 0 | return ABIArgInfo::getIgnore(); |
140 | | |
141 | 0 | const TargetInfo &T = CGT.getTarget(); |
142 | 0 | uint64_t Size = getContext().getTypeSize(RetTy); |
143 | |
|
144 | 0 | if (RetTy->getAs<VectorType>()) { |
145 | | // HVX vectors are returned in vector registers or register pairs. |
146 | 0 | if (T.hasFeature("hvx")) { |
147 | 0 | assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b")); |
148 | 0 | uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8; |
149 | 0 | if (Size == VecSize || Size == 2*VecSize) |
150 | 0 | return ABIArgInfo::getDirectInReg(); |
151 | 0 | } |
152 | | // Large vector types should be returned via memory. |
153 | 0 | if (Size > 64) |
154 | 0 | return getNaturalAlignIndirect(RetTy); |
155 | 0 | } |
156 | | |
157 | 0 | if (!isAggregateTypeForABI(RetTy)) { |
158 | | // Treat an enum type as its underlying type. |
159 | 0 | if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) |
160 | 0 | RetTy = EnumTy->getDecl()->getIntegerType(); |
161 | |
|
162 | 0 | if (Size > 64 && RetTy->isBitIntType()) |
163 | 0 | return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); |
164 | | |
165 | 0 | return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) |
166 | 0 | : ABIArgInfo::getDirect(); |
167 | 0 | } |
168 | | |
169 | 0 | if (isEmptyRecord(getContext(), RetTy, true)) |
170 | 0 | return ABIArgInfo::getIgnore(); |
171 | | |
172 | | // Aggregates <= 8 bytes are returned in registers, other aggregates |
173 | | // are returned indirectly. |
174 | 0 | if (Size <= 64) { |
175 | | // Return in the smallest viable integer type. |
176 | 0 | Size = llvm::bit_ceil(Size); |
177 | 0 | return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size)); |
178 | 0 | } |
179 | 0 | return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); |
180 | 0 | } |
181 | | |
182 | | Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF, |
183 | | Address VAListAddr, |
184 | 0 | QualType Ty) const { |
185 | | // Load the overflow area pointer. |
186 | 0 | Address __overflow_area_pointer_p = |
187 | 0 | CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p"); |
188 | 0 | llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad( |
189 | 0 | __overflow_area_pointer_p, "__overflow_area_pointer"); |
190 | |
|
191 | 0 | uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; |
192 | 0 | if (Align > 4) { |
193 | | // Alignment should be a power of 2. |
194 | 0 | assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!"); |
195 | | |
196 | | // overflow_arg_area = (overflow_arg_area + align - 1) & -align; |
197 | 0 | llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1); |
198 | | |
199 | | // Add offset to the current pointer to access the argument. |
200 | 0 | __overflow_area_pointer = |
201 | 0 | CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset); |
202 | 0 | llvm::Value *AsInt = |
203 | 0 | CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty); |
204 | | |
205 | | // Create a mask which should be "AND"ed |
206 | | // with (overflow_arg_area + align - 1) |
207 | 0 | llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align); |
208 | 0 | __overflow_area_pointer = CGF.Builder.CreateIntToPtr( |
209 | 0 | CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(), |
210 | 0 | "__overflow_area_pointer.align"); |
211 | 0 | } |
212 | | |
213 | | // Get the type of the argument from memory and bitcast |
214 | | // overflow area pointer to the argument type. |
215 | 0 | llvm::Type *PTy = CGF.ConvertTypeForMem(Ty); |
216 | 0 | Address AddrTyped = |
217 | 0 | Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align)); |
218 | | |
219 | | // Round up to the minimum stack alignment for varargs which is 4 bytes. |
220 | 0 | uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); |
221 | |
|
222 | 0 | __overflow_area_pointer = CGF.Builder.CreateGEP( |
223 | 0 | CGF.Int8Ty, __overflow_area_pointer, |
224 | 0 | llvm::ConstantInt::get(CGF.Int32Ty, Offset), |
225 | 0 | "__overflow_area_pointer.next"); |
226 | 0 | CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p); |
227 | |
|
228 | 0 | return AddrTyped; |
229 | 0 | } |
230 | | |
231 | | Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF, |
232 | | Address VAListAddr, |
233 | 0 | QualType Ty) const { |
234 | | // FIXME: Need to handle alignment |
235 | 0 | llvm::Type *BP = CGF.Int8PtrTy; |
236 | 0 | CGBuilderTy &Builder = CGF.Builder; |
237 | 0 | Address VAListAddrAsBPP = VAListAddr.withElementType(BP); |
238 | 0 | llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); |
239 | | // Handle address alignment for type alignment > 32 bits |
240 | 0 | uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8; |
241 | 0 | if (TyAlign > 4) { |
242 | 0 | assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!"); |
243 | 0 | llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty); |
244 | 0 | AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1)); |
245 | 0 | AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1))); |
246 | 0 | Addr = Builder.CreateIntToPtr(AddrAsInt, BP); |
247 | 0 | } |
248 | 0 | Address AddrTyped = |
249 | 0 | Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign)); |
250 | |
|
251 | 0 | uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4); |
252 | 0 | llvm::Value *NextAddr = Builder.CreateGEP( |
253 | 0 | CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); |
254 | 0 | Builder.CreateStore(NextAddr, VAListAddrAsBPP); |
255 | |
|
256 | 0 | return AddrTyped; |
257 | 0 | } |
258 | | |
// va_arg lowering for the Musl/Linux Hexagon va_list, which has three
// fields: {0: current saved-register-area pointer, 1: saved-register-area
// end pointer, 2: overflow-area pointer}.  Arguments of up to 8 bytes may
// live in the register save area; larger ones always come from memory.
Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
                                                 Address VAListAddr,
                                                 QualType Ty) const {
  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;

  // Anything wider than 8 bytes is never in the register save area.
  if (ArgSize > 8)
    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);

  // Here we have check if the argument is in register area or
  // in overflow area.
  // If the saved register area pointer + argsize rounded up to alignment >
  // saved register area end pointer, argument is in overflow area.
  // Classification is done only for its RegsLeft side effects; the result
  // itself is discarded.
  unsigned RegsLeft = 6;
  Ty = CGF.getContext().getCanonicalType(Ty);
  (void)classifyArgumentType(Ty, &RegsLeft);

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  // Get rounded size of the argument.GCC does not allow vararg of
  // size < 4 bytes. We follow the same logic here.
  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;

  // Argument may be in saved register area.
  CGF.EmitBlock(MaybeRegBlock);

  // Load the current saved register area pointer (field 0).
  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");

  // Load the saved register area end pointer (field 1).
  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");

  // If the size of argument is > 4 bytes, check if the stack
  // location is aligned to 8 bytes: ptr = (ptr + align - 1) & -align.
  if (ArgAlign > 4) {

    llvm::Value *__current_saved_reg_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
                                   CGF.Int32Ty);

    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
        __current_saved_reg_area_pointer_int,
        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
        "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer_int =
        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer =
        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
                                   __current_saved_reg_area_pointer->getType(),
                                   "align_current_saved_reg_area_pointer");
  }

  // Pointer just past this argument if it were taken from the register area.
  llvm::Value *__new_saved_reg_area_pointer =
      CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
                            "__new_saved_reg_area_pointer");

  // Overrunning the end pointer means the argument is on the stack.
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
                                         __saved_reg_area_end_pointer);

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // Argument in saved register area
  // Implement the block where argument is in register saved area
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  // Commit the advanced register-area cursor.
  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // Argument in overflow area
  // Implement the block where the argument is in overflow area.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer (field 2).
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer according to the alignment of the argument
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Get the pointer for next argument in overflow area and store it
  // to overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  // NOTE(review): the advanced overflow pointer is also stored into the
  // saved-register-area slot — presumably to keep the two cursors in sync
  // once arguments spill to the stack; confirm against the Musl va_list ABI.
  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Get the correct pointer to load the variable argument
  // Implement the ContBlock: merge the two addresses with a PHI.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}
410 | | |
411 | | Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, |
412 | 0 | QualType Ty) const { |
413 | |
|
414 | 0 | if (getTarget().getTriple().isMusl()) |
415 | 0 | return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty); |
416 | | |
417 | 0 | return EmitVAArgForHexagon(CGF, VAListAddr, Ty); |
418 | 0 | } |
419 | | |
420 | | std::unique_ptr<TargetCodeGenInfo> |
421 | 0 | CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) { |
422 | 0 | return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes()); |
423 | 0 | } |