/src/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
Line | Count | Source
1 | | //===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // Implementation of the abstract lowering for the Swift calling convention. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "clang/CodeGen/SwiftCallingConv.h" |
14 | | #include "ABIInfo.h" |
15 | | #include "CodeGenModule.h" |
16 | | #include "TargetInfo.h" |
17 | | #include "clang/Basic/TargetInfo.h" |
18 | | #include <optional> |
19 | | |
20 | | using namespace clang; |
21 | | using namespace CodeGen; |
22 | | using namespace swiftcall; |
23 | | |
24 | 0 | static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) { |
25 | 0 | return CGM.getTargetCodeGenInfo().getSwiftABIInfo(); |
26 | 0 | } |
27 | | |
28 | 0 | static bool isPowerOf2(unsigned n) { |
29 | 0 | return n == (n & -n); |
30 | 0 | } |
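// [Editor note: illustrative example, not part of the upstream file.]
// n == (n & -n) keeps only the lowest set bit of n, so the test passes
// exactly for powers of two (and, degenerately, for n == 0, which the
// callers below never pass):
//
//   isPowerOf2(8);   // 8  & -8  == 8 -> true
//   isPowerOf2(12);  // 12 & -12 == 4 -> false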
31 | | |
32 | | /// Given two types with the same size, try to find a common type. |
33 | 0 | static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) { |
34 | 0 | assert(first != second); |
35 | | |
36 | | // Allow pointers to merge with integers, but prefer the integer type. |
37 | 0 | if (first->isIntegerTy()) { |
38 | 0 | if (second->isPointerTy()) return first; |
39 | 0 | } else if (first->isPointerTy()) { |
40 | 0 | if (second->isIntegerTy()) return second; |
41 | 0 | if (second->isPointerTy()) return first; |
42 | | |
43 | | // Allow two vectors to be merged (given that they have the same size). |
44 | | // This assumes that we never have two different vector register sets. |
45 | 0 | } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) { |
46 | 0 | if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) { |
47 | 0 | if (auto commonTy = getCommonType(firstVecTy->getElementType(), |
48 | 0 | secondVecTy->getElementType())) { |
49 | 0 | return (commonTy == firstVecTy->getElementType() ? first : second); |
50 | 0 | } |
51 | 0 | } |
52 | 0 | } |
53 | | |
54 | 0 | return nullptr; |
55 | 0 | } |
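// [Editor note: illustrative examples, not part of the upstream file.]
// Sample merges under the rules above:
//
//   getCommonType(i64, ptr)              // -> i64 (integer preferred)
//   getCommonType(<2 x i64>, <2 x ptr>)  // -> <2 x i64>
//   getCommonType(i32, float)            // -> nullptr (caller goes opaque)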
56 | | |
57 | 0 | static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) { |
58 | 0 | return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type)); |
59 | 0 | } |
60 | | |
61 | 0 | static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) { |
62 | 0 | return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type)); |
63 | 0 | } |
64 | | |
65 | 0 | void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) { |
66 | | // Deal with various aggregate types as special cases: |
67 | | |
68 | | // Record types. |
69 | 0 | if (auto recType = type->getAs<RecordType>()) { |
70 | 0 | addTypedData(recType->getDecl(), begin); |
71 | | |
72 | | // Array types. |
73 | 0 | } else if (type->isArrayType()) { |
74 | | // Incomplete array types (flexible array members?) don't provide |
75 | | // data to lay out, and the other cases shouldn't be possible. |
76 | 0 | auto arrayType = CGM.getContext().getAsConstantArrayType(type); |
77 | 0 | if (!arrayType) return; |
78 | | |
79 | 0 | QualType eltType = arrayType->getElementType(); |
80 | 0 | auto eltSize = CGM.getContext().getTypeSizeInChars(eltType); |
81 | 0 | for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) { |
82 | 0 | addTypedData(eltType, begin + i * eltSize); |
83 | 0 | } |
84 | | |
85 | | // Complex types. |
86 | 0 | } else if (auto complexType = type->getAs<ComplexType>()) { |
87 | 0 | auto eltType = complexType->getElementType(); |
88 | 0 | auto eltSize = CGM.getContext().getTypeSizeInChars(eltType); |
89 | 0 | auto eltLLVMType = CGM.getTypes().ConvertType(eltType); |
90 | 0 | addTypedData(eltLLVMType, begin, begin + eltSize); |
91 | 0 | addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize); |
92 | | |
93 | | // Member pointer types. |
94 | 0 | } else if (type->getAs<MemberPointerType>()) { |
95 | | // Just add it all as opaque. |
96 | 0 | addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type)); |
97 | | |
98 | | // Atomic types. |
99 | 0 | } else if (const auto *atomicType = type->getAs<AtomicType>()) { |
100 | 0 | auto valueType = atomicType->getValueType(); |
101 | 0 | auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType); |
102 | 0 | auto valueSize = CGM.getContext().getTypeSizeInChars(valueType); |
103 | |
104 | 0 | addTypedData(atomicType->getValueType(), begin); |
105 | | |
106 | | // Add atomic padding. |
107 | 0 | auto atomicPadding = atomicSize - valueSize; |
108 | 0 | if (atomicPadding > CharUnits::Zero()) |
109 | 0 | addOpaqueData(begin + valueSize, begin + atomicSize); |
110 | | |
111 | | // Everything else is scalar and should not convert as an LLVM aggregate. |
112 | 0 | } else { |
113 | | // We intentionally convert as !ForMem because we want to preserve |
114 | | // that a type was an i1. |
115 | 0 | auto *llvmType = CGM.getTypes().ConvertType(type); |
116 | 0 | addTypedData(llvmType, begin); |
117 | 0 | } |
118 | 0 | } |
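// [Editor note: illustrative example, not part of the upstream file.]
// For instance, a '_Complex double' at offset 0 contributes two 'double'
// ranges, [0,8) and [8,16); an atomic type whose storage is wider than
// its value (say a 3-byte payload the target pads to 4 bytes) adds the
// value normally and the trailing padding byte as opaque data.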
119 | | |
120 | 0 | void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) { |
121 | 0 | addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record)); |
122 | 0 | } |
123 | | |
124 | | void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin, |
125 | 0 | const ASTRecordLayout &layout) { |
126 | | // Unions are a special case. |
127 | 0 | if (record->isUnion()) { |
128 | 0 | for (auto *field : record->fields()) { |
129 | 0 | if (field->isBitField()) { |
130 | 0 | addBitFieldData(field, begin, 0); |
131 | 0 | } else { |
132 | 0 | addTypedData(field->getType(), begin); |
133 | 0 | } |
134 | 0 | } |
135 | 0 | return; |
136 | 0 | } |
137 | | |
138 | | // Note that correctness does not rely on us adding things in |
139 | | // their actual order of layout; it's just somewhat more efficient |
140 | | // for the builder. |
141 | | |
142 | | // With that in mind, add "early" C++ data. |
143 | 0 | auto cxxRecord = dyn_cast<CXXRecordDecl>(record); |
144 | 0 | if (cxxRecord) { |
145 | | // - a v-table pointer, if the class adds its own |
146 | 0 | if (layout.hasOwnVFPtr()) { |
147 | 0 | addTypedData(CGM.Int8PtrTy, begin); |
148 | 0 | } |
149 | | |
150 | | // - non-virtual bases |
151 | 0 | for (auto &baseSpecifier : cxxRecord->bases()) { |
152 | 0 | if (baseSpecifier.isVirtual()) continue; |
153 | | |
154 | 0 | auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl(); |
155 | 0 | addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord)); |
156 | 0 | } |
157 | | |
158 | | // - a vbptr if the class adds its own |
159 | 0 | if (layout.hasOwnVBPtr()) { |
160 | 0 | addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset()); |
161 | 0 | } |
162 | 0 | } |
163 | | |
164 | | // Add fields. |
165 | 0 | for (auto *field : record->fields()) { |
166 | 0 | auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex()); |
167 | 0 | if (field->isBitField()) { |
168 | 0 | addBitFieldData(field, begin, fieldOffsetInBits); |
169 | 0 | } else { |
170 | 0 | addTypedData(field->getType(), |
171 | 0 | begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits)); |
172 | 0 | } |
173 | 0 | } |
174 | | |
175 | | // Add "late" C++ data: |
176 | 0 | if (cxxRecord) { |
177 | | // - virtual bases |
178 | 0 | for (auto &vbaseSpecifier : cxxRecord->vbases()) { |
179 | 0 | auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl(); |
180 | 0 | addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord)); |
181 | 0 | } |
182 | 0 | } |
183 | 0 | } |
184 | | |
185 | | void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield, |
186 | | CharUnits recordBegin, |
187 | 0 | uint64_t bitfieldBitBegin) { |
188 | 0 | assert(bitfield->isBitField()); |
189 | 0 | auto &ctx = CGM.getContext(); |
190 | 0 | auto width = bitfield->getBitWidthValue(ctx); |
191 | | |
192 | | // We can ignore zero-width bit-fields. |
193 | 0 | if (width == 0) return; |
194 | | |
195 | | // toCharUnitsFromBits rounds down. |
196 | 0 | CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin); |
197 | | |
198 | | // Find the offset of the last byte that is partially occupied by the |
199 | | // bit-field; since we otherwise expect exclusive ends, the end is the |
200 | | // next byte. |
201 | 0 | uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1; |
202 | 0 | CharUnits bitfieldByteEnd = |
203 | 0 | ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One(); |
204 | 0 | addOpaqueData(recordBegin + bitfieldByteBegin, |
205 | 0 | recordBegin + bitfieldByteEnd); |
206 | 0 | } |
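// [Editor note: worked example, not part of the upstream file.] Given
//
//   struct S { unsigned a : 3; unsigned b : 13; };
//
// field 'b' starts at bit 3 with width 13, so bitfieldByteBegin is 0,
// bitfieldBitLast is 15, and bitfieldByteEnd is 2: the function adds the
// opaque byte range [recordBegin + 0, recordBegin + 2).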
207 | | |
208 | 0 | void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) { |
209 | 0 | assert(type && "didn't provide type for typed data"); |
210 | 0 | addTypedData(type, begin, begin + getTypeStoreSize(CGM, type)); |
211 | 0 | } |
212 | | |
213 | | void SwiftAggLowering::addTypedData(llvm::Type *type, |
214 | 0 | CharUnits begin, CharUnits end) { |
215 | 0 | assert(type && "didn't provide type for typed data"); |
216 | 0 | assert(getTypeStoreSize(CGM, type) == end - begin); |
217 | | |
218 | | // Legalize vector types. |
219 | 0 | if (auto vecTy = dyn_cast<llvm::VectorType>(type)) { |
220 | 0 | SmallVector<llvm::Type*, 4> componentTys; |
221 | 0 | legalizeVectorType(CGM, end - begin, vecTy, componentTys); |
222 | 0 | assert(componentTys.size() >= 1); |
223 | | |
224 | | // Walk the initial components. |
225 | 0 | for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) { |
226 | 0 | llvm::Type *componentTy = componentTys[i]; |
227 | 0 | auto componentSize = getTypeStoreSize(CGM, componentTy); |
228 | 0 | assert(componentSize < end - begin); |
229 | 0 | addLegalTypedData(componentTy, begin, begin + componentSize); |
230 | 0 | begin += componentSize; |
231 | 0 | } |
232 | |
233 | 0 | return addLegalTypedData(componentTys.back(), begin, end); |
234 | 0 | } |
235 | | |
236 | | // Legalize integer types. |
237 | 0 | if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { |
238 | 0 | if (!isLegalIntegerType(CGM, intTy)) |
239 | 0 | return addOpaqueData(begin, end); |
240 | 0 | } |
241 | | |
242 | | // All other types should be legal. |
243 | 0 | return addLegalTypedData(type, begin, end); |
244 | 0 | } |
245 | | |
246 | | void SwiftAggLowering::addLegalTypedData(llvm::Type *type, |
247 | 0 | CharUnits begin, CharUnits end) { |
248 | | // Require the type to be naturally aligned. |
249 | 0 | if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) { |
250 | | |
251 | | // Try splitting vector types. |
252 | 0 | if (auto vecTy = dyn_cast<llvm::VectorType>(type)) { |
253 | 0 | auto split = splitLegalVectorType(CGM, end - begin, vecTy); |
254 | 0 | auto eltTy = split.first; |
255 | 0 | auto numElts = split.second; |
256 | |
257 | 0 | auto eltSize = (end - begin) / numElts; |
258 | 0 | assert(eltSize == getTypeStoreSize(CGM, eltTy)); |
259 | 0 | for (size_t i = 0, e = numElts; i != e; ++i) { |
260 | 0 | addLegalTypedData(eltTy, begin, begin + eltSize); |
261 | 0 | begin += eltSize; |
262 | 0 | } |
263 | 0 | assert(begin == end); |
264 | 0 | return; |
265 | 0 | } |
266 | | |
267 | 0 | return addOpaqueData(begin, end); |
268 | 0 | } |
269 | | |
270 | 0 | addEntry(type, begin, end); |
271 | 0 | } |
272 | | |
273 | | void SwiftAggLowering::addEntry(llvm::Type *type, |
274 | 0 | CharUnits begin, CharUnits end) { |
275 | 0 | assert((!type || |
276 | 0 | (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) && |
277 | 0 | "cannot add aggregate-typed data"); |
278 | 0 | assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type))); |
279 | | |
280 | | // Fast path: we can just add entries to the end. |
281 | 0 | if (Entries.empty() || Entries.back().End <= begin) { |
282 | 0 | Entries.push_back({begin, end, type}); |
283 | 0 | return; |
284 | 0 | } |
285 | | |
286 | | // Find the first existing entry that ends after the start of the new data. |
287 | | // TODO: do a binary search if Entries is big enough for it to matter. |
288 | 0 | size_t index = Entries.size() - 1; |
289 | 0 | while (index != 0) { |
290 | 0 | if (Entries[index - 1].End <= begin) break; |
291 | 0 | --index; |
292 | 0 | } |
293 | | |
294 | | // The entry ends after the start of the new data. |
295 | | // If the entry starts after the end of the new data, there's no conflict. |
296 | 0 | if (Entries[index].Begin >= end) { |
297 | | // This insertion is potentially O(n), but the way we generally build |
298 | | // these layouts makes that unlikely to matter: we'd need a union of |
299 | | // several very large types. |
300 | 0 | Entries.insert(Entries.begin() + index, {begin, end, type}); |
301 | 0 | return; |
302 | 0 | } |
303 | | |
304 | | // Otherwise, the ranges overlap. The new range might also overlap |
305 | | // with later ranges. |
306 | 0 | restartAfterSplit: |
307 | | |
308 | | // Simplest case: an exact overlap. |
309 | 0 | if (Entries[index].Begin == begin && Entries[index].End == end) { |
310 | | // If the types match exactly, great. |
311 | 0 | if (Entries[index].Type == type) return; |
312 | | |
313 | | // If either type is opaque, make the entry opaque and return. |
314 | 0 | if (Entries[index].Type == nullptr) { |
315 | 0 | return; |
316 | 0 | } else if (type == nullptr) { |
317 | 0 | Entries[index].Type = nullptr; |
318 | 0 | return; |
319 | 0 | } |
320 | | |
321 | | // If they disagree in an ABI-agnostic way, just resolve the conflict |
322 | | // arbitrarily. |
323 | 0 | if (auto entryType = getCommonType(Entries[index].Type, type)) { |
324 | 0 | Entries[index].Type = entryType; |
325 | 0 | return; |
326 | 0 | } |
327 | | |
328 | | // Otherwise, make the entry opaque. |
329 | 0 | Entries[index].Type = nullptr; |
330 | 0 | return; |
331 | 0 | } |
332 | | |
333 | | // Okay, we have an overlapping conflict of some sort. |
334 | | |
335 | | // If we have a vector type, split it. |
336 | 0 | if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) { |
337 | 0 | auto eltTy = vecTy->getElementType(); |
338 | 0 | CharUnits eltSize = |
339 | 0 | (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements(); |
340 | 0 | assert(eltSize == getTypeStoreSize(CGM, eltTy)); |
341 | 0 | for (unsigned i = 0, |
342 | 0 | e = cast<llvm::FixedVectorType>(vecTy)->getNumElements(); |
343 | 0 | i != e; ++i) { |
344 | 0 | addEntry(eltTy, begin, begin + eltSize); |
345 | 0 | begin += eltSize; |
346 | 0 | } |
347 | 0 | assert(begin == end); |
348 | 0 | return; |
349 | 0 | } |
350 | | |
351 | | // If the entry is a vector type, split it and try again. |
352 | 0 | if (Entries[index].Type && Entries[index].Type->isVectorTy()) { |
353 | 0 | splitVectorEntry(index); |
354 | 0 | goto restartAfterSplit; |
355 | 0 | } |
356 | | |
357 | | // Okay, we have no choice but to make the existing entry opaque. |
358 | | |
359 | 0 | Entries[index].Type = nullptr; |
360 | | |
361 | | // Stretch the start of the entry to the beginning of the range. |
362 | 0 | if (begin < Entries[index].Begin) { |
363 | 0 | Entries[index].Begin = begin; |
364 | 0 | assert(index == 0 || begin >= Entries[index - 1].End); |
365 | 0 | } |
366 | | |
367 | | // Stretch the end of the entry to the end of the range; but if we run |
368 | | // into the start of the next entry, just leave the range there and repeat. |
369 | 0 | while (end > Entries[index].End) { |
370 | 0 | assert(Entries[index].Type == nullptr); |
371 | | |
372 | | // If the range doesn't overlap the next entry, we're done. |
373 | 0 | if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) { |
374 | 0 | Entries[index].End = end; |
375 | 0 | break; |
376 | 0 | } |
377 | | |
378 | | // Otherwise, stretch to the start of the next entry. |
379 | 0 | Entries[index].End = Entries[index + 1].Begin; |
380 | | |
381 | | // Continue with the next entry. |
382 | 0 | index++; |
383 | | |
384 | | // This entry needs to be made opaque if it is not already. |
385 | 0 | if (Entries[index].Type == nullptr) |
386 | 0 | continue; |
387 | | |
388 | | // Split vector entries unless we completely subsume them. |
389 | 0 | if (Entries[index].Type->isVectorTy() && |
390 | 0 | end < Entries[index].End) { |
391 | 0 | splitVectorEntry(index); |
392 | 0 | } |
393 | | |
394 | | // Make the entry opaque. |
395 | 0 | Entries[index].Type = nullptr; |
396 | 0 | } |
397 | 0 | } |
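// [Editor note: worked example, not part of the upstream file.] Adding
// 'union { int i; float f; }' produces an exact overlap of an i32 entry
// and a float entry at [0,4); getCommonType() has no answer for that
// pair, so the entry's type is cleared here and finish() later re-emits
// the range as a 4-byte integer chunk.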
398 | | |
399 | | /// Replace the entry of vector type at offset 'index' with a sequence |
400 | | /// of its component vectors. |
401 | 0 | void SwiftAggLowering::splitVectorEntry(unsigned index) { |
402 | 0 | auto vecTy = cast<llvm::VectorType>(Entries[index].Type); |
403 | 0 | auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy); |
404 | |
405 | 0 | auto eltTy = split.first; |
406 | 0 | CharUnits eltSize = getTypeStoreSize(CGM, eltTy); |
407 | 0 | auto numElts = split.second; |
408 | 0 | Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry()); |
409 | |
410 | 0 | CharUnits begin = Entries[index].Begin; |
411 | 0 | for (unsigned i = 0; i != numElts; ++i) { |
412 | 0 | unsigned idx = index + i; |
413 | 0 | Entries[idx].Type = eltTy; |
414 | 0 | Entries[idx].Begin = begin; |
415 | 0 | Entries[idx].End = begin + eltSize; |
416 | 0 | begin += eltSize; |
417 | 0 | } |
418 | 0 | } |
419 | | |
420 | | /// Given a power-of-two unit size, return the offset of the aligned unit |
421 | | /// of that size which contains the given offset. |
422 | | /// |
423 | | /// In other words, round down to the nearest multiple of the unit size. |
424 | 0 | static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) { |
425 | 0 | assert(isPowerOf2(unitSize.getQuantity())); |
426 | 0 | auto unitMask = ~(unitSize.getQuantity() - 1); |
427 | 0 | return CharUnits::fromQuantity(offset.getQuantity() & unitMask); |
428 | 0 | } |
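// [Editor note: illustrative example, not part of the upstream file.]
// With an 8-byte unit, ~(8 - 1) clears the low three bits, e.g.:
//
//   getOffsetAtStartOfUnit(CharUnits::fromQuantity(13),
//                          CharUnits::fromQuantity(8));   // -> 8
//
// i.e. byte 13 falls inside the aligned unit [8, 16).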
429 | | |
430 | | static bool areBytesInSameUnit(CharUnits first, CharUnits second, |
431 | 0 | CharUnits chunkSize) { |
432 | 0 | return getOffsetAtStartOfUnit(first, chunkSize) |
433 | 0 | == getOffsetAtStartOfUnit(second, chunkSize); |
434 | 0 | } |
435 | | |
436 | 0 | static bool isMergeableEntryType(llvm::Type *type) { |
437 | | // Opaquely-typed memory is always mergeable. |
438 | 0 | if (type == nullptr) return true; |
439 | | |
440 | | // Pointers and integers are always mergeable. In theory we should not |
441 | | // merge pointers, but (1) it doesn't currently matter in practice because |
442 | | // the chunk size is never greater than the size of a pointer and (2) |
443 | | // Swift IRGen uses integer types for a lot of things that are "really" |
444 | | // just storing pointers (like std::optional<SomePointer>). If we ever have a |
445 | | // target that would otherwise combine pointers, we should put some effort |
446 | | // into fixing those cases in Swift IRGen and then call out pointer types |
447 | | // here. |
448 | | |
449 | | // Floating-point and vector types should never be merged. |
450 | | // Most such types are too large and highly-aligned to ever trigger merging |
451 | | // in practice, but it's important for the rule to cover at least 'half' |
452 | | // and 'float', as well as things like small vectors of 'i1' or 'i8'. |
453 | 0 | return (!type->isFloatingPointTy() && !type->isVectorTy()); |
454 | 0 | } |
455 | | |
456 | | bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first, |
457 | | const StorageEntry &second, |
458 | 0 | CharUnits chunkSize) { |
459 | | // Only merge entries that overlap the same chunk. We test this first |
460 | | // despite being a bit more expensive because this is the condition that |
461 | | // tends to prevent merging. |
462 | 0 | if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin, |
463 | 0 | chunkSize)) |
464 | 0 | return false; |
465 | | |
466 | 0 | return (isMergeableEntryType(first.Type) && |
467 | 0 | isMergeableEntryType(second.Type)); |
468 | 0 | } |
469 | | |
470 | 0 | void SwiftAggLowering::finish() { |
471 | 0 | if (Entries.empty()) { |
472 | 0 | Finished = true; |
473 | 0 | return; |
474 | 0 | } |
475 | | |
476 | | // We logically split the layout down into a series of chunks of this size, |
477 | | // which is generally the size of a pointer. |
478 | 0 | const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM); |
479 | | |
480 | | // First pass: if two entries should be merged, make them both opaque |
481 | | // and stretch one to meet the next. |
482 | | // Also, remember if there are any opaque entries. |
483 | 0 | bool hasOpaqueEntries = (Entries[0].Type == nullptr); |
484 | 0 | for (size_t i = 1, e = Entries.size(); i != e; ++i) { |
485 | 0 | if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) { |
486 | 0 | Entries[i - 1].Type = nullptr; |
487 | 0 | Entries[i].Type = nullptr; |
488 | 0 | Entries[i - 1].End = Entries[i].Begin; |
489 | 0 | hasOpaqueEntries = true; |
490 | |
491 | 0 | } else if (Entries[i].Type == nullptr) { |
492 | 0 | hasOpaqueEntries = true; |
493 | 0 | } |
494 | 0 | } |
495 | | |
496 | | // The rest of the algorithm leaves non-opaque entries alone, so if we |
497 | | // have no opaque entries, we're done. |
498 | 0 | if (!hasOpaqueEntries) { |
499 | 0 | Finished = true; |
500 | 0 | return; |
501 | 0 | } |
502 | | |
503 | | // Okay, move the entries to a temporary and rebuild Entries. |
504 | 0 | auto orig = std::move(Entries); |
505 | 0 | assert(Entries.empty()); |
506 | | |
507 | 0 | for (size_t i = 0, e = orig.size(); i != e; ++i) { |
508 | | // Just copy over non-opaque entries. |
509 | 0 | if (orig[i].Type != nullptr) { |
510 | 0 | Entries.push_back(orig[i]); |
511 | 0 | continue; |
512 | 0 | } |
513 | | |
514 | | // Scan forward to determine the full extent of the next opaque range. |
515 | | // We know from the first pass that only contiguous ranges will overlap |
516 | | // the same aligned chunk. |
517 | 0 | auto begin = orig[i].Begin; |
518 | 0 | auto end = orig[i].End; |
519 | 0 | while (i + 1 != e && |
520 | 0 | orig[i + 1].Type == nullptr && |
521 | 0 | end == orig[i + 1].Begin) { |
522 | 0 | end = orig[i + 1].End; |
523 | 0 | i++; |
524 | 0 | } |
525 | | |
526 | | // Add an entry per intersected chunk. |
527 | 0 | do { |
528 | | // Find the smallest aligned storage unit in the maximal aligned |
529 | | // storage unit containing 'begin' that contains all the bytes in |
530 | | // the intersection between the range and this chunk. |
531 | 0 | CharUnits localBegin = begin; |
532 | 0 | CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize); |
533 | 0 | CharUnits chunkEnd = chunkBegin + chunkSize; |
534 | 0 | CharUnits localEnd = std::min(end, chunkEnd); |
535 | | |
536 | | // Just do a simple loop over ever-increasing unit sizes. |
537 | 0 | CharUnits unitSize = CharUnits::One(); |
538 | 0 | CharUnits unitBegin, unitEnd; |
539 | 0 | for (; ; unitSize *= 2) { |
540 | 0 | assert(unitSize <= chunkSize); |
541 | 0 | unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize); |
542 | 0 | unitEnd = unitBegin + unitSize; |
543 | 0 | if (unitEnd >= localEnd) break; |
544 | 0 | } |
545 | | |
546 | | // Add an entry for this unit. |
547 | 0 | auto entryTy = |
548 | 0 | llvm::IntegerType::get(CGM.getLLVMContext(), |
549 | 0 | CGM.getContext().toBits(unitSize)); |
550 | 0 | Entries.push_back({unitBegin, unitEnd, entryTy}); |
551 | | |
552 | | // The next chunk starts where this chunk left off. |
553 | 0 | begin = localEnd; |
554 | 0 | } while (begin != end); |
555 | 0 | } |
556 | | |
557 | | // Okay, finally finished. |
558 | 0 | Finished = true; |
559 | 0 | } |
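// [Editor note: worked example, not part of the upstream file.] With an
// 8-byte chunk, an opaque range [4,10) is re-emitted as two integer
// entries: its intersection with the first chunk is [4,8), whose
// smallest containing aligned unit is the 4-byte unit [4,8) (an i32),
// and the remainder [8,10) fits the 2-byte unit [8,10) (an i16).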
560 | | |
561 | 0 | void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const { |
562 | 0 | assert(Finished && "haven't yet finished lowering"); |
563 | | |
564 | 0 | for (auto &entry : Entries) { |
565 | 0 | callback(entry.Begin, entry.End, entry.Type); |
566 | 0 | } |
567 | 0 | } |
568 | | |
569 | | std::pair<llvm::StructType*, llvm::Type*> |
570 | 0 | SwiftAggLowering::getCoerceAndExpandTypes() const { |
571 | 0 | assert(Finished && "haven't yet finished lowering"); |
572 | | |
573 | 0 | auto &ctx = CGM.getLLVMContext(); |
574 | |
575 | 0 | if (Entries.empty()) { |
576 | 0 | auto type = llvm::StructType::get(ctx); |
577 | 0 | return { type, type }; |
578 | 0 | } |
579 | | |
580 | 0 | SmallVector<llvm::Type*, 8> elts; |
581 | 0 | CharUnits lastEnd = CharUnits::Zero(); |
582 | 0 | bool hasPadding = false; |
583 | 0 | bool packed = false; |
584 | 0 | for (auto &entry : Entries) { |
585 | 0 | if (entry.Begin != lastEnd) { |
586 | 0 | auto paddingSize = entry.Begin - lastEnd; |
587 | 0 | assert(!paddingSize.isNegative()); |
588 | | |
589 | 0 | auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx), |
590 | 0 | paddingSize.getQuantity()); |
591 | 0 | elts.push_back(padding); |
592 | 0 | hasPadding = true; |
593 | 0 | } |
594 | | |
595 | 0 | if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity( |
596 | 0 | CGM.getDataLayout().getABITypeAlign(entry.Type)))) |
597 | 0 | packed = true; |
598 | |
599 | 0 | elts.push_back(entry.Type); |
600 | |
601 | 0 | lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type); |
602 | 0 | assert(entry.End <= lastEnd); |
603 | 0 | } |
604 | | |
605 | | // We don't need to adjust 'packed' to deal with possible tail padding |
606 | | // because we never do that kind of access through the coercion type. |
607 | 0 | auto coercionType = llvm::StructType::get(ctx, elts, packed); |
608 | |
609 | 0 | llvm::Type *unpaddedType = coercionType; |
610 | 0 | if (hasPadding) { |
611 | 0 | elts.clear(); |
612 | 0 | for (auto &entry : Entries) { |
613 | 0 | elts.push_back(entry.Type); |
614 | 0 | } |
615 | 0 | if (elts.size() == 1) { |
616 | 0 | unpaddedType = elts[0]; |
617 | 0 | } else { |
618 | 0 | unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false); |
619 | 0 | } |
620 | 0 | } else if (Entries.size() == 1) { |
621 | 0 | unpaddedType = Entries[0].Type; |
622 | 0 | } |
623 | |
624 | 0 | return { coercionType, unpaddedType }; |
625 | 0 | } |
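// [Editor note: illustrative example, not part of the upstream file.]
// For entries 'float' at [0,4) and 'double' at [8,16) (e.g. from
// 'struct { float f; double d; }'), the coercion type is the non-packed
// struct { float, [4 x i8], double } -- the i8 array fills the gap --
// and the unpadded type used for expansion is { float, double }.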
626 | | |
627 | 0 | bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const { |
628 | 0 | assert(Finished && "haven't yet finished lowering"); |
629 | | |
630 | | // Empty types don't need to be passed indirectly. |
631 | 0 | if (Entries.empty()) return false; |
632 | | |
633 | | // Avoid copying the array of types when there's just a single element. |
634 | 0 | if (Entries.size() == 1) { |
635 | 0 | return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type, |
636 | 0 | asReturnValue); |
637 | 0 | } |
638 | | |
639 | 0 | SmallVector<llvm::Type*, 8> componentTys; |
640 | 0 | componentTys.reserve(Entries.size()); |
641 | 0 | for (auto &entry : Entries) { |
642 | 0 | componentTys.push_back(entry.Type); |
643 | 0 | } |
644 | 0 | return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue); |
645 | 0 | } |
646 | | |
647 | | bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM, |
648 | | ArrayRef<llvm::Type*> componentTys, |
649 | 0 | bool asReturnValue) { |
650 | 0 | return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue); |
651 | 0 | } |
652 | | |
653 | 0 | CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) { |
654 | | // Currently always the size of an ordinary pointer. |
655 | 0 | return CGM.getContext().toCharUnitsFromBits( |
656 | 0 | CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default)); |
657 | 0 | } |
658 | | |
659 | 0 | CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) { |
660 | | // For Swift's purposes, this is always just the store size of the type |
661 | | // rounded up to a power of 2. |
662 | 0 | auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity(); |
663 | 0 | size = llvm::bit_ceil(size); |
664 | 0 | assert(CGM.getDataLayout().getABITypeAlign(type) <= size); |
665 | 0 | return CharUnits::fromQuantity(size); |
666 | 0 | } |
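// [Editor note: illustrative example, not part of the upstream file.]
// The store size is rounded up to a power of two, so a 12-byte
// <3 x float> gets a natural alignment of 16 bytes, while an i16 keeps
// its store size of 2 as its alignment.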
667 | | |
668 | | bool swiftcall::isLegalIntegerType(CodeGenModule &CGM, |
669 | 0 | llvm::IntegerType *intTy) { |
670 | 0 | auto size = intTy->getBitWidth(); |
671 | 0 | switch (size) { |
672 | 0 | case 1: |
673 | 0 | case 8: |
674 | 0 | case 16: |
675 | 0 | case 32: |
676 | 0 | case 64: |
677 | | // Just assume that the above are always legal. |
678 | 0 | return true; |
679 | | |
680 | 0 | case 128: |
681 | 0 | return CGM.getContext().getTargetInfo().hasInt128Type(); |
682 | | |
683 | 0 | default: |
684 | 0 | return false; |
685 | 0 | } |
686 | 0 | } |
687 | | |
688 | | bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize, |
689 | 0 | llvm::VectorType *vectorTy) { |
690 | 0 | return isLegalVectorType( |
691 | 0 | CGM, vectorSize, vectorTy->getElementType(), |
692 | 0 | cast<llvm::FixedVectorType>(vectorTy)->getNumElements()); |
693 | 0 | } |
694 | | |
695 | | bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize, |
696 | 0 | llvm::Type *eltTy, unsigned numElts) { |
697 | 0 | assert(numElts > 1 && "illegal vector length"); |
698 | 0 | return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts); |
699 | 0 | } |
700 | | |
701 | | std::pair<llvm::Type*, unsigned> |
702 | | swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize, |
703 | 0 | llvm::VectorType *vectorTy) { |
704 | 0 | auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements(); |
705 | 0 | auto eltTy = vectorTy->getElementType(); |
706 | | |
707 | | // Try to split the vector type in half. |
708 | 0 | if (numElts >= 4 && isPowerOf2(numElts)) { |
709 | 0 | if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2)) |
710 | 0 | return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2}; |
711 | 0 | } |
712 | | |
713 | 0 | return {eltTy, numElts}; |
714 | 0 | } |
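// [Editor note: illustrative example, not part of the upstream file.]
// On a target where a 32-byte <8 x float> is illegal but <4 x float> is
// legal, this returns { <4 x float>, 2 }; if the halved vector is not
// legal either, the vector is scalarized and the result is { float, 8 }.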
715 | | |
716 | | void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize, |
717 | | llvm::VectorType *origVectorTy, |
718 | 0 | llvm::SmallVectorImpl<llvm::Type*> &components) { |
719 | | // If it's already a legal vector type, use it. |
720 | 0 | if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) { |
721 | 0 | components.push_back(origVectorTy); |
722 | 0 | return; |
723 | 0 | } |
724 | | |
725 | | // Try to split the vector into legal subvectors. |
726 | 0 | auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements(); |
727 | 0 | auto eltTy = origVectorTy->getElementType(); |
728 | 0 | assert(numElts != 1); |
729 | | |
730 | | // The largest size that we're still considering making subvectors of. |
731 | | // Always a power of 2. |
732 | 0 | unsigned logCandidateNumElts = llvm::Log2_32(numElts); |
733 | 0 | unsigned candidateNumElts = 1U << logCandidateNumElts; |
734 | 0 | assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts); |
735 | | |
736 | | // Minor optimization: don't check the legality of this exact size twice. |
737 | 0 | if (candidateNumElts == numElts) { |
738 | 0 | logCandidateNumElts--; |
739 | 0 | candidateNumElts >>= 1; |
740 | 0 | } |
741 | |
742 | 0 | CharUnits eltSize = (origVectorSize / numElts); |
743 | 0 | CharUnits candidateSize = eltSize * candidateNumElts; |
744 | | |
745 | | // The sensibility of this algorithm relies on the fact that we never |
746 | | // have a legal non-power-of-2 vector size without having the power of 2 |
747 | | // also be legal. |
748 | 0 | while (logCandidateNumElts > 0) { |
749 | 0 | assert(candidateNumElts == 1U << logCandidateNumElts); |
750 | 0 | assert(candidateNumElts <= numElts); |
751 | 0 | assert(candidateSize == eltSize * candidateNumElts); |
752 | | |
753 | | // Skip illegal vector sizes. |
754 | 0 | if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) { |
755 | 0 | logCandidateNumElts--; |
756 | 0 | candidateNumElts /= 2; |
757 | 0 | candidateSize /= 2; |
758 | 0 | continue; |
759 | 0 | } |
760 | | |
761 | | // Add the right number of vectors of this size. |
762 | 0 | auto numVecs = numElts >> logCandidateNumElts; |
763 | 0 | components.append(numVecs, |
764 | 0 | llvm::FixedVectorType::get(eltTy, candidateNumElts)); |
765 | 0 | numElts -= (numVecs << logCandidateNumElts); |
766 | |
767 | 0 | if (numElts == 0) return; |
768 | | |
769 | | // It's possible that the number of elements remaining will be legal. |
770 | | // This can happen with e.g. <7 x float> when <3 x float> is legal. |
771 | | // This only needs to be separately checked if it's not a power of 2. |
772 | 0 | if (numElts > 2 && !isPowerOf2(numElts) && |
773 | 0 | isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) { |
774 | 0 | components.push_back(llvm::FixedVectorType::get(eltTy, numElts)); |
775 | 0 | return; |
776 | 0 | } |
777 | | |
778 | | // Bring candidateNumElts down to something no larger than numElts.
779 | 0 | do { |
780 | 0 | logCandidateNumElts--; |
781 | 0 | candidateNumElts /= 2; |
782 | 0 | candidateSize /= 2; |
783 | 0 | } while (candidateNumElts > numElts); |
784 | 0 | } |
785 | | |
786 | | // Otherwise, just append a bunch of individual elements. |
787 | 0 | components.append(numElts, eltTy); |
788 | 0 | } |
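// [Editor note: illustrative example, not part of the upstream file.]
// Assuming <4 x float> and <3 x float> are both legal on the target, an
// illegal <7 x float> is decomposed into one <4 x float> followed by one
// <3 x float>, matching the comment above; if <3 x float> were illegal,
// the loop would continue with smaller pieces instead (e.g. a
// <2 x float> plus a scalar float, when <2 x float> is legal).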
789 | | |
790 | | bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM, |
791 | 0 | const RecordDecl *record) { |
792 | | // FIXME: should we not rely on the standard computation in Sema, just in |
793 | | // case we want to diverge from the platform ABI (e.g. on targets where |
794 | | // that uses the MSVC rule)? |
795 | 0 | return !record->canPassInRegisters(); |
796 | 0 | } |
797 | | |
798 | | static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering, |
799 | | bool forReturn, |
800 | 0 | CharUnits alignmentForIndirect) { |
801 | 0 | if (lowering.empty()) { |
802 | 0 | return ABIArgInfo::getIgnore(); |
803 | 0 | } else if (lowering.shouldPassIndirectly(forReturn)) { |
804 | 0 | return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false); |
805 | 0 | } else { |
806 | 0 | auto types = lowering.getCoerceAndExpandTypes(); |
807 | 0 | return ABIArgInfo::getCoerceAndExpand(types.first, types.second); |
808 | 0 | } |
809 | 0 | } |
810 | | |
811 | | static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type, |
812 | 0 | bool forReturn) { |
813 | 0 | if (auto recordType = dyn_cast<RecordType>(type)) { |
814 | 0 | auto record = recordType->getDecl(); |
815 | 0 | auto &layout = CGM.getContext().getASTRecordLayout(record); |
816 | |
817 | 0 | if (mustPassRecordIndirectly(CGM, record)) |
818 | 0 | return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false); |
819 | | |
820 | 0 | SwiftAggLowering lowering(CGM); |
821 | 0 | lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout); |
822 | 0 | lowering.finish(); |
823 | |
824 | 0 | return classifyExpandedType(lowering, forReturn, layout.getAlignment()); |
825 | 0 | } |
826 | | |
827 | | // Just assume that all of our target ABIs can support returning at least |
828 | | // two integer or floating-point values. |
829 | 0 | if (isa<ComplexType>(type)) { |
830 | 0 | return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand()); |
831 | 0 | } |
832 | | |
833 | | // Vector types may need to be legalized. |
834 | 0 | if (isa<VectorType>(type)) { |
835 | 0 | SwiftAggLowering lowering(CGM); |
836 | 0 | lowering.addTypedData(type, CharUnits::Zero()); |
837 | 0 | lowering.finish(); |
838 | |
839 | 0 | CharUnits alignment = CGM.getContext().getTypeAlignInChars(type); |
840 | 0 | return classifyExpandedType(lowering, forReturn, alignment); |
841 | 0 | } |
842 | | |
843 | | // Member pointer types need to be expanded, but it's a simple form of |
844 | | // expansion that 'Direct' can handle. Note that CanBeFlattened should be |
845 | | // true for this to work. |
846 | | |
847 | | // 'void' needs to be ignored. |
848 | 0 | if (type->isVoidType()) { |
849 | 0 | return ABIArgInfo::getIgnore(); |
850 | 0 | } |
851 | | |
852 | | // Everything else can be passed directly. |
853 | 0 | return ABIArgInfo::getDirect(); |
854 | 0 | } |
855 | | |
856 | 0 | ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) { |
857 | 0 | return classifyType(CGM, type, /*forReturn*/ true); |
858 | 0 | } |
859 | | |
860 | | ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM, |
861 | 0 | CanQualType type) { |
862 | 0 | return classifyType(CGM, type, /*forReturn*/ false); |
863 | 0 | } |
864 | | |
865 | 0 | void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { |
866 | 0 | auto &retInfo = FI.getReturnInfo(); |
867 | 0 | retInfo = classifyReturnType(CGM, FI.getReturnType()); |
868 | |
869 | 0 | for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) { |
870 | 0 | auto &argInfo = FI.arg_begin()[i]; |
871 | 0 | argInfo.info = classifyArgumentType(CGM, argInfo.type); |
872 | 0 | } |
873 | 0 | } |
874 | | |
875 | | // Is swifterror lowered to a register by the target ABI?
876 | 0 | bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) { |
877 | 0 | return getSwiftABIInfo(CGM).isSwiftErrorInRegister(); |
878 | 0 | } |