//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

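/// Fetch the target's Swift ABI hooks. The cast<> will assert if the
/// target's ABIInfo does not actually implement SwiftABIInfo.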
static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

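/// Is the given value a power of 2? (Note that this test classifies
/// zero as a power of 2.)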
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

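/// Return the store size of the given LLVM type, i.e. the number of
/// bytes written by a store of that type, per the target's DataLayout.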
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

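/// Add typed data for a record at the given offset, using the record's
/// layout as computed by the AST context.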
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

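/// Add typed data for a record with an explicitly provided layout:
/// any v-table or vb-table pointer the class itself adds, followed by
/// its non-virtual bases, fields, and virtual bases.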
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

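/// Add the storage occupied by a bit-field. Bit-fields need not be
/// byte-aligned or byte-sized, so every byte they touch (even
/// partially) is conservatively added as opaque data.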
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

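/// Add typed data at the given offset; the end of the range is implied
/// by the type's store size.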
void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

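/// Add typed data covering [begin, end), first legalizing the type:
/// illegal vectors are decomposed into legal components, and illegal
/// integer types become opaque data.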
void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

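/// Add data of a type the target considers legal. If the data is not
/// naturally aligned at this offset, it is split further (for vectors)
/// or added as opaque data.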
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

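/// Add an entry to the list, which is kept sorted and non-overlapping;
/// a null type marks opaque data. Overlaps with existing entries are
/// resolved by merging types, splitting vectors, or making entries
/// opaque.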
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    // Fill in the original slot and each of the slots just inserted
    // after it with successive components.
    Entries[index + i].Type = eltTy;
    Entries[index + i].Begin = begin;
    Entries[index + i].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
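/// For example, an offset of 7 with a unit size of 4 yields offset 4.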
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

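/// Do the given byte offsets fall within the same aligned unit of the
/// given size?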
static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

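/// Complete the lowering. Afterwards the entries are frozen, and the
/// query methods below (which assert Finished) may be used.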
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries share a chunk, make them both opaque
  // and stretch one to meet the next.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (areBytesInSameUnit(Entries[i - 1].End - CharUnits::One(),
                           Entries[i].Begin, chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

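/// Invoke the given callback once per component, in order of
/// increasing offset, passing each entry's begin offset and type.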
void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.Type);
  }
}

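/// Build the pair of types used for a coerce-and-expand ABI treatment:
/// a (possibly packed) struct containing the components with explicit
/// padding between them, and the corresponding type with the padding
/// entries omitted.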
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);
    lastEnd = entry.End;
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

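/// Should this aggregate be passed indirectly, i.e. as a pointer to
/// memory rather than in registers? The decision is delegated to the
/// target via its SwiftABIInfo hook.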
bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  CharUnits totalSize = Entries.back().End;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
        totalSize, Entries.back().Type, asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(totalSize,
                                                           componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

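/// Can an integer type of this width appear directly as a component of
/// the lowering on this target?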
bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

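/// Can a vector of this size appear directly as a component of the
/// lowering on this target? Illegal vectors get broken into smaller,
/// legal pieces.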
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
                           vectorTy->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

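/// Split a legal vector type for piecewise handling, as either two
/// half-length vectors (when that is still a legal vector) or the
/// individual elements.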
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = vectorTy->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::VectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

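/// Decompose an illegal vector type into a sequence of legal component
/// types that together cover it. For example, a <7 x float> might
/// become <4 x float>, <2 x float>, float, depending on which vector
/// sizes the target reports as legal.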
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                             llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = origVectorTy->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs, llvm::VectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::VectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::shouldPassCXXRecordIndirectly(CodeGenModule &CGM,
                                              const CXXRecordDecl *record) {
  // Following a recommendation from Richard Smith, pass a C++ type
  // indirectly only if the destructor is non-trivial or *all* of the
  // copy/move constructors are deleted or non-trivial.

  if (record->hasNonTrivialDestructor())
    return true;

  // It would be nice if this were summarized on the CXXRecordDecl.
  for (auto ctor : record->ctors()) {
    if (ctor->isCopyOrMoveConstructor() && !ctor->isDeleted() &&
        ctor->isTrivial()) {
      return false;
    }
  }

  return true;
}

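/// Turn a finished lowering into ABI information: empty lowerings are
/// ignored, lowerings the target wants indirect become indirect
/// arguments, and everything else is coerced and expanded.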
static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

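/// Classify a single type for the Swift calling convention, either as
/// a parameter or (if forReturn) as a return value.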
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (auto cxxRecord = dyn_cast<CXXRecordDecl>(record)) {
      if (shouldPassCXXRecordIndirectly(CGM, cxxRecord))
        return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);
    }

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

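/// Compute Swift ABI information for an entire function signature,
/// classifying the return type and each argument type in place.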
void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}