// ARMCallingConv.h revision cddc3e03e4ec99c0268c03a126195173e519ed58
1//=== ARMCallingConv.h - ARM Custom Calling Convention Routines -*- C++ -*-===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file contains the custom routines for the ARM Calling Convention that 11// aren't done by tablegen. 12// 13//===----------------------------------------------------------------------===// 14 15#ifndef LLVM_LIB_TARGET_ARM_ARMCALLINGCONV_H 16#define LLVM_LIB_TARGET_ARM_ARMCALLINGCONV_H 17 18#include "ARM.h" 19#include "ARMBaseInstrInfo.h" 20#include "ARMSubtarget.h" 21#include "llvm/CodeGen/CallingConvLower.h" 22#include "llvm/IR/CallingConv.h" 23#include "llvm/Target/TargetInstrInfo.h" 24 25namespace llvm { 26 27// APCS f64 is in register pairs, possibly split to stack 28static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 29 CCValAssign::LocInfo &LocInfo, 30 CCState &State, bool CanFail) { 31 static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; 32 33 // Try to get the first register. 34 if (unsigned Reg = State.AllocateReg(RegList)) 35 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 36 else { 37 // For the 2nd half of a v2f64, do not fail. 38 if (CanFail) 39 return false; 40 41 // Put the whole thing on the stack. 42 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 43 State.AllocateStack(8, 4), 44 LocVT, LocInfo)); 45 return true; 46 } 47 48 // Try to get the second register. 
49 if (unsigned Reg = State.AllocateReg(RegList)) 50 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 51 else 52 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 53 State.AllocateStack(4, 4), 54 LocVT, LocInfo)); 55 return true; 56} 57 58static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 59 CCValAssign::LocInfo &LocInfo, 60 ISD::ArgFlagsTy &ArgFlags, 61 CCState &State) { 62 if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true)) 63 return false; 64 if (LocVT == MVT::v2f64 && 65 !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false)) 66 return false; 67 return true; // we handled it 68} 69 70// AAPCS f64 is in aligned register pairs 71static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 72 CCValAssign::LocInfo &LocInfo, 73 CCState &State, bool CanFail) { 74 static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 }; 75 static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 }; 76 static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 }; 77 static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; 78 79 unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList); 80 if (Reg == 0) { 81 82 // If we had R3 unallocated only, now we still must to waste it. 83 Reg = State.AllocateReg(GPRArgRegs); 84 assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64"); 85 86 // For the 2nd half of a v2f64, do not just fail. 87 if (CanFail) 88 return false; 89 90 // Put the whole thing on the stack. 
91 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, 92 State.AllocateStack(8, 8), 93 LocVT, LocInfo)); 94 return true; 95 } 96 97 unsigned i; 98 for (i = 0; i < 2; ++i) 99 if (HiRegList[i] == Reg) 100 break; 101 102 unsigned T = State.AllocateReg(LoRegList[i]); 103 (void)T; 104 assert(T == LoRegList[i] && "Could not allocate register"); 105 106 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 107 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i], 108 LocVT, LocInfo)); 109 return true; 110} 111 112static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 113 CCValAssign::LocInfo &LocInfo, 114 ISD::ArgFlagsTy &ArgFlags, 115 CCState &State) { 116 if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true)) 117 return false; 118 if (LocVT == MVT::v2f64 && 119 !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false)) 120 return false; 121 return true; // we handled it 122} 123 124static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 125 CCValAssign::LocInfo &LocInfo, CCState &State) { 126 static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 }; 127 static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 }; 128 129 unsigned Reg = State.AllocateReg(HiRegList, LoRegList); 130 if (Reg == 0) 131 return false; // we didn't handle it 132 133 unsigned i; 134 for (i = 0; i < 2; ++i) 135 if (HiRegList[i] == Reg) 136 break; 137 138 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 139 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i], 140 LocVT, LocInfo)); 141 return true; 142} 143 144static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 145 CCValAssign::LocInfo &LocInfo, 146 ISD::ArgFlagsTy &ArgFlags, 147 CCState &State) { 148 if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State)) 149 return false; 150 if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State)) 151 return false; 152 return true; // we handled 
it 153} 154 155static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 156 CCValAssign::LocInfo &LocInfo, 157 ISD::ArgFlagsTy &ArgFlags, 158 CCState &State) { 159 return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, 160 State); 161} 162 163static const MCPhysReg RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; 164 165static const MCPhysReg SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3, 166 ARM::S4, ARM::S5, ARM::S6, ARM::S7, 167 ARM::S8, ARM::S9, ARM::S10, ARM::S11, 168 ARM::S12, ARM::S13, ARM::S14, ARM::S15 }; 169static const MCPhysReg DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3, 170 ARM::D4, ARM::D5, ARM::D6, ARM::D7 }; 171static const MCPhysReg QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 }; 172 173 174// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA 175// has InConsecutiveRegs set, and that the last member also has 176// InConsecutiveRegsLast set. We must process all members of the HA before 177// we can allocate it, as we need to know the total number of registers that 178// will be needed in order to (attempt to) allocate a contiguous block. 179static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT, 180 MVT &LocVT, 181 CCValAssign::LocInfo &LocInfo, 182 ISD::ArgFlagsTy &ArgFlags, 183 CCState &State) { 184 SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs(); 185 186 // AAPCS HFAs must have 1-4 elements, all of the same type 187 if (PendingMembers.size() > 0) 188 assert(PendingMembers[0].getLocVT() == LocVT); 189 190 // Add the argument to the list to be allocated once we know the size of the 191 // aggregate. Store the type's required alignmnent as extra info for later: in 192 // the [N x i64] case all trace has been removed by the time we actually get 193 // to do allocation. 
194 PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo, 195 ArgFlags.getOrigAlign())); 196 197 if (!ArgFlags.isInConsecutiveRegsLast()) 198 return true; 199 200 // Try to allocate a contiguous block of registers, each of the correct 201 // size to hold one member. 202 auto &DL = State.getMachineFunction().getDataLayout(); 203 unsigned StackAlign = DL.getStackAlignment(); 204 unsigned Align = std::min(PendingMembers[0].getExtraInfo(), StackAlign); 205 206 ArrayRef<MCPhysReg> RegList; 207 switch (LocVT.SimpleTy) { 208 case MVT::i32: { 209 RegList = RRegList; 210 unsigned RegIdx = State.getFirstUnallocated(RegList); 211 212 // First consume all registers that would give an unaligned object. Whether 213 // we go on stack or in regs, no-one will be using them in future. 214 unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4; 215 while (RegIdx % RegAlign != 0 && RegIdx < RegList.size()) 216 State.AllocateReg(RegList[RegIdx++]); 217 218 break; 219 } 220 case MVT::f32: 221 RegList = SRegList; 222 break; 223 case MVT::f64: 224 RegList = DRegList; 225 break; 226 case MVT::v2f64: 227 RegList = QRegList; 228 break; 229 default: 230 llvm_unreachable("Unexpected member type for block aggregate"); 231 break; 232 } 233 234 unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size()); 235 if (RegResult) { 236 for (SmallVectorImpl<CCValAssign>::iterator It = PendingMembers.begin(); 237 It != PendingMembers.end(); ++It) { 238 It->convertToReg(RegResult); 239 State.addLoc(*It); 240 ++RegResult; 241 } 242 PendingMembers.clear(); 243 return true; 244 } 245 246 // Register allocation failed, we'll be needing the stack 247 unsigned Size = LocVT.getSizeInBits() / 8; 248 if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) { 249 // If nothing else has used the stack until this point, a non-HFA aggregate 250 // can be split between regs and stack. 
251 unsigned RegIdx = State.getFirstUnallocated(RegList); 252 for (auto &It : PendingMembers) { 253 if (RegIdx >= RegList.size()) 254 It.convertToMem(State.AllocateStack(Size, Size)); 255 else 256 It.convertToReg(State.AllocateReg(RegList[RegIdx++])); 257 258 State.addLoc(It); 259 } 260 PendingMembers.clear(); 261 return true; 262 } else if (LocVT != MVT::i32) 263 RegList = SRegList; 264 265 // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core) 266 for (auto Reg : RegList) 267 State.AllocateReg(Reg); 268 269 for (auto &It : PendingMembers) { 270 It.convertToMem(State.AllocateStack(Size, Align)); 271 State.addLoc(It); 272 273 // After the first item has been allocated, the rest are packed as tightly 274 // as possible. (E.g. an incoming i64 would have starting Align of 8, but 275 // we'll be allocating a bunch of i32 slots). 276 Align = Size; 277 } 278 279 // All pending members have now been allocated 280 PendingMembers.clear(); 281 282 // This will be allocated by the last member of the aggregate 283 return true; 284} 285 286} // End llvm namespace 287 288#endif 289