ARMISelLowering.cpp revision 970a419633ba41cac44ae636543f192ea632fe00
1//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by Evan Cheng and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the interfaces that ARM uses to lower LLVM code into a 11// selection DAG. 12// 13//===----------------------------------------------------------------------===// 14 15#include "ARM.h" 16#include "ARMAddressingModes.h" 17#include "ARMConstantPoolValue.h" 18#include "ARMISelLowering.h" 19#include "ARMMachineFunctionInfo.h" 20#include "ARMRegisterInfo.h" 21#include "ARMSubtarget.h" 22#include "ARMTargetMachine.h" 23#include "llvm/CallingConv.h" 24#include "llvm/Constants.h" 25#include "llvm/CodeGen/MachineBasicBlock.h" 26#include "llvm/CodeGen/MachineFrameInfo.h" 27#include "llvm/CodeGen/MachineFunction.h" 28#include "llvm/CodeGen/MachineInstrBuilder.h" 29#include "llvm/CodeGen/SelectionDAG.h" 30#include "llvm/CodeGen/SSARegMap.h" 31#include "llvm/ADT/VectorExtras.h" 32using namespace llvm; 33 34ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) 35 : TargetLowering(TM), ARMPCLabelIndex(0) { 36 Subtarget = &TM.getSubtarget<ARMSubtarget>(); 37 38 // Uses VFP for Thumb libfuncs if available. 39 if (Subtarget->isThumb() && Subtarget->hasVFP2()) { 40 // Single-precision floating-point arithmetic. 41 setLibcallName(RTLIB::ADD_F32, "__addsf3vfp"); 42 setLibcallName(RTLIB::SUB_F32, "__subsf3vfp"); 43 setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp"); 44 setLibcallName(RTLIB::DIV_F32, "__divsf3vfp"); 45 46 // Double-precision floating-point arithmetic. 47 setLibcallName(RTLIB::ADD_F64, "__adddf3vfp"); 48 setLibcallName(RTLIB::SUB_F64, "__subdf3vfp"); 49 setLibcallName(RTLIB::MUL_F64, "__muldf3vfp"); 50 setLibcallName(RTLIB::DIV_F64, "__divdf3vfp"); 51 52 // Single-precision comparisons. 
53 setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp"); 54 setLibcallName(RTLIB::UNE_F32, "__nesf2vfp"); 55 setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp"); 56 setLibcallName(RTLIB::OLE_F32, "__lesf2vfp"); 57 setLibcallName(RTLIB::OGE_F32, "__gesf2vfp"); 58 setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp"); 59 setLibcallName(RTLIB::UO_F32, "__unordsf2vfp"); 60 61 // Double-precision comparisons. 62 setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp"); 63 setLibcallName(RTLIB::UNE_F64, "__nedf2vfp"); 64 setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp"); 65 setLibcallName(RTLIB::OLE_F64, "__ledf2vfp"); 66 setLibcallName(RTLIB::OGE_F64, "__gedf2vfp"); 67 setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp"); 68 setLibcallName(RTLIB::UO_F64, "__unorddf2vfp"); 69 70 // Floating-point to integer conversions. 71 // i64 conversions are done via library routines even when generating VFP 72 // instructions, so use the same ones. 73 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp"); 74 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp"); 75 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp"); 76 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp"); 77 78 // Conversions between floating types. 79 setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp"); 80 setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp"); 81 82 // Integer to floating-point conversions. 83 // i64 conversions are done via library routines even when generating VFP 84 // instructions, so use the same ones. 85 // FIXME: There appears to be some naming inconsistency in ARM libgcc: e.g. 86 // __floatunsidf vs. __floatunssidfvfp. 
87 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp"); 88 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp"); 89 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp"); 90 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp"); 91 } 92 93 addRegisterClass(MVT::i32, ARM::GPRRegisterClass); 94 if (Subtarget->hasVFP2() && !Subtarget->isThumb()) { 95 addRegisterClass(MVT::f32, ARM::SPRRegisterClass); 96 addRegisterClass(MVT::f64, ARM::DPRRegisterClass); 97 } 98 99 // ARM does not have f32 extending load. 100 setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand); 101 102 // ARM supports all 4 flavors of integer indexed load / store. 103 for (unsigned im = (unsigned)ISD::PRE_INC; 104 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) { 105 setIndexedLoadAction(im, MVT::i1, Legal); 106 setIndexedLoadAction(im, MVT::i8, Legal); 107 setIndexedLoadAction(im, MVT::i16, Legal); 108 setIndexedLoadAction(im, MVT::i32, Legal); 109 setIndexedStoreAction(im, MVT::i1, Legal); 110 setIndexedStoreAction(im, MVT::i8, Legal); 111 setIndexedStoreAction(im, MVT::i16, Legal); 112 setIndexedStoreAction(im, MVT::i32, Legal); 113 } 114 115 // i64 operation support. 116 if (Subtarget->isThumb()) { 117 setOperationAction(ISD::MUL, MVT::i64, Expand); 118 setOperationAction(ISD::MULHU, MVT::i32, Expand); 119 setOperationAction(ISD::MULHS, MVT::i32, Expand); 120 } else { 121 setOperationAction(ISD::MUL, MVT::i64, Custom); 122 setOperationAction(ISD::MULHU, MVT::i32, Custom); 123 if (!Subtarget->hasV6Ops()) 124 setOperationAction(ISD::MULHS, MVT::i32, Custom); 125 } 126 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); 127 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); 128 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); 129 setOperationAction(ISD::SRL, MVT::i64, Custom); 130 setOperationAction(ISD::SRA, MVT::i64, Custom); 131 132 // ARM does not have ROTL. 
133 setOperationAction(ISD::ROTL, MVT::i32, Expand); 134 setOperationAction(ISD::CTTZ , MVT::i32, Expand); 135 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 136 if (!Subtarget->hasV5TOps()) 137 setOperationAction(ISD::CTLZ, MVT::i32, Expand); 138 139 // These are expanded into libcalls. 140 setOperationAction(ISD::SDIV, MVT::i32, Expand); 141 setOperationAction(ISD::UDIV, MVT::i32, Expand); 142 setOperationAction(ISD::SREM, MVT::i32, Expand); 143 setOperationAction(ISD::UREM, MVT::i32, Expand); 144 145 // Support label based line numbers. 146 setOperationAction(ISD::LOCATION, MVT::Other, Expand); 147 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 148 // FIXME - use subtarget debug flags 149 if (Subtarget->isTargetDarwin()) 150 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand); 151 152 setOperationAction(ISD::RET, MVT::Other, Custom); 153 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 154 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 155 156 // Expand mem operations genericly. 157 setOperationAction(ISD::MEMSET , MVT::Other, Expand); 158 setOperationAction(ISD::MEMCPY , MVT::Other, Expand); 159 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand); 160 161 // Use the default implementation. 
162 setOperationAction(ISD::VASTART , MVT::Other, Expand); 163 setOperationAction(ISD::VAARG , MVT::Other, Expand); 164 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 165 setOperationAction(ISD::VAEND , MVT::Other, Expand); 166 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 167 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 168 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand); 169 170 if (!Subtarget->hasV6Ops()) { 171 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 172 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 173 } 174 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 175 176 if (Subtarget->hasVFP2() && !Subtarget->isThumb()) 177 // Turn f64->i64 into FMRRD iff target supports vfp2. 178 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom); 179 180 setOperationAction(ISD::SETCC , MVT::i32, Expand); 181 setOperationAction(ISD::SETCC , MVT::f32, Expand); 182 setOperationAction(ISD::SETCC , MVT::f64, Expand); 183 setOperationAction(ISD::SELECT , MVT::i32, Expand); 184 setOperationAction(ISD::SELECT , MVT::f32, Expand); 185 setOperationAction(ISD::SELECT , MVT::f64, Expand); 186 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); 187 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 188 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 189 190 setOperationAction(ISD::BRCOND , MVT::Other, Expand); 191 setOperationAction(ISD::BR_CC , MVT::i32, Custom); 192 setOperationAction(ISD::BR_CC , MVT::f32, Custom); 193 setOperationAction(ISD::BR_CC , MVT::f64, Custom); 194 setOperationAction(ISD::BR_JT , MVT::Other, Custom); 195 196 setOperationAction(ISD::VASTART, MVT::Other, Custom); 197 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 198 setOperationAction(ISD::VAEND, MVT::Other, Expand); 199 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 200 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 201 202 // FP Constants can't be immediates. 
203 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 204 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 205 206 // We don't support sin/cos/fmod/copysign 207 setOperationAction(ISD::FSIN , MVT::f64, Expand); 208 setOperationAction(ISD::FSIN , MVT::f32, Expand); 209 setOperationAction(ISD::FCOS , MVT::f32, Expand); 210 setOperationAction(ISD::FCOS , MVT::f64, Expand); 211 setOperationAction(ISD::FREM , MVT::f64, Expand); 212 setOperationAction(ISD::FREM , MVT::f32, Expand); 213 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); 214 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); 215 216 // int <-> fp are custom expanded into bit_convert + ARMISD ops. 217 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 218 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 219 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 220 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 221 222 setStackPointerRegisterToSaveRestore(ARM::SP); 223 224 setSchedulingPreference(SchedulingForRegPressure); 225 computeRegisterProperties(); 226} 227 228 229const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { 230 switch (Opcode) { 231 default: return 0; 232 case ARMISD::Wrapper: return "ARMISD::Wrapper"; 233 case ARMISD::WrapperCall: return "ARMISD::WrapperCall"; 234 case ARMISD::WrapperJT: return "ARMISD::WrapperJT"; 235 case ARMISD::CALL: return "ARMISD::CALL"; 236 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK"; 237 case ARMISD::tCALL: return "ARMISD::tCALL"; 238 case ARMISD::BRCOND: return "ARMISD::BRCOND"; 239 case ARMISD::BR_JT: return "ARMISD::BR_JT"; 240 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG"; 241 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD"; 242 case ARMISD::CMP: return "ARMISD::CMP"; 243 case ARMISD::CMPFP: return "ARMISD::CMPFP"; 244 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0"; 245 case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; 246 case ARMISD::CMOV: return "ARMISD::CMOV"; 247 case ARMISD::CNEG: return 
"ARMISD::CNEG"; 248 249 case ARMISD::FTOSI: return "ARMISD::FTOSI"; 250 case ARMISD::FTOUI: return "ARMISD::FTOUI"; 251 case ARMISD::SITOF: return "ARMISD::SITOF"; 252 case ARMISD::UITOF: return "ARMISD::UITOF"; 253 case ARMISD::MULHILOU: return "ARMISD::MULHILOU"; 254 case ARMISD::MULHILOS: return "ARMISD::MULHILOS"; 255 256 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG"; 257 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG"; 258 case ARMISD::RRX: return "ARMISD::RRX"; 259 260 case ARMISD::FMRRD: return "ARMISD::FMRRD"; 261 case ARMISD::FMDRR: return "ARMISD::FMDRR"; 262 } 263} 264 265//===----------------------------------------------------------------------===// 266// Lowering Code 267//===----------------------------------------------------------------------===// 268 269 270/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC 271static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { 272 switch (CC) { 273 default: assert(0 && "Unknown condition code!"); 274 case ISD::SETNE: return ARMCC::NE; 275 case ISD::SETEQ: return ARMCC::EQ; 276 case ISD::SETGT: return ARMCC::GT; 277 case ISD::SETGE: return ARMCC::GE; 278 case ISD::SETLT: return ARMCC::LT; 279 case ISD::SETLE: return ARMCC::LE; 280 case ISD::SETUGT: return ARMCC::HI; 281 case ISD::SETUGE: return ARMCC::HS; 282 case ISD::SETULT: return ARMCC::LO; 283 case ISD::SETULE: return ARMCC::LS; 284 } 285} 286 287/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. It 288/// returns true if the operands should be inverted to form the proper 289/// comparison. 
290static bool FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, 291 ARMCC::CondCodes &CondCode2) { 292 bool Invert = false; 293 CondCode2 = ARMCC::AL; 294 switch (CC) { 295 default: assert(0 && "Unknown FP condition!"); 296 case ISD::SETEQ: 297 case ISD::SETOEQ: CondCode = ARMCC::EQ; break; 298 case ISD::SETGT: 299 case ISD::SETOGT: CondCode = ARMCC::GT; break; 300 case ISD::SETGE: 301 case ISD::SETOGE: CondCode = ARMCC::GE; break; 302 case ISD::SETOLT: CondCode = ARMCC::MI; break; 303 case ISD::SETOLE: CondCode = ARMCC::GT; Invert = true; break; 304 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; 305 case ISD::SETO: CondCode = ARMCC::VC; break; 306 case ISD::SETUO: CondCode = ARMCC::VS; break; 307 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; 308 case ISD::SETUGT: CondCode = ARMCC::HI; break; 309 case ISD::SETUGE: CondCode = ARMCC::PL; break; 310 case ISD::SETLT: 311 case ISD::SETULT: CondCode = ARMCC::LT; break; 312 case ISD::SETLE: 313 case ISD::SETULE: CondCode = ARMCC::LE; break; 314 case ISD::SETNE: 315 case ISD::SETUNE: CondCode = ARMCC::NE; break; 316 } 317 return Invert; 318} 319 320static void 321HowToPassArgument(MVT::ValueType ObjectVT, 322 unsigned NumGPRs, unsigned &ObjSize, unsigned &ObjGPRs) { 323 ObjSize = 0; 324 ObjGPRs = 0; 325 326 switch (ObjectVT) { 327 default: assert(0 && "Unhandled argument type!"); 328 case MVT::i32: 329 case MVT::f32: 330 if (NumGPRs < 4) 331 ObjGPRs = 1; 332 else 333 ObjSize = 4; 334 break; 335 case MVT::i64: 336 case MVT::f64: 337 if (NumGPRs < 3) 338 ObjGPRs = 2; 339 else if (NumGPRs == 3) { 340 ObjGPRs = 1; 341 ObjSize = 4; 342 } else 343 ObjSize = 8; 344 } 345} 346 347// This transforms a ISD::CALL node into a 348// callseq_star <- ARMISD:CALL <- callseq_end 349// chain 350SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { 351 MVT::ValueType RetVT= Op.Val->getValueType(0); 352 SDOperand Chain = Op.getOperand(0); 353 unsigned CallConv = 
cast<ConstantSDNode>(Op.getOperand(1))->getValue(); 354 assert((CallConv == CallingConv::C || 355 CallConv == CallingConv::CSRet || 356 CallConv == CallingConv::Fast) && "unknown calling convention"); 357 SDOperand Callee = Op.getOperand(4); 358 unsigned NumOps = (Op.getNumOperands() - 5) / 2; 359 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot 360 unsigned NumGPRs = 0; // GPRs used for parameter passing. 361 362 // Count how many bytes are to be pushed on the stack. 363 unsigned NumBytes = 0; 364 365 // Add up all the space actually used. 366 for (unsigned i = 0; i < NumOps; ++i) { 367 unsigned ObjSize = 0; 368 unsigned ObjGPRs = 0; 369 MVT::ValueType ObjectVT = Op.getOperand(5+2*i).getValueType(); 370 HowToPassArgument(ObjectVT, NumGPRs, ObjSize, ObjGPRs); 371 NumBytes += ObjSize; 372 NumGPRs += ObjGPRs; 373 } 374 375 // Adjust the stack pointer for the new arguments... 376 // These operations are automatically eliminated by the prolog/epilog pass 377 Chain = DAG.getCALLSEQ_START(Chain, 378 DAG.getConstant(NumBytes, MVT::i32)); 379 380 SDOperand StackPtr = DAG.getRegister(ARM::SP, MVT::i32); 381 382 static const unsigned GPRArgRegs[] = { 383 ARM::R0, ARM::R1, ARM::R2, ARM::R3 384 }; 385 386 NumGPRs = 0; 387 std::vector<std::pair<unsigned, SDOperand> > RegsToPass; 388 std::vector<SDOperand> MemOpChains; 389 for (unsigned i = 0; i != NumOps; ++i) { 390 SDOperand Arg = Op.getOperand(5+2*i); 391 MVT::ValueType ArgVT = Arg.getValueType(); 392 393 unsigned ObjSize = 0; 394 unsigned ObjGPRs = 0; 395 HowToPassArgument(ArgVT, NumGPRs, ObjSize, ObjGPRs); 396 if (ObjGPRs > 0) { 397 switch (ArgVT) { 398 default: assert(0 && "Unexpected ValueType for argument!"); 399 case MVT::i32: 400 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg)); 401 break; 402 case MVT::f32: 403 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], 404 DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Arg))); 405 break; 406 case MVT::i64: { 407 SDOperand Lo = 
DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, 408 DAG.getConstant(0, getPointerTy())); 409 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, 410 DAG.getConstant(1, getPointerTy())); 411 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Lo)); 412 if (ObjGPRs == 2) 413 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Hi)); 414 else { 415 SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); 416 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); 417 MemOpChains.push_back(DAG.getStore(Chain, Hi, PtrOff, NULL, 0)); 418 } 419 break; 420 } 421 case MVT::f64: { 422 SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, 423 DAG.getVTList(MVT::i32, MVT::i32), 424 &Arg, 1); 425 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Cvt)); 426 if (ObjGPRs == 2) 427 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], 428 Cvt.getValue(1))); 429 else { 430 SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); 431 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); 432 MemOpChains.push_back(DAG.getStore(Chain, Cvt.getValue(1), PtrOff, 433 NULL, 0)); 434 } 435 break; 436 } 437 } 438 } else { 439 assert(ObjSize != 0); 440 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); 441 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); 442 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); 443 } 444 445 NumGPRs += ObjGPRs; 446 ArgOffset += ObjSize; 447 } 448 449 if (!MemOpChains.empty()) 450 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, 451 &MemOpChains[0], MemOpChains.size()); 452 453 // Build a sequence of copy-to-reg nodes chained together with token chain 454 // and flag operands which copy the outgoing args into the appropriate regs. 
455 SDOperand InFlag; 456 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 457 Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, 458 InFlag); 459 InFlag = Chain.getValue(1); 460 } 461 462 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 463 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 464 // node so that legalize doesn't hack it. 465 bool isDirect = false; 466 bool isARMFunc = false; 467 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 468 GlobalValue *GV = G->getGlobal(); 469 Callee = DAG.getTargetGlobalAddress(GV, getPointerTy()); 470 isDirect = true; 471 bool isExt = (GV->isExternal() || GV->hasWeakLinkage() || 472 GV->hasLinkOnceLinkage()); 473 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 474 getTargetMachine().getRelocationModel() != Reloc::Static; 475 isARMFunc = !Subtarget->isThumb() || isStub; 476 // Wrap it since tBX takes a register source operand. 477 if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) 478 Callee = DAG.getNode(ARMISD::WrapperCall, MVT::i32, Callee); 479 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 480 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); 481 isDirect = true; 482 bool isStub = Subtarget->isTargetDarwin() && 483 getTargetMachine().getRelocationModel() != Reloc::Static; 484 isARMFunc = !Subtarget->isThumb() || isStub; 485 // Wrap it since tBX takes a register source operand. 486 if (!Subtarget->hasV5TOps() && Subtarget->isThumb()) 487 Callee = DAG.getNode(ARMISD::WrapperCall, MVT::i32, Callee); 488 } 489 490 std::vector<MVT::ValueType> NodeTys; 491 NodeTys.push_back(MVT::Other); // Returns a chain 492 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
493 494 std::vector<SDOperand> Ops; 495 Ops.push_back(Chain); 496 Ops.push_back(Callee); 497 498 // Add argument registers to the end of the list so that they are known live 499 // into the call. 500 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 501 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 502 RegsToPass[i].second.getValueType())); 503 504 // FIXME: handle tail calls differently. 505 unsigned CallOpc; 506 if (Subtarget->isThumb()) { 507 if (!Subtarget->hasV5TOps() && (!isDirect || isARMFunc)) 508 CallOpc = ARMISD::CALL_NOLINK; 509 else 510 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 511 } else { 512 CallOpc = (isDirect || Subtarget->hasV5TOps()) 513 ? ARMISD::CALL : ARMISD::CALL_NOLINK; 514 } 515 if (InFlag.Val) 516 Ops.push_back(InFlag); 517 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); 518 InFlag = Chain.getValue(1); 519 520 SDOperand CSOps[] = { Chain, DAG.getConstant(NumBytes, MVT::i32), InFlag }; 521 Chain = DAG.getNode(ISD::CALLSEQ_END, 522 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 523 ((RetVT != MVT::Other) ? 2 : 1), CSOps, 3); 524 if (RetVT != MVT::Other) 525 InFlag = Chain.getValue(1); 526 527 std::vector<SDOperand> ResultVals; 528 NodeTys.clear(); 529 530 // If the call has results, copy the values out of the ret val registers. 531 switch (RetVT) { 532 default: assert(0 && "Unexpected ret value!"); 533 case MVT::Other: 534 break; 535 case MVT::i32: 536 Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1); 537 ResultVals.push_back(Chain.getValue(0)); 538 if (Op.Val->getValueType(1) == MVT::i32) { 539 // Returns a i64 value. 
540 Chain = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32, 541 Chain.getValue(2)).getValue(1); 542 ResultVals.push_back(Chain.getValue(0)); 543 NodeTys.push_back(MVT::i32); 544 } 545 NodeTys.push_back(MVT::i32); 546 break; 547 case MVT::f32: 548 Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1); 549 ResultVals.push_back(DAG.getNode(ISD::BIT_CONVERT, MVT::f32, 550 Chain.getValue(0))); 551 NodeTys.push_back(MVT::f32); 552 break; 553 case MVT::f64: { 554 SDOperand Lo = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag); 555 SDOperand Hi = DAG.getCopyFromReg(Lo, ARM::R1, MVT::i32, Lo.getValue(2)); 556 ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, MVT::f64, Lo, Hi)); 557 NodeTys.push_back(MVT::f64); 558 break; 559 } 560 } 561 562 NodeTys.push_back(MVT::Other); 563 564 if (ResultVals.empty()) 565 return Chain; 566 567 ResultVals.push_back(Chain); 568 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, &ResultVals[0], 569 ResultVals.size()); 570 return Res.getValue(Op.ResNo); 571} 572 573static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { 574 SDOperand Copy; 575 SDOperand Chain = Op.getOperand(0); 576 switch(Op.getNumOperands()) { 577 default: 578 assert(0 && "Do not know how to return this many arguments!"); 579 abort(); 580 case 1: { 581 SDOperand LR = DAG.getRegister(ARM::LR, MVT::i32); 582 return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Chain); 583 } 584 case 3: 585 Op = Op.getOperand(1); 586 if (Op.getValueType() == MVT::f32) { 587 Op = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op); 588 } else if (Op.getValueType() == MVT::f64) { 589 // Recursively legalize f64 -> i64. 
590 Op = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Op); 591 return DAG.getNode(ISD::RET, MVT::Other, Chain, Op, 592 DAG.getConstant(0, MVT::i32)); 593 } 594 Copy = DAG.getCopyToReg(Chain, ARM::R0, Op, SDOperand()); 595 if (DAG.getMachineFunction().liveout_empty()) 596 DAG.getMachineFunction().addLiveOut(ARM::R0); 597 break; 598 case 5: 599 Copy = DAG.getCopyToReg(Chain, ARM::R1, Op.getOperand(3), SDOperand()); 600 Copy = DAG.getCopyToReg(Copy, ARM::R0, Op.getOperand(1), Copy.getValue(1)); 601 // If we haven't noted the R0+R1 are live out, do so now. 602 if (DAG.getMachineFunction().liveout_empty()) { 603 DAG.getMachineFunction().addLiveOut(ARM::R0); 604 DAG.getMachineFunction().addLiveOut(ARM::R1); 605 } 606 break; 607 } 608 609 //We must use RET_FLAG instead of BRIND because BRIND doesn't have a flag 610 return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); 611} 612 613// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 614// their target countpart wrapped in the ARMISD::Wrapper node. Suppose N is 615// one of the above mentioned nodes. It has to be wrapped because otherwise 616// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 617// be used to form addressing mode. These wrapped nodes will be selected 618// into MOVri. 619static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { 620 MVT::ValueType PtrVT = Op.getValueType(); 621 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 622 SDOperand Res; 623 if (CP->isMachineConstantPoolEntry()) 624 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 625 CP->getAlignment()); 626 else 627 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 628 CP->getAlignment()); 629 return DAG.getNode(ARMISD::Wrapper, MVT::i32, Res); 630} 631 632/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol 633/// even in dynamic-no-pic mode. 
static bool GVIsIndirectSymbol(GlobalValue *GV) {
  // Weak/linkonce definitions may be replaced at load time, and externals
  // that have not been read from bytecode are defined in another image.
  return (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
          (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
}

// LowerGlobalAddress - Lower an ISD::GlobalAddress by placing the address in
// a constant pool entry and loading it; PIC adds a pc-relative fixup
// (ARMISD::PIC_ADD), and Darwin indirect symbols need one extra load.
SDOperand ARMTargetLowering::LowerGlobalAddress(SDOperand Op,
                                                SelectionDAG &DAG) {
  MVT::ValueType PtrVT = getPointerTy();
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  bool IsIndirect = Subtarget->isTargetDarwin() && GVIsIndirectSymbol(GV);
  SDOperand CPAddr;
  if (RelocM == Reloc::Static)
    // Static relocation: the raw global address can live in the pool.
    CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2);
  else {
    // PC adjustment accounts for the pipeline offset when the entry is read
    // pc-relative (Thumb reads pc+4, ARM pc+8); zero outside PIC.
    unsigned PCAdj = (RelocM != Reloc::PIC_)
      ? 0 : (Subtarget->isThumb() ? 4 : 8);
    ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                         IsIndirect, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr);

  // Load the address out of the constant pool.
  SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0);
  SDOperand Chain = Result.getValue(1);

  if (RelocM == Reloc::PIC_) {
    // Add the pc-label value to make the loaded offset absolute.
    SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
    Result = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Result, PICLabel);
  }
  if (IsIndirect)
    // One more load to resolve the non-lazy-pointer indirection.
    Result = DAG.getLoad(PtrVT, Chain, Result, NULL, 0);

  return Result;
}

// LowerVASTART - Lower an ISD::VASTART node for this target.
static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
                              unsigned VarArgsFrameIndex) {
  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
  SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
  return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
                      SV->getOffset());
}

// LowerFORMAL_ARGUMENT - Lower one incoming formal argument (result value
// ArgNo of the FORMAL_ARGUMENTS node Op): copy it out of r0-r3 and/or load
// it from its fixed stack slot.  NumGPRs and ArgOffset are running counters
// shared across arguments; vRegs records the virtual register created for
// each argument GPR.
static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG,
                                      unsigned *vRegs, unsigned ArgNo,
                                      unsigned &NumGPRs, unsigned &ArgOffset) {
  MachineFunction &MF = DAG.getMachineFunction();
  MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
  SDOperand Root = Op.getOperand(0);
  std::vector<SDOperand> ArgValues;
  SSARegMap *RegMap = MF.getSSARegMap();

  static const unsigned GPRArgRegs[] = {
    ARM::R0, ARM::R1, ARM::R2, ARM::R3
  };

  // Decide how much of this argument is in registers vs. on the stack
  // (same convention as the outgoing-call side).
  unsigned ObjSize = 0;
  unsigned ObjGPRs = 0;
  HowToPassArgument(ObjectVT, NumGPRs, ObjSize, ObjGPRs);

  SDOperand ArgValue;
  if (ObjGPRs == 1) {
    // Single-word (or first-half) register argument.
    unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
    vRegs[NumGPRs] = VReg;
    ArgValue = DAG.getCopyFromReg(Root, VReg, MVT::i32);
    if (ObjectVT == MVT::f32)
      // f32 arrives as its i32 bit pattern.
      ArgValue = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, ArgValue);
  } else if (ObjGPRs == 2) {
    // Doubleword argument fully in registers: combine the two halves.
    unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
    vRegs[NumGPRs] = VReg;
    ArgValue = DAG.getCopyFromReg(Root, VReg, MVT::i32);

    VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
    MF.addLiveIn(GPRArgRegs[NumGPRs+1], VReg);
    vRegs[NumGPRs+1] = VReg;
    SDOperand ArgValue2 = DAG.getCopyFromReg(Root, VReg, MVT::i32);

    if (ObjectVT == MVT::i64)
      ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
    else
      // f64: move the two GPR halves into a VFP double register.
      ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
  }
  NumGPRs += ObjGPRs;

  if (ObjSize) {
    // If the argument is actually used, emit a load from the right stack
    // slot.
    if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
      if (ObjGPRs == 0)
        // Entirely on the stack: one load of the whole value.
        ArgValue = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
      else {
        // Split register+stack doubleword: load the second half and pair it
        // with the register half fetched above.
        SDOperand ArgValue2 =
          DAG.getLoad(MVT::i32, Root, FIN, NULL, 0);
        if (ObjectVT == MVT::i64)
          ArgValue= DAG.getNode(ISD::BUILD_PAIR, MVT::i64, ArgValue, ArgValue2);
        else
          ArgValue= DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2);
      }
    } else {
      // Don't emit a dead load.
      ArgValue = DAG.getNode(ISD::UNDEF, ObjectVT);
    }

    ArgOffset += ObjSize;   // Move on to the next argument.
  }

  return ArgValue;
}

// LowerFORMAL_ARGUMENTS - Lower an ISD::FORMAL_ARGUMENTS node: lower every
// incoming argument, then, for varargs functions, spill the remaining
// r0-r3 registers to the stack so va_arg can walk them contiguously.
SDOperand
ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
  std::vector<SDOperand> ArgValues;
  SDOperand Root = Op.getOperand(0);
  unsigned ArgOffset = 0;   // Frame mechanisms handle retaddr slot
  unsigned NumGPRs = 0;     // GPRs used for parameter passing.
  unsigned VRegs[4];

  // Result count minus the trailing chain = number of formal arguments.
  unsigned NumArgs = Op.Val->getNumValues()-1;
  for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo)
    ArgValues.push_back(LowerFORMAL_ARGUMENT(Op, DAG, VRegs, ArgNo,
                                             NumGPRs, ArgOffset));

  bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
  if (isVarArg) {
    static const unsigned GPRArgRegs[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3
    };

    MachineFunction &MF = DAG.getMachineFunction();
    SSARegMap *RegMap = MF.getSSARegMap();
    MachineFrameInfo *MFI = MF.getFrameInfo();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    // Bytes needed to save the argument GPRs not consumed by named args.
    unsigned VARegSaveSize = (4 - NumGPRs) * 4;
    if (VARegSaveSize) {
      // If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by deferencing
      // the result of va_next.
      AFI->setVarArgsRegSaveSize(VARegSaveSize);
      VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset);
      SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());

      SmallVector<SDOperand, 4> MemOps;
      for (; NumGPRs < 4; ++NumGPRs) {
        unsigned VReg = RegMap->createVirtualRegister(&ARM::GPRRegClass);
        MF.addLiveIn(GPRArgRegs[NumGPRs], VReg);
        SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i32);
        SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
                          DAG.getConstant(4, getPointerTy()));
      }
      if (!MemOps.empty())
        Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                           &MemOps[0], MemOps.size());
    } else
      // This will point to the next argument passed via stack.
      VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset);
  }

  ArgValues.push_back(Root);

  // Return the new list of results.
  std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
                                    Op.Val->value_end());
  return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDOperand Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->isExactlyValue(0.0);
  else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) {
    // Maybe this has already been legalized into the constant pool?
    // A load whose address is an ARMISD::Wrapper of a constant-pool entry:
    // inspect the pooled constant directly.
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDOperand WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->isExactlyValue(0.0);
    }
  }
  return false;
}

/// isLegalCmpImmediate - Return true if C can be used directly as a compare
/// immediate: an 8-bit unsigned value for Thumb, or a value encodable as an
/// ARM shifter-operand immediate otherwise.
static bool isLegalCmpImmediate(int C, bool isThumb) {
  return ( isThumb && (C & ~255U) == 0) ||
         (!isThumb && ARM_AM::getSOImmVal(C) != -1);
}

/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.  If the RHS constant doesn't fit in a compare
/// immediate, the condition code and constant may be adjusted by one to an
/// equivalent comparison whose constant does fit.
static SDOperand getARMCmp(SDOperand LHS, SDOperand RHS, ISD::CondCode CC,
                           SDOperand &ARMCC, SelectionDAG &DAG, bool isThumb) {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.Val)) {
    int C = (int)RHSC->getValue();
    if (!isLegalCmpImmediate(C, isThumb)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETULT:
      case ISD::SETGE:
      case ISD::SETUGE:
        // x < C  is equivalent to  x <= C-1  (and x >= C to x > C-1).
        if (isLegalCmpImmediate(C-1, isThumb)) {
          switch (CC) {
          default: break;
          case ISD::SETLT: CC = ISD::SETLE; break;
          case ISD::SETULT: CC = ISD::SETULE; break;
          case ISD::SETGE: CC = ISD::SETGT; break;
          case ISD::SETUGE: CC = ISD::SETUGT; break;
          }
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETULE:
      case ISD::SETGT:
      case ISD::SETUGT:
        // x <= C  is equivalent to  x < C+1  (and x > C to x >= C+1).
        if (isLegalCmpImmediate(C+1, isThumb)) {
          switch (CC) {
          default: break;
          case ISD::SETLE: CC = ISD::SETLT; break;
          case ISD::SETULE: CC = ISD::SETULT; break;
          case ISD::SETGT: CC = ISD::SETGE; break;
          case ISD::SETUGT: CC = ISD::SETUGE; break;
          }
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  // Return the ARM condition code by reference and the CMP node by value.
  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMCC = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(ARMISD::CMP, MVT::Flag, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
/// Comparison against +0.0 uses the dedicated compare-with-zero form.
static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) {
  SDOperand Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, MVT::Flag, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, MVT::Flag, LHS);
  // Transfer the FP status flags to the integer CPSR flags.
  return DAG.getNode(ARMISD::FMSTAT, MVT::Flag, Cmp);
}

/// LowerSELECT_CC - Lower SELECT_CC into one (integer / simple FP condition)
/// or two (FP condition needing two ARM condition codes) conditional moves.
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  MVT::ValueType VT = Op.getValueType();
  SDOperand LHS = Op.getOperand(0);
  SDOperand RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDOperand TrueVal = Op.getOperand(2);
  SDOperand FalseVal = Op.getOperand(3);

  if (LHS.getValueType() == MVT::i32) {
    // Integer compare: one CMP plus a conditional move.
    SDOperand ARMCC;
    SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb());
    return DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal, ARMCC, Cmp);
  }

  // FP compare.  FPCCToARMCC may map CC to up to two ARM condition codes;
  // its return value indicates the operands must be swapped.
  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    std::swap(TrueVal, FalseVal);

  SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32);
  SDOperand Cmp = getVFPCmp(LHS, RHS, DAG);
  SDOperand Result = DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal,
                                 ARMCC, Cmp);
  if (CondCode2 != ARMCC::AL) {
    // Second condition code: cascade another CMOV on the first result.
    SDOperand ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDOperand Cmp2 = getVFPCmp(LHS, RHS, DAG);
    Result = DAG.getNode(ARMISD::CMOV, VT, Result, TrueVal, ARMCC2, Cmp2);
  }
  return Result;
}

/// LowerBR_CC - Lower BR_CC into a compare followed by one (or, for FP
/// conditions mapping to two ARM condition codes, two) conditional branches.
static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) {
  SDOperand Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDOperand LHS = Op.getOperand(2);
  SDOperand RHS = Op.getOperand(3);
  SDOperand Dest = Op.getOperand(4);

  if (LHS.getValueType() == MVT::i32) {
    // Integer compare-and-branch.
    SDOperand ARMCC;
    SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb());
    return DAG.getNode(ARMISD::BRCOND, MVT::Other, Chain, Dest, ARMCC, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
  ARMCC::CondCodes CondCode, CondCode2;
  if (FPCCToARMCC(CC, CondCode, CondCode2))
    // Swap the LHS/RHS of the comparison if needed.
    std::swap(LHS, RHS);

  SDOperand Cmp = getVFPCmp(LHS, RHS, DAG);
  SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32);
  // BRCOND produces a chain and a flag so a second branch can reuse the
  // compare result.
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
  SDOperand Ops[] = { Chain, Dest, ARMCC, Cmp };
  SDOperand Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 4);
  if (CondCode2 != ARMCC::AL) {
    // Second branch for conditions requiring two ARM condition codes.
    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
    SDOperand Ops[] = { Res, Dest, ARMCC, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 4);
  }
  return Res;
}

/// LowerBR_JT - Lower a jump-table branch: compute the address of the table
/// entry, load it, and emit an ARMISD::BR_JT through the loaded address.
SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Chain = Op.getOperand(0);
  SDOperand Table = Op.getOperand(1);
  SDOperand Index = Op.getOperand(2);

  MVT::ValueType PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  // Each jump table gets a unique id so the asm printer can emit a matching
  // label for the PIC base.
  SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, MVT::i32, JTI, UId);
  // Entries are 4 bytes each: address = table + index*4.
  Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy));
  SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table);
  bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
  Addr = DAG.getLoad(isPIC ? MVT::i32 : PTy, Chain, Addr, NULL, 0);
  Chain = Addr.getValue(1);
  if (isPIC)
    // PIC tables hold offsets relative to the table base; add it back.
    Addr = DAG.getNode(ISD::ADD, PTy, Addr, Table);
  return DAG.getNode(ARMISD::BR_JT, MVT::Other, Chain, Addr, JTI, UId);
}

/// LowerFP_TO_INT - f32 -> i32 conversion via the VFP FTOSI/FTOUI nodes;
/// the result sits in an FP register, so bitconvert it to i32.
static SDOperand LowerFP_TO_INT(SDOperand Op, SelectionDAG &DAG) {
  unsigned Opc =
    Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
  Op = DAG.getNode(Opc, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
}

/// LowerINT_TO_FP - i32 -> FP conversion: move the integer bits into an FP
/// register first, then convert with SITOF/UITOF.
static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
  MVT::ValueType VT = Op.getValueType();
  unsigned Opc =
    Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;

  Op = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Op.getOperand(0));
  return DAG.getNode(Opc, VT, Op);
}

static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
  // Implement fcopysign with a fabs and a conditional fneg.
  SDOperand Tmp0 = Op.getOperand(0);
  SDOperand Tmp1 = Op.getOperand(1);
  MVT::ValueType VT = Op.getValueType();
  MVT::ValueType SrcVT = Tmp1.getValueType();
  SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0);
  // Compare the sign source against zero; negate |Tmp0| when Tmp1 < 0.
  SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG);
  SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
  return DAG.getNode(ARMISD::CNEG, VT, AbsVal, AbsVal, ARMCC, Cmp);
}

static SDOperand LowerBIT_CONVERT(SDOperand Op, SelectionDAG &DAG) {
  // Turn f64->i64 into FMRRD.
  assert(Op.getValueType() == MVT::i64 &&
         Op.getOperand(0).getValueType() == MVT::f64);

  // FMRRD yields the two i32 halves of the double in a pair of GPRs.
  Op = Op.getOperand(0);
  SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32),
                              &Op, 1);

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Cvt, Cvt.getValue(1));
}

/// LowerMUL - Expand an i64 multiply into 32-bit operations, picking
/// mul+mulhs / mul+mulhu / umull+fixups depending on what is known about
/// the sign/zero-extension of the operands.
static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG) {
  // FIXME: All this code is target-independent.  Create a new target-indep
  // MULHILO node and move this code to the legalizer.
  //
  assert(Op.getValueType() == MVT::i64 && "Only handles i64 expand right now!");

  // Low halves of both operands.
  SDOperand LL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand RL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(1),
                             DAG.getConstant(0, MVT::i32));

  const TargetLowering &TL = DAG.getTargetLoweringInfo();
  unsigned LHSSB = TL.ComputeNumSignBits(Op.getOperand(0));
  unsigned RHSSB = TL.ComputeNumSignBits(Op.getOperand(1));

  SDOperand Lo, Hi;
  // Figure out how to lower this multiply.
  if (LHSSB >= 33 && RHSSB >= 33) {
    // If the input values are both sign extended, we can emit a mulhs+mul.
    Lo = DAG.getNode(ISD::MUL, MVT::i32, LL, RL);
    Hi = DAG.getNode(ISD::MULHS, MVT::i32, LL, RL);
  } else if (LHSSB == 32 && RHSSB == 32 &&
             TL.MaskedValueIsZero(Op.getOperand(0), 0xFFFFFFFF00000000ULL) &&
             TL.MaskedValueIsZero(Op.getOperand(1), 0xFFFFFFFF00000000ULL)) {
    // If the inputs are zero extended, use mulhu.
    Lo = DAG.getNode(ISD::MUL, MVT::i32, LL, RL);
    Hi = DAG.getNode(ISD::MULHU, MVT::i32, LL, RL);
  } else {
    // General case: full 64x64 product of the four 32-bit halves
    // (the LH*RH term only affects bits >= 64 and is dropped).
    SDOperand LH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                               DAG.getConstant(1, MVT::i32));
    SDOperand RH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(1),
                               DAG.getConstant(1, MVT::i32));

    // Lo,Hi = umul LHS, RHS.
    SDOperand Ops[] = { LL, RL };
    SDOperand UMul64 = DAG.getNode(ARMISD::MULHILOU,
                                   DAG.getVTList(MVT::i32, MVT::i32), Ops, 2);
    Lo = UMul64;
    Hi = UMul64.getValue(1);
    RH = DAG.getNode(ISD::MUL, MVT::i32, LL, RH);
    LH = DAG.getNode(ISD::MUL, MVT::i32, LH, RL);
    Hi = DAG.getNode(ISD::ADD, MVT::i32, Hi, RH);
    Hi = DAG.getNode(ISD::ADD, MVT::i32, Hi, LH);
  }

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
}

/// LowerMULHU - MULHU is the second (high) result of ARMISD::MULHILOU.
static SDOperand LowerMULHU(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  return DAG.getNode(ARMISD::MULHILOU,
                     DAG.getVTList(MVT::i32, MVT::i32), Ops, 2).getValue(1);
}

/// LowerMULHS - MULHS is the second (high) result of ARMISD::MULHILOS.
static SDOperand LowerMULHS(SDOperand Op, SelectionDAG &DAG) {
  SDOperand Ops[] = { Op.getOperand(0), Op.getOperand(1) };
  return DAG.getNode(ARMISD::MULHILOS,
                     DAG.getVTList(MVT::i32, MVT::i32), Ops, 2).getValue(1);
}

/// LowerSRx - Lower an i64 SRL/SRA by exactly 1 using the ARM RRX
/// (rotate-right-with-extend) instruction; other shifts fall back to
/// generic lowering by returning an empty SDOperand.
static SDOperand LowerSRx(SDOperand Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  assert(Op.getValueType() == MVT::i64 &&
         (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isa<ConstantSDNode>(Op.getOperand(1)) ||
      cast<ConstantSDNode>(Op.getOperand(1))->getValue() != 1)
    return SDOperand();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb()) return SDOperand();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(0, MVT::i32));
  SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op.getOperand(0),
                             DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = Op.getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Lo, Hi);
}

/// LowerOperation - Dispatch custom lowering for the node kinds this target
/// marked as Custom.
SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  switch (Op.getOpcode()) {
  default: assert(0 && "Don't know how to custom lower this!"); abort();
  case ISD::ConstantPool:  return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::CALL:          return LowerCALL(Op, DAG);
  case ISD::RET:           return LowerRET(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG, Subtarget);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG, Subtarget);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG, VarArgsFrameIndex);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::BIT_CONVERT:   return LowerBIT_CONVERT(Op, DAG);
  case ISD::MUL:           return LowerMUL(Op, DAG);
  case ISD::MULHU:         return LowerMULHU(Op, DAG);
  case ISD::MULHS:
                           return LowerMULHS(Op, DAG);
  case ISD::SRL:
  case ISD::SRA:           return LowerSRx(Op, DAG, Subtarget);
  case ISD::FORMAL_ARGUMENTS:
    return LowerFORMAL_ARGUMENTS(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

/// InsertAtEndOfBasicBlock - Expand the ARM::tMOVCCr pseudo (Thumb
/// conditional move) into explicit control flow, since Thumb has no
/// predicated move instruction.
MachineBasicBlock *
ARMTargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                           MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case ARM::tMOVCCr: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    ilist<MachineBasicBlock>::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
    // Branch to sinkMBB on the condition carried in operand 3.
    BuildMI(BB, TII->get(ARM::tBcc)).addMBB(sinkMBB)
      .addImm(MI->getOperand(3).getImm());
    MachineFunction *F = BB->getParent();
    F->getBasicBlockList().insert(It, copy0MBB);
    F->getBasicBlockList().insert(It, sinkMBB);
    // Update machine-CFG edges by first adding all successors of the current
    // block to the new block which will contain the Phi node for the select.
    for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
        e = BB->succ_end(); i != e; ++i)
      sinkMBB->addSuccessor(*i);
    // Next, remove all successors of the current block, and add the true
    // and fallthrough blocks as its successors.
    while(!BB->succ_empty())
      BB->removeSuccessor(BB->succ_begin());
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(ARM::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    delete MI;   // The pseudo instruction is gone now.
    return BB;
  }
  }
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressImmediate - Return true if the integer value or
/// GlobalValue can be used as the offset of the target addressing mode.
bool ARMTargetLowering::isLegalAddressImmediate(int64_t V) const {
  // ARM allows a 12-bit immediate field.
1212 return V == V & ((1LL << 12) - 1); 1213} 1214 1215bool ARMTargetLowering::isLegalAddressImmediate(GlobalValue *GV) const { 1216 return false; 1217} 1218 1219static bool getIndexedAddressParts(SDNode *Ptr, MVT::ValueType VT, 1220 bool isSEXTLoad, SDOperand &Base, 1221 SDOperand &Offset, bool &isInc, 1222 SelectionDAG &DAG) { 1223 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 1224 return false; 1225 1226 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 1227 // AddressingMode 3 1228 Base = Ptr->getOperand(0); 1229 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 1230 int RHSC = (int)RHS->getValue(); 1231 if (RHSC < 0 && RHSC > -256) { 1232 isInc = false; 1233 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 1234 return true; 1235 } 1236 } 1237 isInc = (Ptr->getOpcode() == ISD::ADD); 1238 Offset = Ptr->getOperand(1); 1239 return true; 1240 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 1241 // AddressingMode 2 1242 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 1243 int RHSC = (int)RHS->getValue(); 1244 if (RHSC < 0 && RHSC > -0x1000) { 1245 isInc = false; 1246 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 1247 Base = Ptr->getOperand(0); 1248 return true; 1249 } 1250 } 1251 1252 if (Ptr->getOpcode() == ISD::ADD) { 1253 isInc = true; 1254 ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0)); 1255 if (ShOpcVal != ARM_AM::no_shift) { 1256 Base = Ptr->getOperand(1); 1257 Offset = Ptr->getOperand(0); 1258 } else { 1259 Base = Ptr->getOperand(0); 1260 Offset = Ptr->getOperand(1); 1261 } 1262 return true; 1263 } 1264 1265 isInc = (Ptr->getOpcode() == ISD::ADD); 1266 Base = Ptr->getOperand(0); 1267 Offset = Ptr->getOperand(1); 1268 return true; 1269 } 1270 1271 // FIXME: Use FLDM / FSTM to emulate indexed FP load / store. 
1272 return false; 1273} 1274 1275/// getPreIndexedAddressParts - returns true by value, base pointer and 1276/// offset pointer and addressing mode by reference if the node's address 1277/// can be legally represented as pre-indexed load / store address. 1278bool 1279ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, 1280 SDOperand &Offset, 1281 ISD::MemIndexedMode &AM, 1282 SelectionDAG &DAG) { 1283 if (Subtarget->isThumb()) 1284 return false; 1285 1286 MVT::ValueType VT; 1287 SDOperand Ptr; 1288 bool isSEXTLoad = false; 1289 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1290 Ptr = LD->getBasePtr(); 1291 VT = LD->getLoadedVT(); 1292 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 1293 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1294 Ptr = ST->getBasePtr(); 1295 VT = ST->getStoredVT(); 1296 } else 1297 return false; 1298 1299 bool isInc; 1300 bool isLegal = getIndexedAddressParts(Ptr.Val, VT, isSEXTLoad, Base, Offset, 1301 isInc, DAG); 1302 if (isLegal) { 1303 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 1304 return true; 1305 } 1306 return false; 1307} 1308 1309/// getPostIndexedAddressParts - returns true by value, base pointer and 1310/// offset pointer and addressing mode by reference if this node can be 1311/// combined with a load / store to form a post-indexed load / store. 
1312bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 1313 SDOperand &Base, 1314 SDOperand &Offset, 1315 ISD::MemIndexedMode &AM, 1316 SelectionDAG &DAG) { 1317 if (Subtarget->isThumb()) 1318 return false; 1319 1320 MVT::ValueType VT; 1321 SDOperand Ptr; 1322 bool isSEXTLoad = false; 1323 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1324 VT = LD->getLoadedVT(); 1325 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 1326 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1327 VT = ST->getStoredVT(); 1328 } else 1329 return false; 1330 1331 bool isInc; 1332 bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 1333 isInc, DAG); 1334 if (isLegal) { 1335 AM = isInc ? ISD::POST_INC : ISD::POST_DEC; 1336 return true; 1337 } 1338 return false; 1339} 1340 1341void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, 1342 uint64_t Mask, 1343 uint64_t &KnownZero, 1344 uint64_t &KnownOne, 1345 unsigned Depth) const { 1346 KnownZero = 0; 1347 KnownOne = 0; 1348 switch (Op.getOpcode()) { 1349 default: break; 1350 case ARMISD::CMOV: { 1351 // Bits are known zero/one if known on the LHS and RHS. 1352 ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1); 1353 if (KnownZero == 0 && KnownOne == 0) return; 1354 1355 uint64_t KnownZeroRHS, KnownOneRHS; 1356 ComputeMaskedBits(Op.getOperand(1), Mask, 1357 KnownZeroRHS, KnownOneRHS, Depth+1); 1358 KnownZero &= KnownZeroRHS; 1359 KnownOne &= KnownOneRHS; 1360 return; 1361 } 1362 } 1363} 1364 1365//===----------------------------------------------------------------------===// 1366// ARM Inline Assembly Support 1367//===----------------------------------------------------------------------===// 1368 1369/// getConstraintType - Given a constraint letter, return the type of 1370/// constraint it is for this target. 
1371ARMTargetLowering::ConstraintType 1372ARMTargetLowering::getConstraintType(char ConstraintLetter) const { 1373 switch (ConstraintLetter) { 1374 case 'l': 1375 return C_RegisterClass; 1376 default: return TargetLowering::getConstraintType(ConstraintLetter); 1377 } 1378} 1379 1380std::pair<unsigned, const TargetRegisterClass*> 1381ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 1382 MVT::ValueType VT) const { 1383 if (Constraint.size() == 1) { 1384 // GCC RS6000 Constraint Letters 1385 switch (Constraint[0]) { 1386 case 'l': 1387 // FIXME: in thumb mode, 'l' is only low-regs. 1388 // FALL THROUGH. 1389 case 'r': 1390 return std::make_pair(0U, ARM::GPRRegisterClass); 1391 break; 1392 } 1393 } 1394 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 1395} 1396 1397std::vector<unsigned> ARMTargetLowering:: 1398getRegClassForInlineAsmConstraint(const std::string &Constraint, 1399 MVT::ValueType VT) const { 1400 if (Constraint.size() != 1) 1401 return std::vector<unsigned>(); 1402 1403 switch (Constraint[0]) { // GCC ARM Constraint Letters 1404 default: break; 1405 case 'l': 1406 case 'r': 1407 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3, 1408 ARM::R4, ARM::R5, ARM::R6, ARM::R7, 1409 ARM::R8, ARM::R9, ARM::R10, ARM::R11, 1410 ARM::R12, ARM::LR, 0); 1411 } 1412 1413 return std::vector<unsigned>(); 1414} 1415