// ARMISelDAGToDAG.cpp — revision 18f30e6f5e80787808fe1455742452a5210afe07
1//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines an instruction selector for the ARM target. 11// 12//===----------------------------------------------------------------------===// 13 14#include "ARM.h" 15#include "ARMAddressingModes.h" 16#include "ARMTargetMachine.h" 17#include "llvm/CallingConv.h" 18#include "llvm/Constants.h" 19#include "llvm/DerivedTypes.h" 20#include "llvm/Function.h" 21#include "llvm/Intrinsics.h" 22#include "llvm/LLVMContext.h" 23#include "llvm/CodeGen/MachineFrameInfo.h" 24#include "llvm/CodeGen/MachineFunction.h" 25#include "llvm/CodeGen/MachineInstrBuilder.h" 26#include "llvm/CodeGen/SelectionDAG.h" 27#include "llvm/CodeGen/SelectionDAGISel.h" 28#include "llvm/Target/TargetLowering.h" 29#include "llvm/Target/TargetOptions.h" 30#include "llvm/Support/CommandLine.h" 31#include "llvm/Support/Compiler.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/Support/ErrorHandling.h" 34#include "llvm/Support/raw_ostream.h" 35 36using namespace llvm; 37 38static cl::opt<bool> 39UseRegSeq("neon-reg-sequence", cl::Hidden, 40 cl::desc("Use reg_sequence to model ld / st of multiple neon regs"), 41 cl::init(true)); 42 43//===--------------------------------------------------------------------===// 44/// ARMDAGToDAGISel - ARM specific code to select ARM machine 45/// instructions for SelectionDAG operations. 46/// 47namespace { 48class ARMDAGToDAGISel : public SelectionDAGISel { 49 ARMBaseTargetMachine &TM; 50 51 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can 52 /// make the right decision when generating code for different targets. 
53 const ARMSubtarget *Subtarget; 54 55public: 56 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, 57 CodeGenOpt::Level OptLevel) 58 : SelectionDAGISel(tm, OptLevel), TM(tm), 59 Subtarget(&TM.getSubtarget<ARMSubtarget>()) { 60 } 61 62 virtual const char *getPassName() const { 63 return "ARM Instruction Selection"; 64 } 65 66 /// getI32Imm - Return a target constant of type i32 with the specified 67 /// value. 68 inline SDValue getI32Imm(unsigned Imm) { 69 return CurDAG->getTargetConstant(Imm, MVT::i32); 70 } 71 72 SDNode *Select(SDNode *N); 73 74 bool SelectShifterOperandReg(SDNode *Op, SDValue N, SDValue &A, 75 SDValue &B, SDValue &C); 76 bool SelectAddrMode2(SDNode *Op, SDValue N, SDValue &Base, 77 SDValue &Offset, SDValue &Opc); 78 bool SelectAddrMode2Offset(SDNode *Op, SDValue N, 79 SDValue &Offset, SDValue &Opc); 80 bool SelectAddrMode3(SDNode *Op, SDValue N, SDValue &Base, 81 SDValue &Offset, SDValue &Opc); 82 bool SelectAddrMode3Offset(SDNode *Op, SDValue N, 83 SDValue &Offset, SDValue &Opc); 84 bool SelectAddrMode4(SDNode *Op, SDValue N, SDValue &Addr, 85 SDValue &Mode); 86 bool SelectAddrMode5(SDNode *Op, SDValue N, SDValue &Base, 87 SDValue &Offset); 88 bool SelectAddrMode6(SDNode *Op, SDValue N, SDValue &Addr, SDValue &Align); 89 90 bool SelectAddrModePC(SDNode *Op, SDValue N, SDValue &Offset, 91 SDValue &Label); 92 93 bool SelectThumbAddrModeRR(SDNode *Op, SDValue N, SDValue &Base, 94 SDValue &Offset); 95 bool SelectThumbAddrModeRI5(SDNode *Op, SDValue N, unsigned Scale, 96 SDValue &Base, SDValue &OffImm, 97 SDValue &Offset); 98 bool SelectThumbAddrModeS1(SDNode *Op, SDValue N, SDValue &Base, 99 SDValue &OffImm, SDValue &Offset); 100 bool SelectThumbAddrModeS2(SDNode *Op, SDValue N, SDValue &Base, 101 SDValue &OffImm, SDValue &Offset); 102 bool SelectThumbAddrModeS4(SDNode *Op, SDValue N, SDValue &Base, 103 SDValue &OffImm, SDValue &Offset); 104 bool SelectThumbAddrModeSP(SDNode *Op, SDValue N, SDValue &Base, 105 SDValue &OffImm); 106 107 bool 
SelectT2ShifterOperandReg(SDNode *Op, SDValue N, 108 SDValue &BaseReg, SDValue &Opc); 109 bool SelectT2AddrModeImm12(SDNode *Op, SDValue N, SDValue &Base, 110 SDValue &OffImm); 111 bool SelectT2AddrModeImm8(SDNode *Op, SDValue N, SDValue &Base, 112 SDValue &OffImm); 113 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N, 114 SDValue &OffImm); 115 bool SelectT2AddrModeImm8s4(SDNode *Op, SDValue N, SDValue &Base, 116 SDValue &OffImm); 117 bool SelectT2AddrModeSoReg(SDNode *Op, SDValue N, SDValue &Base, 118 SDValue &OffReg, SDValue &ShImm); 119 120 // Include the pieces autogenerated from the target description. 121#include "ARMGenDAGISel.inc" 122 123private: 124 /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for 125 /// ARM. 126 SDNode *SelectARMIndexedLoad(SDNode *N); 127 SDNode *SelectT2IndexedLoad(SDNode *N); 128 129 /// SelectVLD - Select NEON load intrinsics. NumVecs should be 130 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for 131 /// loads of D registers and even subregs and odd subregs of Q registers. 132 /// For NumVecs <= 2, QOpcodes1 is not used. 133 SDNode *SelectVLD(SDNode *N, unsigned NumVecs, unsigned *DOpcodes, 134 unsigned *QOpcodes0, unsigned *QOpcodes1); 135 136 /// SelectVST - Select NEON store intrinsics. NumVecs should 137 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for 138 /// stores of D registers and even subregs and odd subregs of Q registers. 139 /// For NumVecs <= 2, QOpcodes1 is not used. 140 SDNode *SelectVST(SDNode *N, unsigned NumVecs, unsigned *DOpcodes, 141 unsigned *QOpcodes0, unsigned *QOpcodes1); 142 143 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should 144 /// be 2, 3 or 4. The opcode arrays specify the instructions used for 145 /// load/store of D registers and even subregs and odd subregs of Q registers. 
146 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned NumVecs, 147 unsigned *DOpcodes, unsigned *QOpcodes0, 148 unsigned *QOpcodes1); 149 150 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM. 151 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned); 152 153 /// SelectCMOVOp - Select CMOV instructions for ARM. 154 SDNode *SelectCMOVOp(SDNode *N); 155 SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 156 ARMCC::CondCodes CCVal, SDValue CCR, 157 SDValue InFlag); 158 SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 159 ARMCC::CondCodes CCVal, SDValue CCR, 160 SDValue InFlag); 161 SDNode *SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 162 ARMCC::CondCodes CCVal, SDValue CCR, 163 SDValue InFlag); 164 SDNode *SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 165 ARMCC::CondCodes CCVal, SDValue CCR, 166 SDValue InFlag); 167 168 SDNode *SelectConcatVector(SDNode *N); 169 170 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 171 /// inline asm expressions. 172 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 173 char ConstraintCode, 174 std::vector<SDValue> &OutOps); 175 176 /// PairDRegs - Form a quad register from a pair of D registers. 177 /// 178 SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1); 179 180 /// PairDRegs - Form a quad register pair from a pair of Q registers. 181 /// 182 SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1); 183 184 /// QuadDRegs - Form a quad register pair from a quad of D registers. 185 /// 186 SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3); 187 188 /// QuadQRegs - Form 4 consecutive Q registers. 189 /// 190 SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3); 191 192 /// OctoDRegs - Form 8 consecutive D registers. 
193 /// 194 SDNode *OctoDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3, 195 SDValue V4, SDValue V5, SDValue V6, SDValue V7); 196}; 197} 198 199/// isInt32Immediate - This method tests to see if the node is a 32-bit constant 200/// operand. If so Imm will receive the 32-bit value. 201static bool isInt32Immediate(SDNode *N, unsigned &Imm) { 202 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) { 203 Imm = cast<ConstantSDNode>(N)->getZExtValue(); 204 return true; 205 } 206 return false; 207} 208 209// isInt32Immediate - This method tests to see if a constant operand. 210// If so Imm will receive the 32 bit value. 211static bool isInt32Immediate(SDValue N, unsigned &Imm) { 212 return isInt32Immediate(N.getNode(), Imm); 213} 214 215// isOpcWithIntImmediate - This method tests to see if the node is a specific 216// opcode and that it has a immediate integer right operand. 217// If so Imm will receive the 32 bit value. 218static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) { 219 return N->getOpcode() == Opc && 220 isInt32Immediate(N->getOperand(1).getNode(), Imm); 221} 222 223 224bool ARMDAGToDAGISel::SelectShifterOperandReg(SDNode *Op, 225 SDValue N, 226 SDValue &BaseReg, 227 SDValue &ShReg, 228 SDValue &Opc) { 229 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N); 230 231 // Don't match base register only case. That is matched to a separate 232 // lower complexity pattern with explicit register operand. 
233 if (ShOpcVal == ARM_AM::no_shift) return false; 234 235 BaseReg = N.getOperand(0); 236 unsigned ShImmVal = 0; 237 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 238 ShReg = CurDAG->getRegister(0, MVT::i32); 239 ShImmVal = RHS->getZExtValue() & 31; 240 } else { 241 ShReg = N.getOperand(1); 242 } 243 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal), 244 MVT::i32); 245 return true; 246} 247 248bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N, 249 SDValue &Base, SDValue &Offset, 250 SDValue &Opc) { 251 if (N.getOpcode() == ISD::MUL) { 252 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 253 // X * [3,5,9] -> X + X * [2,4,8] etc. 254 int RHSC = (int)RHS->getZExtValue(); 255 if (RHSC & 1) { 256 RHSC = RHSC & ~1; 257 ARM_AM::AddrOpc AddSub = ARM_AM::add; 258 if (RHSC < 0) { 259 AddSub = ARM_AM::sub; 260 RHSC = - RHSC; 261 } 262 if (isPowerOf2_32(RHSC)) { 263 unsigned ShAmt = Log2_32(RHSC); 264 Base = Offset = N.getOperand(0); 265 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, 266 ARM_AM::lsl), 267 MVT::i32); 268 return true; 269 } 270 } 271 } 272 } 273 274 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) { 275 Base = N; 276 if (N.getOpcode() == ISD::FrameIndex) { 277 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 278 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 279 } else if (N.getOpcode() == ARMISD::Wrapper && 280 !(Subtarget->useMovt() && 281 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { 282 Base = N.getOperand(0); 283 } 284 Offset = CurDAG->getRegister(0, MVT::i32); 285 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0, 286 ARM_AM::no_shift), 287 MVT::i32); 288 return true; 289 } 290 291 // Match simple R +/- imm12 operands. 
292 if (N.getOpcode() == ISD::ADD) 293 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 294 int RHSC = (int)RHS->getZExtValue(); 295 if ((RHSC >= 0 && RHSC < 0x1000) || 296 (RHSC < 0 && RHSC > -0x1000)) { // 12 bits. 297 Base = N.getOperand(0); 298 if (Base.getOpcode() == ISD::FrameIndex) { 299 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 300 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 301 } 302 Offset = CurDAG->getRegister(0, MVT::i32); 303 304 ARM_AM::AddrOpc AddSub = ARM_AM::add; 305 if (RHSC < 0) { 306 AddSub = ARM_AM::sub; 307 RHSC = - RHSC; 308 } 309 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC, 310 ARM_AM::no_shift), 311 MVT::i32); 312 return true; 313 } 314 } 315 316 // Otherwise this is R +/- [possibly shifted] R. 317 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub; 318 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1)); 319 unsigned ShAmt = 0; 320 321 Base = N.getOperand(0); 322 Offset = N.getOperand(1); 323 324 if (ShOpcVal != ARM_AM::no_shift) { 325 // Check to see if the RHS of the shift is a constant, if not, we can't fold 326 // it. 327 if (ConstantSDNode *Sh = 328 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) { 329 ShAmt = Sh->getZExtValue(); 330 Offset = N.getOperand(1).getOperand(0); 331 } else { 332 ShOpcVal = ARM_AM::no_shift; 333 } 334 } 335 336 // Try matching (R shl C) + (R). 337 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) { 338 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0)); 339 if (ShOpcVal != ARM_AM::no_shift) { 340 // Check to see if the RHS of the shift is a constant, if not, we can't 341 // fold it. 
342 if (ConstantSDNode *Sh = 343 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) { 344 ShAmt = Sh->getZExtValue(); 345 Offset = N.getOperand(0).getOperand(0); 346 Base = N.getOperand(1); 347 } else { 348 ShOpcVal = ARM_AM::no_shift; 349 } 350 } 351 } 352 353 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal), 354 MVT::i32); 355 return true; 356} 357 358bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N, 359 SDValue &Offset, SDValue &Opc) { 360 unsigned Opcode = Op->getOpcode(); 361 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD) 362 ? cast<LoadSDNode>(Op)->getAddressingMode() 363 : cast<StoreSDNode>(Op)->getAddressingMode(); 364 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC) 365 ? ARM_AM::add : ARM_AM::sub; 366 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) { 367 int Val = (int)C->getZExtValue(); 368 if (Val >= 0 && Val < 0x1000) { // 12 bits. 369 Offset = CurDAG->getRegister(0, MVT::i32); 370 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val, 371 ARM_AM::no_shift), 372 MVT::i32); 373 return true; 374 } 375 } 376 377 Offset = N; 378 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N); 379 unsigned ShAmt = 0; 380 if (ShOpcVal != ARM_AM::no_shift) { 381 // Check to see if the RHS of the shift is a constant, if not, we can't fold 382 // it. 383 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 384 ShAmt = Sh->getZExtValue(); 385 Offset = N.getOperand(0); 386 } else { 387 ShOpcVal = ARM_AM::no_shift; 388 } 389 } 390 391 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal), 392 MVT::i32); 393 return true; 394} 395 396 397bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N, 398 SDValue &Base, SDValue &Offset, 399 SDValue &Opc) { 400 if (N.getOpcode() == ISD::SUB) { 401 // X - C is canonicalize to X + -C, no need to handle it here. 
402 Base = N.getOperand(0); 403 Offset = N.getOperand(1); 404 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32); 405 return true; 406 } 407 408 if (N.getOpcode() != ISD::ADD) { 409 Base = N; 410 if (N.getOpcode() == ISD::FrameIndex) { 411 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 412 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 413 } 414 Offset = CurDAG->getRegister(0, MVT::i32); 415 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32); 416 return true; 417 } 418 419 // If the RHS is +/- imm8, fold into addr mode. 420 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 421 int RHSC = (int)RHS->getZExtValue(); 422 if ((RHSC >= 0 && RHSC < 256) || 423 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed. 424 Base = N.getOperand(0); 425 if (Base.getOpcode() == ISD::FrameIndex) { 426 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 427 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 428 } 429 Offset = CurDAG->getRegister(0, MVT::i32); 430 431 ARM_AM::AddrOpc AddSub = ARM_AM::add; 432 if (RHSC < 0) { 433 AddSub = ARM_AM::sub; 434 RHSC = - RHSC; 435 } 436 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32); 437 return true; 438 } 439 } 440 441 Base = N.getOperand(0); 442 Offset = N.getOperand(1); 443 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32); 444 return true; 445} 446 447bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N, 448 SDValue &Offset, SDValue &Opc) { 449 unsigned Opcode = Op->getOpcode(); 450 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD) 451 ? cast<LoadSDNode>(Op)->getAddressingMode() 452 : cast<StoreSDNode>(Op)->getAddressingMode(); 453 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC) 454 ? 
ARM_AM::add : ARM_AM::sub; 455 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) { 456 int Val = (int)C->getZExtValue(); 457 if (Val >= 0 && Val < 256) { 458 Offset = CurDAG->getRegister(0, MVT::i32); 459 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32); 460 return true; 461 } 462 } 463 464 Offset = N; 465 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32); 466 return true; 467} 468 469bool ARMDAGToDAGISel::SelectAddrMode4(SDNode *Op, SDValue N, 470 SDValue &Addr, SDValue &Mode) { 471 Addr = N; 472 Mode = CurDAG->getTargetConstant(0, MVT::i32); 473 return true; 474} 475 476bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N, 477 SDValue &Base, SDValue &Offset) { 478 if (N.getOpcode() != ISD::ADD) { 479 Base = N; 480 if (N.getOpcode() == ISD::FrameIndex) { 481 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 482 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 483 } else if (N.getOpcode() == ARMISD::Wrapper && 484 !(Subtarget->useMovt() && 485 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { 486 Base = N.getOperand(0); 487 } 488 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0), 489 MVT::i32); 490 return true; 491 } 492 493 // If the RHS is +/- imm8, fold into addr mode. 494 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 495 int RHSC = (int)RHS->getZExtValue(); 496 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4. 497 RHSC >>= 2; 498 if ((RHSC >= 0 && RHSC < 256) || 499 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed. 
500 Base = N.getOperand(0); 501 if (Base.getOpcode() == ISD::FrameIndex) { 502 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 503 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 504 } 505 506 ARM_AM::AddrOpc AddSub = ARM_AM::add; 507 if (RHSC < 0) { 508 AddSub = ARM_AM::sub; 509 RHSC = - RHSC; 510 } 511 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC), 512 MVT::i32); 513 return true; 514 } 515 } 516 } 517 518 Base = N; 519 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0), 520 MVT::i32); 521 return true; 522} 523 524bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Op, SDValue N, 525 SDValue &Addr, SDValue &Align) { 526 Addr = N; 527 // Default to no alignment. 528 Align = CurDAG->getTargetConstant(0, MVT::i32); 529 return true; 530} 531 532bool ARMDAGToDAGISel::SelectAddrModePC(SDNode *Op, SDValue N, 533 SDValue &Offset, SDValue &Label) { 534 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) { 535 Offset = N.getOperand(0); 536 SDValue N1 = N.getOperand(1); 537 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(), 538 MVT::i32); 539 return true; 540 } 541 return false; 542} 543 544bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N, 545 SDValue &Base, SDValue &Offset){ 546 // FIXME dl should come from the parent load or store, not the address 547 DebugLoc dl = Op->getDebugLoc(); 548 if (N.getOpcode() != ISD::ADD) { 549 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N); 550 if (!NC || NC->getZExtValue() != 0) 551 return false; 552 553 Base = Offset = N; 554 return true; 555 } 556 557 Base = N.getOperand(0); 558 Offset = N.getOperand(1); 559 return true; 560} 561 562bool 563ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDNode *Op, SDValue N, 564 unsigned Scale, SDValue &Base, 565 SDValue &OffImm, SDValue &Offset) { 566 if (Scale == 4) { 567 SDValue TmpBase, TmpOffImm; 568 if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm)) 569 return false; // We want to select tLDRspi / tSTRspi 
instead. 570 if (N.getOpcode() == ARMISD::Wrapper && 571 N.getOperand(0).getOpcode() == ISD::TargetConstantPool) 572 return false; // We want to select tLDRpci instead. 573 } 574 575 if (N.getOpcode() != ISD::ADD) { 576 if (N.getOpcode() == ARMISD::Wrapper && 577 !(Subtarget->useMovt() && 578 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { 579 Base = N.getOperand(0); 580 } else 581 Base = N; 582 583 Offset = CurDAG->getRegister(0, MVT::i32); 584 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 585 return true; 586 } 587 588 // Thumb does not have [sp, r] address mode. 589 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0)); 590 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1)); 591 if ((LHSR && LHSR->getReg() == ARM::SP) || 592 (RHSR && RHSR->getReg() == ARM::SP)) { 593 Base = N; 594 Offset = CurDAG->getRegister(0, MVT::i32); 595 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 596 return true; 597 } 598 599 // If the RHS is + imm5 * scale, fold into addr mode. 600 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 601 int RHSC = (int)RHS->getZExtValue(); 602 if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied. 
603 RHSC /= Scale; 604 if (RHSC >= 0 && RHSC < 32) { 605 Base = N.getOperand(0); 606 Offset = CurDAG->getRegister(0, MVT::i32); 607 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); 608 return true; 609 } 610 } 611 } 612 613 Base = N.getOperand(0); 614 Offset = N.getOperand(1); 615 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 616 return true; 617} 618 619bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDNode *Op, SDValue N, 620 SDValue &Base, SDValue &OffImm, 621 SDValue &Offset) { 622 return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset); 623} 624 625bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDNode *Op, SDValue N, 626 SDValue &Base, SDValue &OffImm, 627 SDValue &Offset) { 628 return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset); 629} 630 631bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDNode *Op, SDValue N, 632 SDValue &Base, SDValue &OffImm, 633 SDValue &Offset) { 634 return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset); 635} 636 637bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDNode *Op, SDValue N, 638 SDValue &Base, SDValue &OffImm) { 639 if (N.getOpcode() == ISD::FrameIndex) { 640 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 641 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 642 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 643 return true; 644 } 645 646 if (N.getOpcode() != ISD::ADD) 647 return false; 648 649 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0)); 650 if (N.getOperand(0).getOpcode() == ISD::FrameIndex || 651 (LHSR && LHSR->getReg() == ARM::SP)) { 652 // If the RHS is + imm8 * scale, fold into addr mode. 653 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 654 int RHSC = (int)RHS->getZExtValue(); 655 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied. 
656 RHSC >>= 2; 657 if (RHSC >= 0 && RHSC < 256) { 658 Base = N.getOperand(0); 659 if (Base.getOpcode() == ISD::FrameIndex) { 660 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 661 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 662 } 663 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); 664 return true; 665 } 666 } 667 } 668 } 669 670 return false; 671} 672 673bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDNode *Op, SDValue N, 674 SDValue &BaseReg, 675 SDValue &Opc) { 676 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N); 677 678 // Don't match base register only case. That is matched to a separate 679 // lower complexity pattern with explicit register operand. 680 if (ShOpcVal == ARM_AM::no_shift) return false; 681 682 BaseReg = N.getOperand(0); 683 unsigned ShImmVal = 0; 684 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 685 ShImmVal = RHS->getZExtValue() & 31; 686 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal)); 687 return true; 688 } 689 690 return false; 691} 692 693bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N, 694 SDValue &Base, SDValue &OffImm) { 695 // Match simple R + imm12 operands. 696 697 // Base only. 698 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) { 699 if (N.getOpcode() == ISD::FrameIndex) { 700 // Match frame index... 701 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 702 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 703 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 704 return true; 705 } else if (N.getOpcode() == ARMISD::Wrapper && 706 !(Subtarget->useMovt() && 707 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) { 708 Base = N.getOperand(0); 709 if (Base.getOpcode() == ISD::TargetConstantPool) 710 return false; // We want to select t2LDRpci instead. 
711 } else 712 Base = N; 713 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 714 return true; 715 } 716 717 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 718 if (SelectT2AddrModeImm8(Op, N, Base, OffImm)) 719 // Let t2LDRi8 handle (R - imm8). 720 return false; 721 722 int RHSC = (int)RHS->getZExtValue(); 723 if (N.getOpcode() == ISD::SUB) 724 RHSC = -RHSC; 725 726 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned) 727 Base = N.getOperand(0); 728 if (Base.getOpcode() == ISD::FrameIndex) { 729 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 730 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 731 } 732 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); 733 return true; 734 } 735 } 736 737 // Base only. 738 Base = N; 739 OffImm = CurDAG->getTargetConstant(0, MVT::i32); 740 return true; 741} 742 743bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDNode *Op, SDValue N, 744 SDValue &Base, SDValue &OffImm) { 745 // Match simple R - imm8 operands. 746 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::SUB) { 747 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 748 int RHSC = (int)RHS->getSExtValue(); 749 if (N.getOpcode() == ISD::SUB) 750 RHSC = -RHSC; 751 752 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative) 753 Base = N.getOperand(0); 754 if (Base.getOpcode() == ISD::FrameIndex) { 755 int FI = cast<FrameIndexSDNode>(Base)->getIndex(); 756 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 757 } 758 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); 759 return true; 760 } 761 } 762 } 763 764 return false; 765} 766 767bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N, 768 SDValue &OffImm){ 769 unsigned Opcode = Op->getOpcode(); 770 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD) 771 ? 
cast<LoadSDNode>(Op)->getAddressingMode() 772 : cast<StoreSDNode>(Op)->getAddressingMode(); 773 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) { 774 int RHSC = (int)RHS->getZExtValue(); 775 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits. 776 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC)) 777 ? CurDAG->getTargetConstant(RHSC, MVT::i32) 778 : CurDAG->getTargetConstant(-RHSC, MVT::i32); 779 return true; 780 } 781 } 782 783 return false; 784} 785 786bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N, 787 SDValue &Base, SDValue &OffImm) { 788 if (N.getOpcode() == ISD::ADD) { 789 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 790 int RHSC = (int)RHS->getZExtValue(); 791 // 8 bits. 792 if (((RHSC & 0x3) == 0) && 793 ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { 794 Base = N.getOperand(0); 795 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32); 796 return true; 797 } 798 } 799 } else if (N.getOpcode() == ISD::SUB) { 800 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 801 int RHSC = (int)RHS->getZExtValue(); 802 // 8 bits. 803 if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { 804 Base = N.getOperand(0); 805 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32); 806 return true; 807 } 808 } 809 } 810 811 return false; 812} 813 814bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N, 815 SDValue &Base, 816 SDValue &OffReg, SDValue &ShImm) { 817 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12. 818 if (N.getOpcode() != ISD::ADD) 819 return false; 820 821 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8. 822 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 823 int RHSC = (int)RHS->getZExtValue(); 824 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned) 825 return false; 826 else if (RHSC < 0 && RHSC >= -255) // 8 bits 827 return false; 828 } 829 830 // Look for (R + R) or (R + (R << [1,2,3])). 
831 unsigned ShAmt = 0; 832 Base = N.getOperand(0); 833 OffReg = N.getOperand(1); 834 835 // Swap if it is ((R << c) + R). 836 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg); 837 if (ShOpcVal != ARM_AM::lsl) { 838 ShOpcVal = ARM_AM::getShiftOpcForNode(Base); 839 if (ShOpcVal == ARM_AM::lsl) 840 std::swap(Base, OffReg); 841 } 842 843 if (ShOpcVal == ARM_AM::lsl) { 844 // Check to see if the RHS of the shift is a constant, if not, we can't fold 845 // it. 846 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) { 847 ShAmt = Sh->getZExtValue(); 848 if (ShAmt >= 4) { 849 ShAmt = 0; 850 ShOpcVal = ARM_AM::no_shift; 851 } else 852 OffReg = OffReg.getOperand(0); 853 } else { 854 ShOpcVal = ARM_AM::no_shift; 855 } 856 } 857 858 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32); 859 860 return true; 861} 862 863//===--------------------------------------------------------------------===// 864 865/// getAL - Returns a ARMCC::AL immediate node. 866static inline SDValue getAL(SelectionDAG *CurDAG) { 867 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32); 868} 869 870SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) { 871 LoadSDNode *LD = cast<LoadSDNode>(N); 872 ISD::MemIndexedMode AM = LD->getAddressingMode(); 873 if (AM == ISD::UNINDEXED) 874 return NULL; 875 876 EVT LoadedVT = LD->getMemoryVT(); 877 SDValue Offset, AMOpc; 878 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC); 879 unsigned Opcode = 0; 880 bool Match = false; 881 if (LoadedVT == MVT::i32 && 882 SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) { 883 Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST; 884 Match = true; 885 } else if (LoadedVT == MVT::i16 && 886 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) { 887 Match = true; 888 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD) 889 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST) 890 : (isPre ? 
ARM::LDRH_PRE : ARM::LDRH_POST); 891 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) { 892 if (LD->getExtensionType() == ISD::SEXTLOAD) { 893 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) { 894 Match = true; 895 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST; 896 } 897 } else { 898 if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) { 899 Match = true; 900 Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST; 901 } 902 } 903 } 904 905 if (Match) { 906 SDValue Chain = LD->getChain(); 907 SDValue Base = LD->getBasePtr(); 908 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), 909 CurDAG->getRegister(0, MVT::i32), Chain }; 910 return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32, 911 MVT::Other, Ops, 6); 912 } 913 914 return NULL; 915} 916 917SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) { 918 LoadSDNode *LD = cast<LoadSDNode>(N); 919 ISD::MemIndexedMode AM = LD->getAddressingMode(); 920 if (AM == ISD::UNINDEXED) 921 return NULL; 922 923 EVT LoadedVT = LD->getMemoryVT(); 924 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD; 925 SDValue Offset; 926 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC); 927 unsigned Opcode = 0; 928 bool Match = false; 929 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) { 930 switch (LoadedVT.getSimpleVT().SimpleTy) { 931 case MVT::i32: 932 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST; 933 break; 934 case MVT::i16: 935 if (isSExtLd) 936 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST; 937 else 938 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST; 939 break; 940 case MVT::i8: 941 case MVT::i1: 942 if (isSExtLd) 943 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST; 944 else 945 Opcode = isPre ? 
                 ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  if (llvm::ModelWithRegSequence()) {
    const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
  }
  // Without REG_SEQUENCE modeling, build the pair with INSERT_SUBREG nodes
  // starting from an IMPLICIT_DEF.
  SDValue Undef =
    SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
  SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                        VT, Undef, V0, SubReg0);
  return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                VT, SDValue(Pair, 0), V1, SubReg1);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}

/// QuadDRegs - Form 4 consecutive D registers.
994/// 995SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1, 996 SDValue V2, SDValue V3) { 997 DebugLoc dl = V0.getNode()->getDebugLoc(); 998 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); 999 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); 1000 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32); 1001 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32); 1002 const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 }; 1003 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8); 1004} 1005 1006/// QuadQRegs - Form 4 consecutive Q registers. 1007/// 1008SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1, 1009 SDValue V2, SDValue V3) { 1010 DebugLoc dl = V0.getNode()->getDebugLoc(); 1011 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32); 1012 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32); 1013 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32); 1014 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32); 1015 const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 }; 1016 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8); 1017} 1018 1019/// OctoDRegs - Form 8 consecutive D registers. 
1020/// 1021SDNode *ARMDAGToDAGISel::OctoDRegs(EVT VT, SDValue V0, SDValue V1, 1022 SDValue V2, SDValue V3, 1023 SDValue V4, SDValue V5, 1024 SDValue V6, SDValue V7) { 1025 DebugLoc dl = V0.getNode()->getDebugLoc(); 1026 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); 1027 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); 1028 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32); 1029 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32); 1030 SDValue SubReg4 = CurDAG->getTargetConstant(ARM::dsub_4, MVT::i32); 1031 SDValue SubReg5 = CurDAG->getTargetConstant(ARM::dsub_5, MVT::i32); 1032 SDValue SubReg6 = CurDAG->getTargetConstant(ARM::dsub_6, MVT::i32); 1033 SDValue SubReg7 = CurDAG->getTargetConstant(ARM::dsub_7, MVT::i32); 1034 const SDValue Ops[] ={ V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3, 1035 V4, SubReg4, V5, SubReg5, V6, SubReg6, V7, SubReg7 }; 1036 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 16); 1037} 1038 1039/// GetNEONSubregVT - Given a type for a 128-bit NEON vector, return the type 1040/// for a 64-bit subregister of the vector. 
static EVT GetNEONSubregVT(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled NEON type");
  case MVT::v16i8: return MVT::v8i8;
  case MVT::v8i16: return MVT::v4i16;
  case MVT::v4f32: return MVT::v2f32;
  case MVT::v4i32: return MVT::v2i32;
  case MVT::v2i64: return MVT::v1i64;
  }
}

/// SelectVLD - Select a NEON vld1/vld2/vld3/vld4 node.  DOpcodes are the
/// opcodes for double-register loads; QOpcodes0/QOpcodes1 are the opcodes for
/// the two halves of a quad-register load, both indexed by element size.
SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();

  // Map the element size onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  if (is64BitVector) {
    unsigned Opc = DOpcodes[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    std::vector<EVT> ResTys(NumVecs, VT);
    ResTys.push_back(MVT::Other);
    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
    if (!llvm::ModelWithRegSequence() || NumVecs < 2)
      return VLd;

    SDValue RegSeq;
    SDValue V0 = SDValue(VLd, 0);
    SDValue V1 = SDValue(VLd, 1);

    // Form a REG_SEQUENCE to force register allocation.
    if (NumVecs == 2)
      RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
    else {
      SDValue V2 = SDValue(VLd, 2);
      // If it's a vld3, form a quad D-register but discard the last part.
      SDValue V3 = (NumVecs == 3)
        ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
        : SDValue(VLd, 3);
      RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
    }

    // Extract the individual D registers back out of the REG_SEQUENCE and
    // rewire the original results to them.
    assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
      SDValue D = CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec,
                                                 dl, VT, RegSeq);
      ReplaceUses(SDValue(N, Vec), D);
    }
    ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, NumVecs));
    return NULL;
  }

  EVT RegVT = GetNEONSubregVT(VT);
  if (NumVecs <= 2) {
    // Quad registers are directly supported for VLD1 and VLD2,
    // loading pairs of D regs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    const SDValue Ops[] = { MemAddr, Align, Pred, Reg0, Chain };
    std::vector<EVT> ResTys(2 * NumVecs, RegVT);
    ResTys.push_back(MVT::Other);
    SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
    Chain = SDValue(VLd, 2 * NumVecs);

    // Combine the even and odd subregs to produce the result.
    if (llvm::ModelWithRegSequence()) {
      if (NumVecs == 1) {
        SDNode *Q = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
        ReplaceUses(SDValue(N, 0), SDValue(Q, 0));
      } else {
        SDValue QQ = SDValue(QuadDRegs(MVT::v4i64,
                                       SDValue(VLd, 0), SDValue(VLd, 1),
                                       SDValue(VLd, 2), SDValue(VLd, 3)), 0);
        SDValue Q0 = CurDAG->getTargetExtractSubreg(ARM::qsub_0, dl, VT, QQ);
        SDValue Q1 = CurDAG->getTargetExtractSubreg(ARM::qsub_1, dl, VT, QQ);
        ReplaceUses(SDValue(N, 0), Q0);
        ReplaceUses(SDValue(N, 1), Q1);
      }
    } else {
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
        SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
        ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
      }
    }
  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.

    std::vector<EVT> ResTys(NumVecs, RegVT);
    ResTys.push_back(MemAddr.getValueType());
    ResTys.push_back(MVT::Other);

    // Load the even subregs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    const SDValue OpsA[] = { MemAddr, Align, Reg0, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 6);
    Chain = SDValue(VLdA, NumVecs+1);

    // Load the odd subregs, chained through the updated address from VLdA.
    Opc = QOpcodes1[OpcodeIndex];
    const SDValue OpsB[] = { SDValue(VLdA, NumVecs),
                             Align, Reg0, Pred, Reg0, Chain };
    SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 6);
    Chain = SDValue(VLdB, NumVecs+1);

    if (llvm::ModelWithRegSequence()) {
      // Interleave the even (VLdA) and odd (VLdB) D registers; for a vld3
      // the last pair is padded with IMPLICIT_DEF.
      SDValue V0 = SDValue(VLdA, 0);
      SDValue V1 = SDValue(VLdB, 0);
      SDValue V2 = SDValue(VLdA, 1);
      SDValue V3 = SDValue(VLdB, 1);
      SDValue V4 = SDValue(VLdA, 2);
      SDValue V5 = SDValue(VLdB, 2);
      SDValue V6 = (NumVecs == 3)
        ?
          SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
                  0)
        : SDValue(VLdA, 3);
      SDValue V7 = (NumVecs == 3)
        ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
                  0)
        : SDValue(VLdB, 3);
      SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V0, V1, V2, V3,
                                         V4, V5, V6, V7), 0);

      // Extract out the 3 / 4 Q registers.
      assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
        SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
                                                   dl, VT, RegSeq);
        ReplaceUses(SDValue(N, Vec), Q);
      }
    } else {
      // Combine the even and odd subregs to produce the result.
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
        SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
        ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
      }
    }
  }
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

/// SelectVST - Select a NEON vst1/vst2/vst3/vst4 node.  DOpcodes are the
/// double-register store opcodes; QOpcodes0/QOpcodes1 are the opcodes for the
/// two halves of a quad-register store, both indexed by element size.
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >=1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Map the element size onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vst type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VST1");
    break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 10> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  if (is64BitVector) {
    if (llvm::ModelWithRegSequence() && NumVecs >= 2) {
      SDValue RegSeq;
      SDValue V0 = N->getOperand(0+3);
      SDValue V1 = N->getOperand(1+3);

      // Form a REG_SEQUENCE to force register allocation.
      if (NumVecs == 2)
        RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
      else {
        SDValue V2 = N->getOperand(2+3);
        // If it's a vst3, form a quad D-register and leave the last part as
        // an undef.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(3+3);
        RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
      }

      // Now extract the D registers back out.
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT,
                                                   RegSeq));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT,
                                                   RegSeq));
      if (NumVecs > 2)
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,
                                                     RegSeq));
      if (NumVecs > 3)
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,
                                                     RegSeq));
    } else {
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
        Ops.push_back(N->getOperand(Vec+3));
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    unsigned Opc = DOpcodes[OpcodeIndex];
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+5);
  }

  EVT RegVT = GetNEONSubregVT(VT);
  if (NumVecs <= 2) {
    // Quad registers are directly supported for VST1 and VST2,
    // storing pairs of D regs.
    unsigned Opc = QOpcodes0[OpcodeIndex];
    if (llvm::ModelWithRegSequence() && NumVecs == 2) {
      // First extract the pair of Q registers.
      SDValue Q0 = N->getOperand(3);
      SDValue Q1 = N->getOperand(4);

      // Form a QQ register.
      SDValue QQ = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);

      // Now extract the D registers back out.
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
                                                   QQ));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
                                                   QQ));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, RegVT,
                                                   QQ));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, RegVT,
                                                   QQ));
      Ops.push_back(Pred);
      Ops.push_back(Reg0); // predicate register
      Ops.push_back(Chain);
      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 5 + 4);
    } else {
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
                                                     N->getOperand(Vec+3)));
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
                                                     N->getOperand(Vec+3)));
      }
      Ops.push_back(Pred);
      Ops.push_back(Reg0); // predicate register
      Ops.push_back(Chain);
      return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(),
                                    5 + 2 * NumVecs);
    }
  }

  // Otherwise, quad registers are stored with two separate instructions,
  // where one stores the even registers and the other stores the odd registers.
  if (llvm::ModelWithRegSequence()) {
    // Form the QQQQ REG_SEQUENCE.
    SDValue V[8];
    for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
      V[i] = CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
                                            N->getOperand(Vec+3));
      V[i+1] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
                                              N->getOperand(Vec+3));
    }
    if (NumVecs == 3)
      // Pad the last pair with undefs for a vst3.
      V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   dl, RegVT), 0);

    SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
                                       V[4], V[5], V[6], V[7]), 0);

    // Store the even D registers.
    assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
    Ops.push_back(Reg0); // post-access address offset
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec*2, dl,
                                                   RegVT, RegSeq));
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    unsigned Opc = QOpcodes0[OpcodeIndex];
    SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                          MVT::Other, Ops.data(), NumVecs+6);
    Chain = SDValue(VStA, 1);

    // Store the odd D registers, reusing Ops with the updated address and
    // chain from the first store.
    Ops[0] = SDValue(VStA, 0); // MemAddr
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1+Vec*2, dl,
                                                  RegVT, RegSeq);
    Ops[NumVecs+5] = Chain;
    Opc = QOpcodes1[OpcodeIndex];
    SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                          MVT::Other, Ops.data(), NumVecs+6);
    Chain = SDValue(VStB, 1);
    ReplaceUses(SDValue(N, 0), Chain);
    return NULL;
  } else {
    Ops.push_back(Reg0); // post-access address offset

    // Store the even subregs.
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
                                                   N->getOperand(Vec+3)));
    Ops.push_back(Pred);
    Ops.push_back(Reg0); // predicate register
    Ops.push_back(Chain);
    unsigned Opc = QOpcodes0[OpcodeIndex];
    SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                          MVT::Other, Ops.data(), NumVecs+6);
    Chain = SDValue(VStA, 1);

    // Store the odd subregs.
    Ops[0] = SDValue(VStA, 0); // MemAddr
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
                                                  N->getOperand(Vec+3));
    Ops[NumVecs+5] = Chain;
    Opc = QOpcodes1[OpcodeIndex];
    SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
                                          MVT::Other, Ops.data(), NumVecs+6);
    Chain = SDValue(VStB, 1);
    ReplaceUses(SDValue(N, 0), Chain);
    return NULL;
  }
}

/// SelectVLDSTLane - Select a NEON load/store-lane node (vld2/3/4 lane or
/// vst2/3/4 lane); IsLoad distinguishes the two.
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
                                         unsigned NumVecs, unsigned *DOpcodes,
                                         unsigned *QOpcodes0,
                                         unsigned *QOpcodes1) {
  assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
  EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
  bool is64BitVector = VT.is64BitVector();

  // Quad registers are handled by load/store of subregs. Find the subreg info.
  unsigned NumElts = 0;
  int SubregIdx = 0;
  bool Even = false;
  EVT RegVT = VT;
  if (!is64BitVector) {
    RegVT = GetNEONSubregVT(VT);
    NumElts = RegVT.getVectorNumElements();
    SubregIdx = (Lane < NumElts) ?
                ARM::dsub_0 : ARM::dsub_1;
    Even = Lane < NumElts;
  }

  // Map the element size onto an index into the opcode tables.
  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld/vst lane type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
    // Quad-register operations:
  case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);

  SmallVector<SDValue, 10> Ops;
  Ops.push_back(MemAddr);
  Ops.push_back(Align);

  unsigned Opc = 0;
  if (is64BitVector) {
    Opc = DOpcodes[OpcodeIndex];
    if (llvm::ModelWithRegSequence()) {
      SDValue RegSeq;
      SDValue V0 = N->getOperand(0+3);
      SDValue V1 = N->getOperand(1+3);
      if (NumVecs == 2) {
        RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
      } else {
        SDValue V2 = N->getOperand(2+3);
        // For NumVecs == 3, pad the sequence with an undef D register.
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(3+3);
        RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
      }

      // Now extract the D registers back out.
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT,
                                                   RegSeq));
      Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT,
                                                   RegSeq));
      if (NumVecs > 2)
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,
                                                     RegSeq));
      if (NumVecs > 3)
        Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,
                                                     RegSeq));
    } else {
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
        Ops.push_back(N->getOperand(Vec+3));
    }
  } else {
    // Check if this is loading the even or odd subreg of a Q register.
    if (Lane < NumElts) {
      Opc = QOpcodes0[OpcodeIndex];
    } else {
      Lane -= NumElts;
      Opc = QOpcodes1[OpcodeIndex];
    }

    if (llvm::ModelWithRegSequence()) {
      SDValue RegSeq;
      SDValue V0 = N->getOperand(0+3);
      SDValue V1 = N->getOperand(1+3);
      if (NumVecs == 2) {
        RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
      } else {
        SDValue V2 = N->getOperand(2+3);
        SDValue V3 = (NumVecs == 3)
          ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : N->getOperand(3+3);
        RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
      }

      // Extract the subregs of the input vector.
      unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
        Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
                                                     RegSeq));
    } else {
      // Extract the subregs of the input vector.
      for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
        Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
                                                     N->getOperand(Vec+3)));
    }
  }
  Ops.push_back(getI32Imm(Lane));
  Ops.push_back(Pred);
  Ops.push_back(Reg0);
  Ops.push_back(Chain);

  if (!IsLoad)
    return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+6);

  std::vector<EVT> ResTys(NumVecs, RegVT);
  ResTys.push_back(MVT::Other);
  SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(),NumVecs+6);

  if (llvm::ModelWithRegSequence()) {
    // Form a REG_SEQUENCE to force register allocation.
    SDValue RegSeq;
    if (is64BitVector) {
      SDValue V0 = SDValue(VLdLn, 0);
      SDValue V1 = SDValue(VLdLn, 1);
      if (NumVecs == 2) {
        RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
      } else {
        SDValue V2 = SDValue(VLdLn, 2);
        // If it's a vld3, form a quad D-register but discard the last part.
        SDValue V3 = (NumVecs == 3)
          ?
            SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
          : SDValue(VLdLn, 3);
        RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
      }
    } else {
      // For 128-bit vectors, take the 64-bit results of the load and insert
      // them as subregs into the result.
      SDValue V[8];
      for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
        if (Even) {
          V[i] = SDValue(VLdLn, Vec);
          V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                  dl, RegVT), 0);
        } else {
          V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                dl, RegVT), 0);
          V[i+1] = SDValue(VLdLn, Vec);
        }
      }
      if (NumVecs == 3)
        V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     dl, RegVT), 0);

      if (NumVecs == 2)
        RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
      else
        RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
                                   V[4], V[5], V[6], V[7]), 0);
    }

    // Extract the results back out of the REG_SEQUENCE and rewire the uses.
    assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
    assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
    unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
    for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
      ReplaceUses(SDValue(N, Vec),
                  CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
    ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
    return NULL;
  }

  // For a 64-bit vector load to D registers, nothing more needs to be done.
  if (is64BitVector)
    return VLdLn;

  // For 128-bit vectors, take the 64-bit results of the load and insert them
  // as subregs into the result.
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
    SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
                                                    N->getOperand(Vec+3),
                                                    SDValue(VLdLn, Vec));
    ReplaceUses(SDValue(N, Vec), QuadVec);
  }

  Chain = SDValue(VLdLn, NumVecs);
  ReplaceUses(SDValue(N, NumVecs), Chain);
  return NULL;
}

/// SelectV6T2BitfieldExtractOp - Try to select an AND-with-mask of a shift,
/// or a shift-of-shift, as a v6t2 bitfield extract (SBFX/UBFX/t2SBFX/t2UBFX).
/// Returns NULL if the subtarget lacks v6t2 ops or the pattern doesn't match.
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;

  unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
    : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);


  // For unsigned extracts, check for a shift right and mask
  unsigned And_imm = 0;
  if (N->getOpcode() == ISD::AND) {
    if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {

      // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return NULL;

      unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");

        unsigned Width = CountTrailingOnes_32(And_imm);
        unsigned LSB = Srl_imm;
        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
        SDValue Ops[] = { N->getOperand(0).getOperand(0),
                          CurDAG->getTargetConstant(LSB, MVT::i32),
                          CurDAG->getTargetConstant(Width, MVT::i32),
                          getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift
  unsigned Shl_imm = 0;
  if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
    assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
    unsigned Srl_imm = 0;
    if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
      assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
      unsigned Width = 32 - Srl_imm;
      int LSB = Srl_imm - Shl_imm;
      // A negative LSB means the field would start below bit 0; not a valid
      // extract.
      if (LSB < 0)
        return NULL;
      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
      SDValue Ops[] = { N->getOperand(0).getOperand(0),
                        CurDAG->getTargetConstant(LSB, MVT::i32),
                        CurDAG->getTargetConstant(Width, MVT::i32),
                        getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}

/// SelectT2CMOVShiftOp - Try to fold a shifter operand into the true value of
/// a Thumb2 conditional move, producing t2MOVCC{lsl,lsr,asr,ror}.
SDNode *ARMDAGToDAGISel::
SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1)) {
    unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
    unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
    case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
    case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
    case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
      break;
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6);
  }
  return 0;
}

/// SelectARMCMOVShiftOp - Try to fold a shifter operand into the true value
/// of an ARM conditional move, producing MOVCCs.
SDNode *ARMDAGToDAGISel::
SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
  }
  return 0;
}

/// SelectT2CMOVSoImmOp - Try to use a Thumb2 so_imm constant directly as the
/// true value of a conditional move, producing t2MOVCCi.
SDNode
*ARMDAGToDAGISel::
SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  if (Predicate_t2_so_imm(TrueVal.getNode())) {
    SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N,
                                ARM::t2MOVCCi, MVT::i32, Ops, 5);
  }
  return 0;
}

/// SelectARMCMOVSoImmOp - Try to use an ARM so_imm constant directly as the
/// true value of a conditional move, producing MOVCCi.
SDNode *ARMDAGToDAGISel::
SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  if (Predicate_so_imm(TrueVal.getNode())) {
    SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
    SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
    SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N,
                                ARM::MOVCCi, MVT::i32, Ops, 5);
  }
  return 0;
}

/// SelectCMOVOp - Select an ARMcmov node, trying progressively more general
/// forms: shifter-operand true value, so_imm true value, then plain
/// register-register conditional moves.
SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
  EVT VT = N->getValueType(0);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue CC = N->getOperand(2);
  SDValue CCR = N->getOperand(3);
  SDValue InFlag = N->getOperand(4);
  assert(CC.getOpcode() == ISD::Constant);
  assert(CCR.getOpcode() == ISD::Register);
  ARMCC::CondCodes CCVal =
    (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();

  if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
    // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
    // Pattern complexity = 18  cost = 1  size = 0
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue
            CPTmp2;
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        // Retry the commuted operands with the opposite condition.
        Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
                                  ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
                                   ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }

    // Pattern: (ARMcmov:i32 GPR:i32:$false,
    //             (imm:i32)<<P:Predicate_so_imm>>:$true,
    //             (imm:i32):$cc)
    // Emits: (MOVCCi:i32 GPR:i32:$false,
    //           (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
    // Pattern complexity = 10  cost = 1  size = 0
    if (Subtarget->isThumb()) {
      SDNode *Res = SelectT2CMOVSoImmOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVSoImmOp(N, TrueVal, FalseVal,
                                  ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVSoImmOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVSoImmOp(N, TrueVal, FalseVal,
                                   ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }
  }

  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 1  size = 0
  //
  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
  // Pattern complexity = 6  cost = 11  size = 0
  //
  // Also FCPYScc and FCPYDcc.
1810 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32); 1811 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag }; 1812 unsigned Opc = 0; 1813 switch (VT.getSimpleVT().SimpleTy) { 1814 default: assert(false && "Illegal conditional move type!"); 1815 break; 1816 case MVT::i32: 1817 Opc = Subtarget->isThumb() 1818 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo) 1819 : ARM::MOVCCr; 1820 break; 1821 case MVT::f32: 1822 Opc = ARM::VMOVScc; 1823 break; 1824 case MVT::f64: 1825 Opc = ARM::VMOVDcc; 1826 break; 1827 } 1828 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5); 1829} 1830 1831SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) { 1832 // The only time a CONCAT_VECTORS operation can have legal types is when 1833 // two 64-bit vectors are concatenated to a 128-bit vector. 1834 EVT VT = N->getValueType(0); 1835 if (!VT.is128BitVector() || N->getNumOperands() != 2) 1836 llvm_unreachable("unexpected CONCAT_VECTORS"); 1837 DebugLoc dl = N->getDebugLoc(); 1838 SDValue V0 = N->getOperand(0); 1839 SDValue V1 = N->getOperand(1); 1840 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32); 1841 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32); 1842 const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 }; 1843 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4); 1844} 1845 1846SDNode *ARMDAGToDAGISel::Select(SDNode *N) { 1847 DebugLoc dl = N->getDebugLoc(); 1848 1849 if (N->isMachineOpcode()) 1850 return NULL; // Already selected. 1851 1852 switch (N->getOpcode()) { 1853 default: break; 1854 case ISD::Constant: { 1855 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue(); 1856 bool UseCP = true; 1857 if (Subtarget->hasThumb2()) 1858 // Thumb2-aware targets have the MOVT instruction, so all immediates can 1859 // be done with MOV + MOVT, at worst. 
1860 UseCP = 0; 1861 else { 1862 if (Subtarget->isThumb()) { 1863 UseCP = (Val > 255 && // MOV 1864 ~Val > 255 && // MOV + MVN 1865 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL 1866 } else 1867 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV 1868 ARM_AM::getSOImmVal(~Val) == -1 && // MVN 1869 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs. 1870 } 1871 1872 if (UseCP) { 1873 SDValue CPIdx = 1874 CurDAG->getTargetConstantPool(ConstantInt::get( 1875 Type::getInt32Ty(*CurDAG->getContext()), Val), 1876 TLI.getPointerTy()); 1877 1878 SDNode *ResNode; 1879 if (Subtarget->isThumb1Only()) { 1880 SDValue Pred = getAL(CurDAG); 1881 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 1882 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() }; 1883 ResNode = CurDAG->getMachineNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other, 1884 Ops, 4); 1885 } else { 1886 SDValue Ops[] = { 1887 CPIdx, 1888 CurDAG->getRegister(0, MVT::i32), 1889 CurDAG->getTargetConstant(0, MVT::i32), 1890 getAL(CurDAG), 1891 CurDAG->getRegister(0, MVT::i32), 1892 CurDAG->getEntryNode() 1893 }; 1894 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other, 1895 Ops, 6); 1896 } 1897 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0)); 1898 return NULL; 1899 } 1900 1901 // Other cases are autogenerated. 1902 break; 1903 } 1904 case ISD::FrameIndex: { 1905 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm. 1906 int FI = cast<FrameIndexSDNode>(N)->getIndex(); 1907 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); 1908 if (Subtarget->isThumb1Only()) { 1909 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI, 1910 CurDAG->getTargetConstant(0, MVT::i32)); 1911 } else { 1912 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ? 
1913 ARM::t2ADDri : ARM::ADDri); 1914 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32), 1915 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 1916 CurDAG->getRegister(0, MVT::i32) }; 1917 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 1918 } 1919 } 1920 case ISD::SRL: 1921 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false)) 1922 return I; 1923 break; 1924 case ISD::SRA: 1925 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true)) 1926 return I; 1927 break; 1928 case ISD::MUL: 1929 if (Subtarget->isThumb1Only()) 1930 break; 1931 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) { 1932 unsigned RHSV = C->getZExtValue(); 1933 if (!RHSV) break; 1934 if (isPowerOf2_32(RHSV-1)) { // 2^n+1? 1935 unsigned ShImm = Log2_32(RHSV-1); 1936 if (ShImm >= 32) 1937 break; 1938 SDValue V = N->getOperand(0); 1939 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm); 1940 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32); 1941 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1942 if (Subtarget->isThumb()) { 1943 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1944 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6); 1945 } else { 1946 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1947 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7); 1948 } 1949 } 1950 if (isPowerOf2_32(RHSV+1)) { // 2^n-1? 
1951 unsigned ShImm = Log2_32(RHSV+1); 1952 if (ShImm >= 32) 1953 break; 1954 SDValue V = N->getOperand(0); 1955 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm); 1956 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32); 1957 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1958 if (Subtarget->isThumb()) { 1959 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1960 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6); 1961 } else { 1962 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 }; 1963 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7); 1964 } 1965 } 1966 } 1967 break; 1968 case ISD::AND: { 1969 // Check for unsigned bitfield extract 1970 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false)) 1971 return I; 1972 1973 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits 1974 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits 1975 // are entirely contributed by c2 and lower 16-bits are entirely contributed 1976 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)). 1977 // Select it to: "movt x, ((c1 & 0xffff) >> 16) 1978 EVT VT = N->getValueType(0); 1979 if (VT != MVT::i32) 1980 break; 1981 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2()) 1982 ? ARM::t2MOVTi16 1983 : (Subtarget->hasV6T2Ops() ? 
ARM::MOVTi16 : 0); 1984 if (!Opc) 1985 break; 1986 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 1987 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1988 if (!N1C) 1989 break; 1990 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) { 1991 SDValue N2 = N0.getOperand(1); 1992 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 1993 if (!N2C) 1994 break; 1995 unsigned N1CVal = N1C->getZExtValue(); 1996 unsigned N2CVal = N2C->getZExtValue(); 1997 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) && 1998 (N1CVal & 0xffffU) == 0xffffU && 1999 (N2CVal & 0xffffU) == 0x0U) { 2000 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16, 2001 MVT::i32); 2002 SDValue Ops[] = { N0.getOperand(0), Imm16, 2003 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2004 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4); 2005 } 2006 } 2007 break; 2008 } 2009 case ARMISD::VMOVRRD: 2010 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32, 2011 N->getOperand(0), getAL(CurDAG), 2012 CurDAG->getRegister(0, MVT::i32)); 2013 case ISD::UMUL_LOHI: { 2014 if (Subtarget->isThumb1Only()) 2015 break; 2016 if (Subtarget->isThumb()) { 2017 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2018 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2019 CurDAG->getRegister(0, MVT::i32) }; 2020 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,Ops,4); 2021 } else { 2022 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2023 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2024 CurDAG->getRegister(0, MVT::i32) }; 2025 return CurDAG->getMachineNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5); 2026 } 2027 } 2028 case ISD::SMUL_LOHI: { 2029 if (Subtarget->isThumb1Only()) 2030 break; 2031 if (Subtarget->isThumb()) { 2032 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2033 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2034 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4); 2035 } else { 2036 SDValue 
Ops[] = { N->getOperand(0), N->getOperand(1), 2037 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2038 CurDAG->getRegister(0, MVT::i32) }; 2039 return CurDAG->getMachineNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5); 2040 } 2041 } 2042 case ISD::LOAD: { 2043 SDNode *ResNode = 0; 2044 if (Subtarget->isThumb() && Subtarget->hasThumb2()) 2045 ResNode = SelectT2IndexedLoad(N); 2046 else 2047 ResNode = SelectARMIndexedLoad(N); 2048 if (ResNode) 2049 return ResNode; 2050 2051 // VLDMQ must be custom-selected for "v2f64 load" to set the AM5Opc value. 2052 if (Subtarget->hasVFP2() && 2053 N->getValueType(0).getSimpleVT().SimpleTy == MVT::v2f64) { 2054 SDValue Chain = N->getOperand(0); 2055 SDValue AM5Opc = 2056 CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::ia, 4), MVT::i32); 2057 SDValue Pred = getAL(CurDAG); 2058 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2059 SDValue Ops[] = { N->getOperand(1), AM5Opc, Pred, PredReg, Chain }; 2060 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2061 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 2062 SDNode *Ret = CurDAG->getMachineNode(ARM::VLDMQ, dl, 2063 MVT::v2f64, MVT::Other, Ops, 5); 2064 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); 2065 return Ret; 2066 } 2067 // Other cases are autogenerated. 2068 break; 2069 } 2070 case ISD::STORE: { 2071 // VSTMQ must be custom-selected for "v2f64 store" to set the AM5Opc value. 
2072 if (Subtarget->hasVFP2() && 2073 N->getOperand(1).getValueType().getSimpleVT().SimpleTy == MVT::v2f64) { 2074 SDValue Chain = N->getOperand(0); 2075 SDValue AM5Opc = 2076 CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::ia, 4), MVT::i32); 2077 SDValue Pred = getAL(CurDAG); 2078 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2079 SDValue Ops[] = { N->getOperand(1), N->getOperand(2), 2080 AM5Opc, Pred, PredReg, Chain }; 2081 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2082 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 2083 SDNode *Ret = CurDAG->getMachineNode(ARM::VSTMQ, dl, MVT::Other, Ops, 6); 2084 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); 2085 return Ret; 2086 } 2087 // Other cases are autogenerated. 2088 break; 2089 } 2090 case ARMISD::BRCOND: { 2091 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2092 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2093 // Pattern complexity = 6 cost = 1 size = 0 2094 2095 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2096 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc) 2097 // Pattern complexity = 6 cost = 1 size = 0 2098 2099 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2100 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2101 // Pattern complexity = 6 cost = 1 size = 0 2102 2103 unsigned Opc = Subtarget->isThumb() ? 2104 ((Subtarget->hasThumb2()) ? 
ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; 2105 SDValue Chain = N->getOperand(0); 2106 SDValue N1 = N->getOperand(1); 2107 SDValue N2 = N->getOperand(2); 2108 SDValue N3 = N->getOperand(3); 2109 SDValue InFlag = N->getOperand(4); 2110 assert(N1.getOpcode() == ISD::BasicBlock); 2111 assert(N2.getOpcode() == ISD::Constant); 2112 assert(N3.getOpcode() == ISD::Register); 2113 2114 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) 2115 cast<ConstantSDNode>(N2)->getZExtValue()), 2116 MVT::i32); 2117 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; 2118 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, 2119 MVT::Flag, Ops, 5); 2120 Chain = SDValue(ResNode, 0); 2121 if (N->getNumValues() == 2) { 2122 InFlag = SDValue(ResNode, 1); 2123 ReplaceUses(SDValue(N, 1), InFlag); 2124 } 2125 ReplaceUses(SDValue(N, 0), 2126 SDValue(Chain.getNode(), Chain.getResNo())); 2127 return NULL; 2128 } 2129 case ARMISD::CMOV: 2130 return SelectCMOVOp(N); 2131 case ARMISD::CNEG: { 2132 EVT VT = N->getValueType(0); 2133 SDValue N0 = N->getOperand(0); 2134 SDValue N1 = N->getOperand(1); 2135 SDValue N2 = N->getOperand(2); 2136 SDValue N3 = N->getOperand(3); 2137 SDValue InFlag = N->getOperand(4); 2138 assert(N2.getOpcode() == ISD::Constant); 2139 assert(N3.getOpcode() == ISD::Register); 2140 2141 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) 2142 cast<ConstantSDNode>(N2)->getZExtValue()), 2143 MVT::i32); 2144 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag }; 2145 unsigned Opc = 0; 2146 switch (VT.getSimpleVT().SimpleTy) { 2147 default: assert(false && "Illegal conditional move type!"); 2148 break; 2149 case MVT::f32: 2150 Opc = ARM::VNEGScc; 2151 break; 2152 case MVT::f64: 2153 Opc = ARM::VNEGDcc; 2154 break; 2155 } 2156 return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5); 2157 } 2158 2159 case ARMISD::VZIP: { 2160 unsigned Opc = 0; 2161 EVT VT = N->getValueType(0); 2162 switch (VT.getSimpleVT().SimpleTy) { 2163 default: return NULL; 2164 case MVT::v8i8: Opc = ARM::VZIPd8; break; 2165 case 
MVT::v4i16: Opc = ARM::VZIPd16; break; 2166 case MVT::v2f32: 2167 case MVT::v2i32: Opc = ARM::VZIPd32; break; 2168 case MVT::v16i8: Opc = ARM::VZIPq8; break; 2169 case MVT::v8i16: Opc = ARM::VZIPq16; break; 2170 case MVT::v4f32: 2171 case MVT::v4i32: Opc = ARM::VZIPq32; break; 2172 } 2173 SDValue Pred = getAL(CurDAG); 2174 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2175 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2176 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2177 } 2178 case ARMISD::VUZP: { 2179 unsigned Opc = 0; 2180 EVT VT = N->getValueType(0); 2181 switch (VT.getSimpleVT().SimpleTy) { 2182 default: return NULL; 2183 case MVT::v8i8: Opc = ARM::VUZPd8; break; 2184 case MVT::v4i16: Opc = ARM::VUZPd16; break; 2185 case MVT::v2f32: 2186 case MVT::v2i32: Opc = ARM::VUZPd32; break; 2187 case MVT::v16i8: Opc = ARM::VUZPq8; break; 2188 case MVT::v8i16: Opc = ARM::VUZPq16; break; 2189 case MVT::v4f32: 2190 case MVT::v4i32: Opc = ARM::VUZPq32; break; 2191 } 2192 SDValue Pred = getAL(CurDAG); 2193 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2194 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2195 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2196 } 2197 case ARMISD::VTRN: { 2198 unsigned Opc = 0; 2199 EVT VT = N->getValueType(0); 2200 switch (VT.getSimpleVT().SimpleTy) { 2201 default: return NULL; 2202 case MVT::v8i8: Opc = ARM::VTRNd8; break; 2203 case MVT::v4i16: Opc = ARM::VTRNd16; break; 2204 case MVT::v2f32: 2205 case MVT::v2i32: Opc = ARM::VTRNd32; break; 2206 case MVT::v16i8: Opc = ARM::VTRNq8; break; 2207 case MVT::v8i16: Opc = ARM::VTRNq16; break; 2208 case MVT::v4f32: 2209 case MVT::v4i32: Opc = ARM::VTRNq32; break; 2210 } 2211 SDValue Pred = getAL(CurDAG); 2212 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2213 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2214 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2215 } 2216 2217 case 
ISD::INTRINSIC_VOID: 2218 case ISD::INTRINSIC_W_CHAIN: { 2219 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 2220 switch (IntNo) { 2221 default: 2222 break; 2223 2224 case Intrinsic::arm_neon_vld1: { 2225 unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16, 2226 ARM::VLD1d32, ARM::VLD1d64 }; 2227 unsigned QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16, 2228 ARM::VLD1q32, ARM::VLD1q64 }; 2229 return SelectVLD(N, 1, DOpcodes, QOpcodes, 0); 2230 } 2231 2232 case Intrinsic::arm_neon_vld2: { 2233 unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16, 2234 ARM::VLD2d32, ARM::VLD1q64 }; 2235 unsigned QOpcodes[] = { ARM::VLD2q8, ARM::VLD2q16, ARM::VLD2q32 }; 2236 return SelectVLD(N, 2, DOpcodes, QOpcodes, 0); 2237 } 2238 2239 case Intrinsic::arm_neon_vld3: { 2240 unsigned DOpcodes[] = { ARM::VLD3d8, ARM::VLD3d16, 2241 ARM::VLD3d32, ARM::VLD1d64T }; 2242 unsigned QOpcodes0[] = { ARM::VLD3q8_UPD, 2243 ARM::VLD3q16_UPD, 2244 ARM::VLD3q32_UPD }; 2245 unsigned QOpcodes1[] = { ARM::VLD3q8odd_UPD, 2246 ARM::VLD3q16odd_UPD, 2247 ARM::VLD3q32odd_UPD }; 2248 return SelectVLD(N, 3, DOpcodes, QOpcodes0, QOpcodes1); 2249 } 2250 2251 case Intrinsic::arm_neon_vld4: { 2252 unsigned DOpcodes[] = { ARM::VLD4d8, ARM::VLD4d16, 2253 ARM::VLD4d32, ARM::VLD1d64Q }; 2254 unsigned QOpcodes0[] = { ARM::VLD4q8_UPD, 2255 ARM::VLD4q16_UPD, 2256 ARM::VLD4q32_UPD }; 2257 unsigned QOpcodes1[] = { ARM::VLD4q8odd_UPD, 2258 ARM::VLD4q16odd_UPD, 2259 ARM::VLD4q32odd_UPD }; 2260 return SelectVLD(N, 4, DOpcodes, QOpcodes0, QOpcodes1); 2261 } 2262 2263 case Intrinsic::arm_neon_vld2lane: { 2264 unsigned DOpcodes[] = { ARM::VLD2LNd8, ARM::VLD2LNd16, ARM::VLD2LNd32 }; 2265 unsigned QOpcodes0[] = { ARM::VLD2LNq16, ARM::VLD2LNq32 }; 2266 unsigned QOpcodes1[] = { ARM::VLD2LNq16odd, ARM::VLD2LNq32odd }; 2267 return SelectVLDSTLane(N, true, 2, DOpcodes, QOpcodes0, QOpcodes1); 2268 } 2269 2270 case Intrinsic::arm_neon_vld3lane: { 2271 unsigned DOpcodes[] = { ARM::VLD3LNd8, ARM::VLD3LNd16, ARM::VLD3LNd32 }; 
2272 unsigned QOpcodes0[] = { ARM::VLD3LNq16, ARM::VLD3LNq32 }; 2273 unsigned QOpcodes1[] = { ARM::VLD3LNq16odd, ARM::VLD3LNq32odd }; 2274 return SelectVLDSTLane(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1); 2275 } 2276 2277 case Intrinsic::arm_neon_vld4lane: { 2278 unsigned DOpcodes[] = { ARM::VLD4LNd8, ARM::VLD4LNd16, ARM::VLD4LNd32 }; 2279 unsigned QOpcodes0[] = { ARM::VLD4LNq16, ARM::VLD4LNq32 }; 2280 unsigned QOpcodes1[] = { ARM::VLD4LNq16odd, ARM::VLD4LNq32odd }; 2281 return SelectVLDSTLane(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1); 2282 } 2283 2284 case Intrinsic::arm_neon_vst1: { 2285 unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16, 2286 ARM::VST1d32, ARM::VST1d64 }; 2287 unsigned QOpcodes[] = { ARM::VST1q8, ARM::VST1q16, 2288 ARM::VST1q32, ARM::VST1q64 }; 2289 return SelectVST(N, 1, DOpcodes, QOpcodes, 0); 2290 } 2291 2292 case Intrinsic::arm_neon_vst2: { 2293 unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16, 2294 ARM::VST2d32, ARM::VST1q64 }; 2295 unsigned QOpcodes[] = { ARM::VST2q8, ARM::VST2q16, ARM::VST2q32 }; 2296 return SelectVST(N, 2, DOpcodes, QOpcodes, 0); 2297 } 2298 2299 case Intrinsic::arm_neon_vst3: { 2300 unsigned DOpcodes[] = { ARM::VST3d8, ARM::VST3d16, 2301 ARM::VST3d32, ARM::VST1d64T }; 2302 unsigned QOpcodes0[] = { ARM::VST3q8_UPD, 2303 ARM::VST3q16_UPD, 2304 ARM::VST3q32_UPD }; 2305 unsigned QOpcodes1[] = { ARM::VST3q8odd_UPD, 2306 ARM::VST3q16odd_UPD, 2307 ARM::VST3q32odd_UPD }; 2308 return SelectVST(N, 3, DOpcodes, QOpcodes0, QOpcodes1); 2309 } 2310 2311 case Intrinsic::arm_neon_vst4: { 2312 unsigned DOpcodes[] = { ARM::VST4d8, ARM::VST4d16, 2313 ARM::VST4d32, ARM::VST1d64Q }; 2314 unsigned QOpcodes0[] = { ARM::VST4q8_UPD, 2315 ARM::VST4q16_UPD, 2316 ARM::VST4q32_UPD }; 2317 unsigned QOpcodes1[] = { ARM::VST4q8odd_UPD, 2318 ARM::VST4q16odd_UPD, 2319 ARM::VST4q32odd_UPD }; 2320 return SelectVST(N, 4, DOpcodes, QOpcodes0, QOpcodes1); 2321 } 2322 2323 case Intrinsic::arm_neon_vst2lane: { 2324 unsigned DOpcodes[] = { ARM::VST2LNd8, 
ARM::VST2LNd16, ARM::VST2LNd32 }; 2325 unsigned QOpcodes0[] = { ARM::VST2LNq16, ARM::VST2LNq32 }; 2326 unsigned QOpcodes1[] = { ARM::VST2LNq16odd, ARM::VST2LNq32odd }; 2327 return SelectVLDSTLane(N, false, 2, DOpcodes, QOpcodes0, QOpcodes1); 2328 } 2329 2330 case Intrinsic::arm_neon_vst3lane: { 2331 unsigned DOpcodes[] = { ARM::VST3LNd8, ARM::VST3LNd16, ARM::VST3LNd32 }; 2332 unsigned QOpcodes0[] = { ARM::VST3LNq16, ARM::VST3LNq32 }; 2333 unsigned QOpcodes1[] = { ARM::VST3LNq16odd, ARM::VST3LNq32odd }; 2334 return SelectVLDSTLane(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1); 2335 } 2336 2337 case Intrinsic::arm_neon_vst4lane: { 2338 unsigned DOpcodes[] = { ARM::VST4LNd8, ARM::VST4LNd16, ARM::VST4LNd32 }; 2339 unsigned QOpcodes0[] = { ARM::VST4LNq16, ARM::VST4LNq32 }; 2340 unsigned QOpcodes1[] = { ARM::VST4LNq16odd, ARM::VST4LNq32odd }; 2341 return SelectVLDSTLane(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1); 2342 } 2343 } 2344 break; 2345 } 2346 2347 case ISD::CONCAT_VECTORS: 2348 return SelectConcatVector(N); 2349 } 2350 2351 return SelectCode(N); 2352} 2353 2354bool ARMDAGToDAGISel:: 2355SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, 2356 std::vector<SDValue> &OutOps) { 2357 assert(ConstraintCode == 'm' && "unexpected asm memory constraint"); 2358 // Require the address to be in a register. That is safe for all ARM 2359 // variants and it is hard to do anything much smarter without knowing 2360 // how the operand is used. 2361 OutOps.push_back(Op); 2362 return false; 2363} 2364 2365/// createARMISelDag - This pass converts a legalized DAG into a 2366/// ARM-specific DAG, ready for instruction scheduling. 2367/// 2368FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM, 2369 CodeGenOpt::Level OptLevel) { 2370 return new ARMDAGToDAGISel(TM, OptLevel); 2371} 2372 2373/// ModelWithRegSequence - Return true if isel should use REG_SEQUENCE to model 2374/// operations involving sub-registers. 
2375bool llvm::ModelWithRegSequence() { 2376 return UseRegSeq; 2377} 2378