ARMISelDAGToDAG.cpp revision fb77752253717bc9c26cd2f6915925dc19edb8a3
//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  ARMBaseTargetMachine &TM;
  const ARMBaseInstrInfo *TII;

  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
                           CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
  }

  virtual const char *getPassName() const {
    return "ARM Instruction Selection";
  }

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  SDNode *Select(SDNode *N);

  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &BaseReg,
                               SDValue &ShReg, SDValue &Opc,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &BaseReg,
                               SDValue &Opc, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &BaseReg,
                                    SDValue &ShReg, SDValue &Opc) {
    // Don't apply the profitability check
    return SelectRegShifterOperand(N, BaseReg, ShReg, Opc, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &BaseReg,
                                    SDValue &Opc) {
    // Don't apply the profitability check
    return SelectImmShifterOperand(N, BaseReg, Opc, false);
  }

  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//    return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
                             unsigned Scale);
  bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);

  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics.  NumVecs should be
  /// 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics.  NumVecs should
  /// be 1, 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    unsigned *DOpcodes,
                    unsigned *QOpcodes0, unsigned *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics.  NumVecs should
  /// be 2, 3 or 4.  The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          unsigned *DOpcodes, unsigned *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics.  NumVecs
  /// should be 2, 3 or 4.  The opcode array specifies the instructions used
  /// for loading D registers.  (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       unsigned *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics.  NumVecs should be 2,
  /// 3 or 4.  These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  /// SelectCMOVOp - Select CMOV instructions for ARM.
  SDNode *SelectCMOVOp(SDNode *N);
  SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                              ARMCC::CondCodes CCVal, SDValue CCR,
                              SDValue InFlag);
  SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                               ARMCC::CondCodes CCVal, SDValue CCR,
                               SDValue InFlag);
  SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                            ARMCC::CondCodes CCVal, SDValue CCR,
                            SDValue InFlag);
  SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                             ARMCC::CondCodes CCVal, SDValue CCR,
                             SDValue InFlag);

  SDNode *SelectConcatVector(SDNode *N);

  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);

  // Form pairs of consecutive S, D, or Q registers.
  SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
  SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
  SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in [\arg RangeMin, \arg RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node.
/// VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isCortexA9())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  MVT::i32);
  return true;
}


bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
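      // (The TargetFrameIndex produced here is resolved to an SP- or
      // FP-relative offset later, during frame index elimination.)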
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}



bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
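  // That is, the shift is on the LHS of the add; when it can be folded,
  // the operands are swapped below so the shifted register becomes Offset.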
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}




//-----

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!Subtarget->isCortexA9() || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      MVT::i32);
      return AM2_BASE;
    }
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ?
    ARM_AM::add : ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant; if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
            dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (!Subtarget->isCortexA9() ||
            (N.hasOneUse() &&
             isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt))) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return AM2_SHOP;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                                  SDValue &Offset,
                                                  SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
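    // The pre-indexed immediate form takes a plain signed constant, so
    // negate the offset here for a decrement.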
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, MVT::i32);
    return true;
  }

  return false;
}


bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                               SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C; no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
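    // Immediate offset: register 0 stands in for the absent offset register.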
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               !(Subtarget->useMovt() &&
                 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;
  if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned LSNAlign = LSN->getAlignment();
    unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
    if (LSNAlign > MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics.  For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
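    // (SelectVLD and SelectVST, for example, refine it via GetVLDSTAlign.)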
    Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      MVT::i32);
    return true;
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                          Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
                                       SDValue &Offset, unsigned Scale) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Thumb does not have [sp, r] address mode.
  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP))
    return false;

  // FIXME: Why do we explicitly check for a match here and then return false?
  // Presumably to allow something else to match, but shouldn't this be
  // documented?
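  // (Most likely: an in-range reg+imm offset is better served by the
  // scaled-immediate patterns, so the reg+reg form declines the match.)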
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
    return false;

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 1);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 2);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
                                          SDValue &Base,
                                          SDValue &Offset) {
  return SelectThumbAddrModeRI(N, Base, Offset, 4);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (Scale == 4) {
    SDValue TmpBase, TmpOffImm;
    if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
      return false;  // We want to select tLDRspi / tSTRspi instead.

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
      return false;  // We want to select tLDRpci instead.
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
  if ((LHSR && LHSR->getReg() == ARM::SP) ||
      (RHSR && RHSR->getReg() == ARM::SP)) {
    ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
    ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
    unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
    unsigned RHSC = RHS ? RHS->getZExtValue() : 0;

    // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
    if (LHSC != 0 || RHSC != 0) return false;

    Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
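  // e.g. with Scale == 4, a constant offset of 28 gives RHSC == 7, which
  // becomes the scaled imm5 operand (7 * 4 == 28).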
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//


bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match base register only case. That is matched to a separate
  // lower complexity pattern with explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      OffImm = CurDAG->getTargetConstant(0, MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        !(Subtarget->useMovt() &&
          N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false;  // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
      }
      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm){
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
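  // Returning false for those cases is deliberate: it lets the
  // lower-complexity immediate patterns match instead.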
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  if (Subtarget->isCortexA9() && !N.hasOneUse()) {
    // Compute R + (R << [1,2,3]) and reuse it.
    return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant; if not, we can't
    // fold it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);

  return true;
}

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ?
          ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
          SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[] = { Base, AMOpc, getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 5);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[] = { Base, Offset, AMOpc, getAL(CurDAG),
                        CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
                                    MVT::i32, MVT::Other, Ops, 6);
    }
  }

  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return NULL;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return NULL;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[] = { Base, Offset, getAL(CurDAG),
                      CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
                                  MVT::Other, Ops, 5);
  }

  return NULL;
}

/// PairSRegs - Form a D register from a pair of S registers.
///
SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
///
SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
}

/// QuadSRegs - Form 4 consecutive S registers.
///
SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// QuadQRegs - Form 4 consecutive Q registers.
///
SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
                                   SDValue V2, SDValue V3) {
  DebugLoc dl = V0.getNode()->getDebugLoc();
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                          V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction.  The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
                                       bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ?
                    DOpcodes[OpcodeIndex] : QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs.  This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA, 7);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
                                 Ops.data(), Ops.size());
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return NULL;
}

SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   unsigned *DOpcodes, unsigned *QOpcodes0,
                                   unsigned *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
  DebugLoc dl = N->getDebugLoc();

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2 : 1) 1691 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) 1692 return NULL; 1693 1694 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1695 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 1696 1697 SDValue Chain = N->getOperand(0); 1698 EVT VT = N->getOperand(Vec0Idx).getValueType(); 1699 bool is64BitVector = VT.is64BitVector(); 1700 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector); 1701 1702 unsigned OpcodeIndex; 1703 switch (VT.getSimpleVT().SimpleTy) { 1704 default: llvm_unreachable("unhandled vst type"); 1705 // Double-register operations: 1706 case MVT::v8i8: OpcodeIndex = 0; break; 1707 case MVT::v4i16: OpcodeIndex = 1; break; 1708 case MVT::v2f32: 1709 case MVT::v2i32: OpcodeIndex = 2; break; 1710 case MVT::v1i64: OpcodeIndex = 3; break; 1711 // Quad-register operations: 1712 case MVT::v16i8: OpcodeIndex = 0; break; 1713 case MVT::v8i16: OpcodeIndex = 1; break; 1714 case MVT::v4f32: 1715 case MVT::v4i32: OpcodeIndex = 2; break; 1716 case MVT::v2i64: OpcodeIndex = 3; 1717 assert(NumVecs == 1 && "v2i64 type only supported for VST1"); 1718 break; 1719 } 1720 1721 std::vector<EVT> ResTys; 1722 if (isUpdating) 1723 ResTys.push_back(MVT::i32); 1724 ResTys.push_back(MVT::Other); 1725 1726 SDValue Pred = getAL(CurDAG); 1727 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1728 SmallVector<SDValue, 7> Ops; 1729 1730 // Double registers and VST1/VST2 quad registers are directly supported. 1731 if (is64BitVector || NumVecs <= 2) { 1732 SDValue SrcReg; 1733 if (NumVecs == 1) { 1734 SrcReg = N->getOperand(Vec0Idx); 1735 } else if (is64BitVector) { 1736 // Form a REG_SEQUENCE to force register allocation. 1737 SDValue V0 = N->getOperand(Vec0Idx + 0); 1738 SDValue V1 = N->getOperand(Vec0Idx + 1); 1739 if (NumVecs == 2) 1740 SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0); 1741 else { 1742 SDValue V2 = N->getOperand(Vec0Idx + 2); 1743 // If it's a vst3, form a quad D-register and leave the last part as 1744 // an undef. 1745 SDValue V3 = (NumVecs == 3) 1746 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0) 1747 : N->getOperand(Vec0Idx + 3); 1748 SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0); 1749 } 1750 } else { 1751 // Form a QQ register. 1752 SDValue Q0 = N->getOperand(Vec0Idx); 1753 SDValue Q1 = N->getOperand(Vec0Idx + 1); 1754 SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0); 1755 } 1756 1757 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : 1758 QOpcodes0[OpcodeIndex]); 1759 Ops.push_back(MemAddr); 1760 Ops.push_back(Align); 1761 if (isUpdating) { 1762 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1763 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); 1764 } 1765 Ops.push_back(SrcReg); 1766 Ops.push_back(Pred); 1767 Ops.push_back(Reg0); 1768 Ops.push_back(Chain); 1769 SDNode *VSt = 1770 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size()); 1771 1772 // Transfer memoperands. 1773 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1); 1774 1775 return VSt; 1776 } 1777 1778 // Otherwise, quad registers are stored with two separate instructions, 1779 // where one stores the even registers and the other stores the odd registers. 1780 1781 // Form the QQQQ REG_SEQUENCE. 1782 SDValue V0 = N->getOperand(Vec0Idx + 0); 1783 SDValue V1 = N->getOperand(Vec0Idx + 1); 1784 SDValue V2 = N->getOperand(Vec0Idx + 2); 1785 SDValue V3 = (NumVecs == 3) 1786 ? 
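      // Note (added commentary): for vst3 the fourth slot of the QQQQ
      // sequence is filled with an IMPLICIT_DEF (undef) register so the same
      // QQQQ pseudo-instructions serve both vst3 and vst4; the undef part is
      // never actually stored.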
SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 1787 : N->getOperand(Vec0Idx + 3); 1788 SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0); 1789 1790 // Store the even D registers. This is always an updating store, so that it 1791 // provides the address to the second store for the odd subregs. 1792 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain }; 1793 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl, 1794 MemAddr.getValueType(), 1795 MVT::Other, OpsA, 7); 1796 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1); 1797 Chain = SDValue(VStA, 1); 1798 1799 // Store the odd D registers. 1800 Ops.push_back(SDValue(VStA, 0)); 1801 Ops.push_back(Align); 1802 if (isUpdating) { 1803 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1804 assert(isa<ConstantSDNode>(Inc.getNode()) && 1805 "only constant post-increment update allowed for VST3/4"); 1806 (void)Inc; 1807 Ops.push_back(Reg0); 1808 } 1809 Ops.push_back(RegSeq); 1810 Ops.push_back(Pred); 1811 Ops.push_back(Reg0); 1812 Ops.push_back(Chain); 1813 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, 1814 Ops.data(), Ops.size()); 1815 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1); 1816 return VStB; 1817} 1818 1819SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad, 1820 bool isUpdating, unsigned NumVecs, 1821 unsigned *DOpcodes, 1822 unsigned *QOpcodes) { 1823 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range"); 1824 DebugLoc dl = N->getDebugLoc(); 1825 1826 SDValue MemAddr, Align; 1827 unsigned AddrOpIdx = isUpdating ? 1 : 2; 1828 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1) 1829 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align)) 1830 return NULL; 1831 1832 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1833 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 1834 1835 SDValue Chain = N->getOperand(0); 1836 unsigned Lane = 1837 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue(); 1838 EVT VT = N->getOperand(Vec0Idx).getValueType(); 1839 bool is64BitVector = VT.is64BitVector(); 1840 1841 unsigned Alignment = 0; 1842 if (NumVecs != 3) { 1843 Alignment = cast<ConstantSDNode>(Align)->getZExtValue(); 1844 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8; 1845 if (Alignment > NumBytes) 1846 Alignment = NumBytes; 1847 if (Alignment < 8 && Alignment < NumBytes) 1848 Alignment = 0; 1849 // Alignment must be a power of two; make sure of that. 1850 Alignment = (Alignment & -Alignment); 1851 if (Alignment == 1) 1852 Alignment = 0; 1853 } 1854 Align = CurDAG->getTargetConstant(Alignment, MVT::i32); 1855 1856 unsigned OpcodeIndex; 1857 switch (VT.getSimpleVT().SimpleTy) { 1858 default: llvm_unreachable("unhandled vld/vst lane type"); 1859 // Double-register operations: 1860 case MVT::v8i8: OpcodeIndex = 0; break; 1861 case MVT::v4i16: OpcodeIndex = 1; break; 1862 case MVT::v2f32: 1863 case MVT::v2i32: OpcodeIndex = 2; break; 1864 // Quad-register operations: 1865 case MVT::v8i16: OpcodeIndex = 0; break; 1866 case MVT::v4f32: 1867 case MVT::v4i32: OpcodeIndex = 1; break; 1868 } 1869 1870 std::vector<EVT> ResTys; 1871 if (IsLoad) { 1872 unsigned ResTyElts = (NumVecs == 3) ? 
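                     // Note (added commentary): a 3-register operation is
                     // widened to 4 registers so the single wide result value
                     // maps onto an existing register-sequence class; the
                     // extra register simply stays undef.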
4 : NumVecs; 1873 if (!is64BitVector) 1874 ResTyElts *= 2; 1875 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), 1876 MVT::i64, ResTyElts)); 1877 } 1878 if (isUpdating) 1879 ResTys.push_back(MVT::i32); 1880 ResTys.push_back(MVT::Other); 1881 1882 SDValue Pred = getAL(CurDAG); 1883 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1884 1885 SmallVector<SDValue, 8> Ops; 1886 Ops.push_back(MemAddr); 1887 Ops.push_back(Align); 1888 if (isUpdating) { 1889 SDValue Inc = N->getOperand(AddrOpIdx + 1); 1890 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); 1891 } 1892 1893 SDValue SuperReg; 1894 SDValue V0 = N->getOperand(Vec0Idx + 0); 1895 SDValue V1 = N->getOperand(Vec0Idx + 1); 1896 if (NumVecs == 2) { 1897 if (is64BitVector) 1898 SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0); 1899 else 1900 SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0); 1901 } else { 1902 SDValue V2 = N->getOperand(Vec0Idx + 2); 1903 SDValue V3 = (NumVecs == 3) 1904 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 1905 : N->getOperand(Vec0Idx + 3); 1906 if (is64BitVector) 1907 SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0); 1908 else 1909 SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0); 1910 } 1911 Ops.push_back(SuperReg); 1912 Ops.push_back(getI32Imm(Lane)); 1913 Ops.push_back(Pred); 1914 Ops.push_back(Reg0); 1915 Ops.push_back(Chain); 1916 1917 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] : 1918 QOpcodes[OpcodeIndex]); 1919 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, 1920 Ops.data(), Ops.size()); 1921 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1); 1922 if (!IsLoad) 1923 return VLdLn; 1924 1925 // Extract the subregisters. 1926 SuperReg = SDValue(VLdLn, 0); 1927 assert(ARM::dsub_7 == ARM::dsub_0+7 && 1928 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering"); 1929 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0; 1930 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) 1931 ReplaceUses(SDValue(N, Vec), 1932 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg)); 1933 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1)); 1934 if (isUpdating) 1935 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2)); 1936 return NULL; 1937} 1938 1939SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating, 1940 unsigned NumVecs, unsigned *Opcodes) { 1941 assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range"); 1942 DebugLoc dl = N->getDebugLoc(); 1943 1944 SDValue MemAddr, Align; 1945 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align)) 1946 return NULL; 1947 1948 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1949 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 1950 1951 SDValue Chain = N->getOperand(0); 1952 EVT VT = N->getValueType(0); 1953 1954 unsigned Alignment = 0; 1955 if (NumVecs != 3) { 1956 Alignment = cast<ConstantSDNode>(Align)->getZExtValue(); 1957 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8; 1958 if (Alignment > NumBytes) 1959 Alignment = NumBytes; 1960 if (Alignment < 8 && Alignment < NumBytes) 1961 Alignment = 0; 1962 // Alignment must be a power of two; make sure of that. 
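    // Illustrative example (not from the original source): for a 2-register
    // dup of v4i16 (NumVecs == 2, 16-bit elements) NumBytes is 4, so a
    // requested alignment of 8 is clamped to 4 above, while a requested
    // alignment of 2 (less than 8 and less than NumBytes) is dropped to 0.
    // The steps below then round any remaining value down to a power of two
    // and treat a 1-byte alignment as "no alignment".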
1963 Alignment = (Alignment & -Alignment); 1964 if (Alignment == 1) 1965 Alignment = 0; 1966 } 1967 Align = CurDAG->getTargetConstant(Alignment, MVT::i32); 1968 1969 unsigned OpcodeIndex; 1970 switch (VT.getSimpleVT().SimpleTy) { 1971 default: llvm_unreachable("unhandled vld-dup type"); 1972 case MVT::v8i8: OpcodeIndex = 0; break; 1973 case MVT::v4i16: OpcodeIndex = 1; break; 1974 case MVT::v2f32: 1975 case MVT::v2i32: OpcodeIndex = 2; break; 1976 } 1977 1978 SDValue Pred = getAL(CurDAG); 1979 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 1980 SDValue SuperReg; 1981 unsigned Opc = Opcodes[OpcodeIndex]; 1982 SmallVector<SDValue, 6> Ops; 1983 Ops.push_back(MemAddr); 1984 Ops.push_back(Align); 1985 if (isUpdating) { 1986 SDValue Inc = N->getOperand(2); 1987 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc); 1988 } 1989 Ops.push_back(Pred); 1990 Ops.push_back(Reg0); 1991 Ops.push_back(Chain); 1992 1993 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs; 1994 std::vector<EVT> ResTys; 1995 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts)); 1996 if (isUpdating) 1997 ResTys.push_back(MVT::i32); 1998 ResTys.push_back(MVT::Other); 1999 SDNode *VLdDup = 2000 CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size()); 2001 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1); 2002 SuperReg = SDValue(VLdDup, 0); 2003 2004 // Extract the subregisters. 2005 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering"); 2006 unsigned SubIdx = ARM::dsub_0; 2007 for (unsigned Vec = 0; Vec < NumVecs; ++Vec) 2008 ReplaceUses(SDValue(N, Vec), 2009 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg)); 2010 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1)); 2011 if (isUpdating) 2012 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2)); 2013 return NULL; 2014} 2015 2016SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, 2017 unsigned Opc) { 2018 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range"); 2019 DebugLoc dl = N->getDebugLoc(); 2020 EVT VT = N->getValueType(0); 2021 unsigned FirstTblReg = IsExt ? 2 : 1; 2022 2023 // Form a REG_SEQUENCE to force register allocation. 2024 SDValue RegSeq; 2025 SDValue V0 = N->getOperand(FirstTblReg + 0); 2026 SDValue V1 = N->getOperand(FirstTblReg + 1); 2027 if (NumVecs == 2) 2028 RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0); 2029 else { 2030 SDValue V2 = N->getOperand(FirstTblReg + 2); 2031 // If it's a vtbl3, form a quad D-register and leave the last part as 2032 // an undef. 2033 SDValue V3 = (NumVecs == 3) 2034 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0) 2035 : N->getOperand(FirstTblReg + 3); 2036 RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0); 2037 } 2038 2039 SmallVector<SDValue, 6> Ops; 2040 if (IsExt) 2041 Ops.push_back(N->getOperand(1)); 2042 Ops.push_back(RegSeq); 2043 Ops.push_back(N->getOperand(FirstTblReg + NumVecs)); 2044 Ops.push_back(getAL(CurDAG)); // predicate 2045 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register 2046 return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size()); 2047} 2048 2049SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N, 2050 bool isSigned) { 2051 if (!Subtarget->hasV6T2Ops()) 2052 return NULL; 2053 2054 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX) 2055 : (Subtarget->isThumb() ? 
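                            // Note (added commentary): SBFX/UBFX and their
                            // Thumb-2 counterparts are ARMv6T2+ instructions;
                            // the hasV6T2Ops() check above already rejected
                            // older subtargets.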
ARM::t2UBFX : ARM::UBFX); 2056 2057 2058 // For unsigned extracts, check for a shift right and mask 2059 unsigned And_imm = 0; 2060 if (N->getOpcode() == ISD::AND) { 2061 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) { 2062 2063 // The immediate is a mask of the low bits iff imm & (imm+1) == 0 2064 if (And_imm & (And_imm + 1)) 2065 return NULL; 2066 2067 unsigned Srl_imm = 0; 2068 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, 2069 Srl_imm)) { 2070 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!"); 2071 2072 // Note: The width operand is encoded as width-1. 2073 unsigned Width = CountTrailingOnes_32(And_imm) - 1; 2074 unsigned LSB = Srl_imm; 2075 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2076 SDValue Ops[] = { N->getOperand(0).getOperand(0), 2077 CurDAG->getTargetConstant(LSB, MVT::i32), 2078 CurDAG->getTargetConstant(Width, MVT::i32), 2079 getAL(CurDAG), Reg0 }; 2080 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2081 } 2082 } 2083 return NULL; 2084 } 2085 2086 // Otherwise, we're looking for a shift of a shift 2087 unsigned Shl_imm = 0; 2088 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) { 2089 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!"); 2090 unsigned Srl_imm = 0; 2091 if (isInt32Immediate(N->getOperand(1), Srl_imm)) { 2092 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!"); 2093 // Note: The width operand is encoded as width-1. 2094 unsigned Width = 32 - Srl_imm - 1; 2095 int LSB = Srl_imm - Shl_imm; 2096 if (LSB < 0) 2097 return NULL; 2098 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); 2099 SDValue Ops[] = { N->getOperand(0).getOperand(0), 2100 CurDAG->getTargetConstant(LSB, MVT::i32), 2101 CurDAG->getTargetConstant(Width, MVT::i32), 2102 getAL(CurDAG), Reg0 }; 2103 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2104 } 2105 } 2106 return NULL; 2107} 2108 2109SDNode *ARMDAGToDAGISel:: 2110SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 2111 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) { 2112 SDValue CPTmp0; 2113 SDValue CPTmp1; 2114 if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) { 2115 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue(); 2116 unsigned SOShOp = ARM_AM::getSORegShOp(SOVal); 2117 unsigned Opc = 0; 2118 switch (SOShOp) { 2119 case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break; 2120 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break; 2121 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break; 2122 case ARM_AM::ror: Opc = ARM::t2MOVCCror; break; 2123 default: 2124 llvm_unreachable("Unknown so_reg opcode!"); 2125 break; 2126 } 2127 SDValue SOShImm = 2128 CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32); 2129 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32); 2130 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag }; 2131 return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6); 2132 } 2133 return 0; 2134} 2135 2136SDNode *ARMDAGToDAGISel:: 2137SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 2138 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) { 2139 SDValue CPTmp0; 2140 SDValue CPTmp1; 2141 SDValue CPTmp2; 2142 if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) { 2143 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32); 2144 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag }; 2145 return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6); 2146 } 2147 2148 if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) { 2149 SDValue CC = 
CurDAG->getTargetConstant(CCVal, MVT::i32); 2150 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag }; 2151 return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7); 2152 } 2153 return 0; 2154} 2155 2156SDNode *ARMDAGToDAGISel:: 2157SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 2158 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) { 2159 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal); 2160 if (!T) 2161 return 0; 2162 2163 unsigned Opc = 0; 2164 unsigned TrueImm = T->getZExtValue(); 2165 if (is_t2_so_imm(TrueImm)) { 2166 Opc = ARM::t2MOVCCi; 2167 } else if (TrueImm <= 0xffff) { 2168 Opc = ARM::t2MOVCCi16; 2169 } else if (is_t2_so_imm_not(TrueImm)) { 2170 TrueImm = ~TrueImm; 2171 Opc = ARM::t2MVNCCi; 2172 } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) { 2173 // Large immediate. 2174 Opc = ARM::t2MOVCCi32imm; 2175 } 2176 2177 if (Opc) { 2178 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32); 2179 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32); 2180 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag }; 2181 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2182 } 2183 2184 return 0; 2185} 2186 2187SDNode *ARMDAGToDAGISel:: 2188SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, 2189 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) { 2190 ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal); 2191 if (!T) 2192 return 0; 2193 2194 unsigned Opc = 0; 2195 unsigned TrueImm = T->getZExtValue(); 2196 bool isSoImm = is_so_imm(TrueImm); 2197 if (isSoImm) { 2198 Opc = ARM::MOVCCi; 2199 } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) { 2200 Opc = ARM::MOVCCi16; 2201 } else if (is_so_imm_not(TrueImm)) { 2202 TrueImm = ~TrueImm; 2203 Opc = ARM::MVNCCi; 2204 } else if (TrueVal.getNode()->hasOneUse() && 2205 (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) { 2206 // Large immediate. 
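    // Note (added commentary): MOVCCi32imm is a pseudo that expands to a
    // multi-instruction sequence (e.g. MOVW/MOVT or a two-part so_imm
    // construction), so it is only chosen when the constant has a single
    // conditional use and could not be materialized once and shared.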
2207 Opc = ARM::MOVCCi32imm; 2208 } 2209 2210 if (Opc) { 2211 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32); 2212 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32); 2213 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag }; 2214 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5); 2215 } 2216 2217 return 0; 2218} 2219 2220SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) { 2221 EVT VT = N->getValueType(0); 2222 SDValue FalseVal = N->getOperand(0); 2223 SDValue TrueVal = N->getOperand(1); 2224 SDValue CC = N->getOperand(2); 2225 SDValue CCR = N->getOperand(3); 2226 SDValue InFlag = N->getOperand(4); 2227 assert(CC.getOpcode() == ISD::Constant); 2228 assert(CCR.getOpcode() == ISD::Register); 2229 ARMCC::CondCodes CCVal = 2230 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue(); 2231 2232 if (!Subtarget->isThumb1Only() && VT == MVT::i32) { 2233 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc) 2234 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc) 2235 // Pattern complexity = 18 cost = 1 size = 0 2236 SDValue CPTmp0; 2237 SDValue CPTmp1; 2238 SDValue CPTmp2; 2239 if (Subtarget->isThumb()) { 2240 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal, 2241 CCVal, CCR, InFlag); 2242 if (!Res) 2243 Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal, 2244 ARMCC::getOppositeCondition(CCVal), CCR, InFlag); 2245 if (Res) 2246 return Res; 2247 } else { 2248 SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal, 2249 CCVal, CCR, InFlag); 2250 if (!Res) 2251 Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal, 2252 ARMCC::getOppositeCondition(CCVal), CCR, InFlag); 2253 if (Res) 2254 return Res; 2255 } 2256 2257 // Pattern: (ARMcmov:i32 GPR:i32:$false, 2258 // (imm:i32)<<P:Pred_so_imm>>:$true, 2259 // (imm:i32):$cc) 2260 // Emits: (MOVCCi:i32 GPR:i32:$false, 2261 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc) 2262 // Pattern complexity = 10 cost = 1 size = 0 2263 if (Subtarget->isThumb()) { 2264 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal, 2265 CCVal, CCR, InFlag); 2266 if (!Res) 2267 Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal, 2268 ARMCC::getOppositeCondition(CCVal), CCR, InFlag); 2269 if (Res) 2270 return Res; 2271 } else { 2272 SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal, 2273 CCVal, CCR, InFlag); 2274 if (!Res) 2275 Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal, 2276 ARMCC::getOppositeCondition(CCVal), CCR, InFlag); 2277 if (Res) 2278 return Res; 2279 } 2280 } 2281 2282 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc) 2283 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc) 2284 // Pattern complexity = 6 cost = 1 size = 0 2285 // 2286 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc) 2287 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc) 2288 // Pattern complexity = 6 cost = 11 size = 0 2289 // 2290 // Also VMOVScc and VMOVDcc. 2291 SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32); 2292 SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag }; 2293 unsigned Opc = 0; 2294 switch (VT.getSimpleVT().SimpleTy) { 2295 default: assert(false && "Illegal conditional move type!"); 2296 break; 2297 case MVT::i32: 2298 Opc = Subtarget->isThumb() 2299 ? (Subtarget->hasThumb2() ? 
ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
2300       : ARM::MOVCCr;
2301     break;
2302   case MVT::f32:
2303     Opc = ARM::VMOVScc;
2304     break;
2305   case MVT::f64:
2306     Opc = ARM::VMOVDcc;
2307     break;
2308   }
2309   return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
2310}
2311
2312SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2313  // The only time a CONCAT_VECTORS operation can have legal types is when
2314  // two 64-bit vectors are concatenated to a 128-bit vector.
2315  EVT VT = N->getValueType(0);
2316  if (!VT.is128BitVector() || N->getNumOperands() != 2)
2317    llvm_unreachable("unexpected CONCAT_VECTORS");
2318  return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
2319}
2320
2321SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
2322  SmallVector<SDValue, 6> Ops;
2323  Ops.push_back(Node->getOperand(1)); // Ptr
2324  Ops.push_back(Node->getOperand(2)); // Low part of Val1
2325  Ops.push_back(Node->getOperand(3)); // High part of Val1
2326  if (Opc == ARM::ATOMCMPXCHG6432) {
2327    Ops.push_back(Node->getOperand(4)); // Low part of Val2
2328    Ops.push_back(Node->getOperand(5)); // High part of Val2
2329  }
2330  Ops.push_back(Node->getOperand(0)); // Chain
2331  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2332  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2333  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
2334                                           MVT::i32, MVT::i32, MVT::Other,
2335                                           Ops.data(), Ops.size());
2336  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
2337  return ResNode;
2338}
2339
2340SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2341  DebugLoc dl = N->getDebugLoc();
2342
2343  if (N->isMachineOpcode())
2344    return NULL;   // Already selected.
2345
2346  switch (N->getOpcode()) {
2347  default: break;
2348  case ISD::Constant: {
2349    unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2350    bool UseCP = true;
2351    if (Subtarget->hasThumb2())
2352      // Thumb2-aware targets have the MOVT instruction, so all immediates can
2353      // be done with MOV + MOVT, at worst.
2354      UseCP = false;
2355    else {
2356      if (Subtarget->isThumb()) {
2357        UseCP = (Val > 255 &&                          // MOV
2358                 ~Val > 255 &&                         // MOV + MVN
2359                 !ARM_AM::isThumbImmShiftedVal(Val));  // MOV + LSL
2360      } else
2361        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&     // MOV
2362                 ARM_AM::getSOImmVal(~Val) == -1 &&    // MVN
2363                 !ARM_AM::isSOImmTwoPartVal(Val));     // two instrs.
2364    }
2365
2366    if (UseCP) {
2367      SDValue CPIdx =
2368        CurDAG->getTargetConstantPool(ConstantInt::get(
2369                                  Type::getInt32Ty(*CurDAG->getContext()), Val),
2370                                      TLI.getPointerTy());
2371
2372      SDNode *ResNode;
2373      if (Subtarget->isThumb1Only()) {
2374        SDValue Pred = getAL(CurDAG);
2375        SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2376        SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2377        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2378                                         Ops, 4);
2379      } else {
2380        SDValue Ops[] = {
2381          CPIdx,
2382          CurDAG->getTargetConstant(0, MVT::i32),
2383          getAL(CurDAG),
2384          CurDAG->getRegister(0, MVT::i32),
2385          CurDAG->getEntryNode()
2386        };
2387        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2388                                         Ops, 5);
2389      }
2390      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2391      return NULL;
2392    }
2393
2394    // Other cases are autogenerated.
2395    break;
2396  }
2397  case ISD::FrameIndex: {
2398    // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
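    // Illustrative example (not from the original source): a frame object at
    // offset 8 from SP is selected here as "ADDri <fi#N>, 0", and frame-index
    // elimination later rewrites <fi#N> into SP plus the object's actual
    // offset, e.g. "add Rd, sp, #8".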
2399    int FI = cast<FrameIndexSDNode>(N)->getIndex();
2400    SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
2401    if (Subtarget->isThumb1Only()) {
2402      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2403                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
2404      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
2405    } else {
2406      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2407                      ARM::t2ADDri : ARM::ADDri);
2408      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2409                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2410                        CurDAG->getRegister(0, MVT::i32) };
2411      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
2412    }
2413  }
2414  case ISD::SRL:
2415    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2416      return I;
2417    break;
2418  case ISD::SRA:
2419    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2420      return I;
2421    break;
2422  case ISD::MUL:
2423    if (Subtarget->isThumb1Only())
2424      break;
2425    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2426      unsigned RHSV = C->getZExtValue();
2427      if (!RHSV) break;
2428      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
2429        unsigned ShImm = Log2_32(RHSV-1);
2430        if (ShImm >= 32)
2431          break;
2432        SDValue V = N->getOperand(0);
2433        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2434        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2435        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2436        if (Subtarget->isThumb()) {
2437          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2438          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
2439        } else {
2440          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2441          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
2442        }
2443      }
2444      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
2445        unsigned ShImm = Log2_32(RHSV+1);
2446        if (ShImm >= 32)
2447          break;
2448        SDValue V = N->getOperand(0);
2449        ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2450        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2451        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2452        if (Subtarget->isThumb()) {
2453          SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2454          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
2455        } else {
2456          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
2457          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
2458        }
2459      }
2460    }
2461    break;
2462  case ISD::AND: {
2463    // Check for unsigned bitfield extract
2464    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2465      return I;
2466
2467    // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2468    // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
2469    // are entirely contributed by c2 and lower 16-bits are entirely contributed
2470    // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2471    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)"
2472    EVT VT = N->getValueType(0);
2473    if (VT != MVT::i32)
2474      break;
2475    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2476      ? ARM::t2MOVTi16
2477      : (Subtarget->hasV6T2Ops() ?
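                    // Note (added commentary): MOVT is only available from
                    // ARMv6T2 onwards; on older cores Opc stays 0 and the
                    // transformation is skipped just below.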
ARM::MOVTi16 : 0); 2478 if (!Opc) 2479 break; 2480 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 2481 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2482 if (!N1C) 2483 break; 2484 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) { 2485 SDValue N2 = N0.getOperand(1); 2486 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); 2487 if (!N2C) 2488 break; 2489 unsigned N1CVal = N1C->getZExtValue(); 2490 unsigned N2CVal = N2C->getZExtValue(); 2491 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) && 2492 (N1CVal & 0xffffU) == 0xffffU && 2493 (N2CVal & 0xffffU) == 0x0U) { 2494 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16, 2495 MVT::i32); 2496 SDValue Ops[] = { N0.getOperand(0), Imm16, 2497 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2498 return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4); 2499 } 2500 } 2501 break; 2502 } 2503 case ARMISD::VMOVRRD: 2504 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32, 2505 N->getOperand(0), getAL(CurDAG), 2506 CurDAG->getRegister(0, MVT::i32)); 2507 case ISD::UMUL_LOHI: { 2508 if (Subtarget->isThumb1Only()) 2509 break; 2510 if (Subtarget->isThumb()) { 2511 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2512 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2513 CurDAG->getRegister(0, MVT::i32) }; 2514 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,Ops,4); 2515 } else { 2516 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2517 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2518 CurDAG->getRegister(0, MVT::i32) }; 2519 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 2520 ARM::UMULL : ARM::UMULLv5, 2521 dl, MVT::i32, MVT::i32, Ops, 5); 2522 } 2523 } 2524 case ISD::SMUL_LOHI: { 2525 if (Subtarget->isThumb1Only()) 2526 break; 2527 if (Subtarget->isThumb()) { 2528 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2529 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) }; 2530 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4); 2531 } else { 2532 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), 2533 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), 2534 CurDAG->getRegister(0, MVT::i32) }; 2535 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 2536 ARM::SMULL : ARM::SMULLv5, 2537 dl, MVT::i32, MVT::i32, Ops, 5); 2538 } 2539 } 2540 case ISD::LOAD: { 2541 SDNode *ResNode = 0; 2542 if (Subtarget->isThumb() && Subtarget->hasThumb2()) 2543 ResNode = SelectT2IndexedLoad(N); 2544 else 2545 ResNode = SelectARMIndexedLoad(N); 2546 if (ResNode) 2547 return ResNode; 2548 // Other cases are autogenerated. 2549 break; 2550 } 2551 case ARMISD::BRCOND: { 2552 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2553 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2554 // Pattern complexity = 6 cost = 1 size = 0 2555 2556 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2557 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc) 2558 // Pattern complexity = 6 cost = 1 size = 0 2559 2560 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc) 2561 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc) 2562 // Pattern complexity = 6 cost = 1 size = 0 2563 2564 unsigned Opc = Subtarget->isThumb() ? 2565 ((Subtarget->hasThumb2()) ? 
ARM::t2Bcc : ARM::tBcc) : ARM::Bcc; 2566 SDValue Chain = N->getOperand(0); 2567 SDValue N1 = N->getOperand(1); 2568 SDValue N2 = N->getOperand(2); 2569 SDValue N3 = N->getOperand(3); 2570 SDValue InFlag = N->getOperand(4); 2571 assert(N1.getOpcode() == ISD::BasicBlock); 2572 assert(N2.getOpcode() == ISD::Constant); 2573 assert(N3.getOpcode() == ISD::Register); 2574 2575 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) 2576 cast<ConstantSDNode>(N2)->getZExtValue()), 2577 MVT::i32); 2578 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; 2579 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, 2580 MVT::Glue, Ops, 5); 2581 Chain = SDValue(ResNode, 0); 2582 if (N->getNumValues() == 2) { 2583 InFlag = SDValue(ResNode, 1); 2584 ReplaceUses(SDValue(N, 1), InFlag); 2585 } 2586 ReplaceUses(SDValue(N, 0), 2587 SDValue(Chain.getNode(), Chain.getResNo())); 2588 return NULL; 2589 } 2590 case ARMISD::CMOV: 2591 return SelectCMOVOp(N); 2592 case ARMISD::VZIP: { 2593 unsigned Opc = 0; 2594 EVT VT = N->getValueType(0); 2595 switch (VT.getSimpleVT().SimpleTy) { 2596 default: return NULL; 2597 case MVT::v8i8: Opc = ARM::VZIPd8; break; 2598 case MVT::v4i16: Opc = ARM::VZIPd16; break; 2599 case MVT::v2f32: 2600 case MVT::v2i32: Opc = ARM::VZIPd32; break; 2601 case MVT::v16i8: Opc = ARM::VZIPq8; break; 2602 case MVT::v8i16: Opc = ARM::VZIPq16; break; 2603 case MVT::v4f32: 2604 case MVT::v4i32: Opc = ARM::VZIPq32; break; 2605 } 2606 SDValue Pred = getAL(CurDAG); 2607 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2608 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2609 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2610 } 2611 case ARMISD::VUZP: { 2612 unsigned Opc = 0; 2613 EVT VT = N->getValueType(0); 2614 switch (VT.getSimpleVT().SimpleTy) { 2615 default: return NULL; 2616 case MVT::v8i8: Opc = ARM::VUZPd8; break; 2617 case MVT::v4i16: Opc = ARM::VUZPd16; break; 2618 case MVT::v2f32: 2619 case MVT::v2i32: Opc = ARM::VUZPd32; break; 2620 case MVT::v16i8: Opc = ARM::VUZPq8; break; 2621 case MVT::v8i16: Opc = ARM::VUZPq16; break; 2622 case MVT::v4f32: 2623 case MVT::v4i32: Opc = ARM::VUZPq32; break; 2624 } 2625 SDValue Pred = getAL(CurDAG); 2626 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2627 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2628 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2629 } 2630 case ARMISD::VTRN: { 2631 unsigned Opc = 0; 2632 EVT VT = N->getValueType(0); 2633 switch (VT.getSimpleVT().SimpleTy) { 2634 default: return NULL; 2635 case MVT::v8i8: Opc = ARM::VTRNd8; break; 2636 case MVT::v4i16: Opc = ARM::VTRNd16; break; 2637 case MVT::v2f32: 2638 case MVT::v2i32: Opc = ARM::VTRNd32; break; 2639 case MVT::v16i8: Opc = ARM::VTRNq8; break; 2640 case MVT::v8i16: Opc = ARM::VTRNq16; break; 2641 case MVT::v4f32: 2642 case MVT::v4i32: Opc = ARM::VTRNq32; break; 2643 } 2644 SDValue Pred = getAL(CurDAG); 2645 SDValue PredReg = CurDAG->getRegister(0, MVT::i32); 2646 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg }; 2647 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4); 2648 } 2649 case ARMISD::BUILD_VECTOR: { 2650 EVT VecVT = N->getValueType(0); 2651 EVT EltVT = VecVT.getVectorElementType(); 2652 unsigned NumElts = VecVT.getVectorNumElements(); 2653 if (EltVT == MVT::f64) { 2654 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR"); 2655 return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1)); 2656 } 2657 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR"); 2658 
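    // Note (added commentary): two f32 elements are paired into one D
    // register and four into one Q register, in both cases via a
    // REG_SEQUENCE of S subregisters rather than through memory or VMOVs.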
if (NumElts == 2) 2659 return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1)); 2660 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR"); 2661 return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1), 2662 N->getOperand(2), N->getOperand(3)); 2663 } 2664 2665 case ARMISD::VLD2DUP: { 2666 unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo, ARM::VLD2DUPd16Pseudo, 2667 ARM::VLD2DUPd32Pseudo }; 2668 return SelectVLDDup(N, false, 2, Opcodes); 2669 } 2670 2671 case ARMISD::VLD3DUP: { 2672 unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd16Pseudo, 2673 ARM::VLD3DUPd32Pseudo }; 2674 return SelectVLDDup(N, false, 3, Opcodes); 2675 } 2676 2677 case ARMISD::VLD4DUP: { 2678 unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd16Pseudo, 2679 ARM::VLD4DUPd32Pseudo }; 2680 return SelectVLDDup(N, false, 4, Opcodes); 2681 } 2682 2683 case ARMISD::VLD2DUP_UPD: { 2684 unsigned Opcodes[] = { ARM::VLD2DUPd8Pseudo_UPD, ARM::VLD2DUPd16Pseudo_UPD, 2685 ARM::VLD2DUPd32Pseudo_UPD }; 2686 return SelectVLDDup(N, true, 2, Opcodes); 2687 } 2688 2689 case ARMISD::VLD3DUP_UPD: { 2690 unsigned Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd16Pseudo_UPD, 2691 ARM::VLD3DUPd32Pseudo_UPD }; 2692 return SelectVLDDup(N, true, 3, Opcodes); 2693 } 2694 2695 case ARMISD::VLD4DUP_UPD: { 2696 unsigned Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd16Pseudo_UPD, 2697 ARM::VLD4DUPd32Pseudo_UPD }; 2698 return SelectVLDDup(N, true, 4, Opcodes); 2699 } 2700 2701 case ARMISD::VLD1_UPD: { 2702 unsigned DOpcodes[] = { ARM::VLD1d8_UPD, ARM::VLD1d16_UPD, 2703 ARM::VLD1d32_UPD, ARM::VLD1d64_UPD }; 2704 unsigned QOpcodes[] = { ARM::VLD1q8Pseudo_UPD, ARM::VLD1q16Pseudo_UPD, 2705 ARM::VLD1q32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD }; 2706 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0); 2707 } 2708 2709 case ARMISD::VLD2_UPD: { 2710 unsigned DOpcodes[] = { ARM::VLD2d8Pseudo_UPD, ARM::VLD2d16Pseudo_UPD, 2711 ARM::VLD2d32Pseudo_UPD, ARM::VLD1q64Pseudo_UPD }; 2712 unsigned QOpcodes[] = { ARM::VLD2q8Pseudo_UPD, ARM::VLD2q16Pseudo_UPD, 2713 ARM::VLD2q32Pseudo_UPD }; 2714 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0); 2715 } 2716 2717 case ARMISD::VLD3_UPD: { 2718 unsigned DOpcodes[] = { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d16Pseudo_UPD, 2719 ARM::VLD3d32Pseudo_UPD, ARM::VLD1d64TPseudo_UPD }; 2720 unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD, 2721 ARM::VLD3q16Pseudo_UPD, 2722 ARM::VLD3q32Pseudo_UPD }; 2723 unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD, 2724 ARM::VLD3q16oddPseudo_UPD, 2725 ARM::VLD3q32oddPseudo_UPD }; 2726 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1); 2727 } 2728 2729 case ARMISD::VLD4_UPD: { 2730 unsigned DOpcodes[] = { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d16Pseudo_UPD, 2731 ARM::VLD4d32Pseudo_UPD, ARM::VLD1d64QPseudo_UPD }; 2732 unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD, 2733 ARM::VLD4q16Pseudo_UPD, 2734 ARM::VLD4q32Pseudo_UPD }; 2735 unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD, 2736 ARM::VLD4q16oddPseudo_UPD, 2737 ARM::VLD4q32oddPseudo_UPD }; 2738 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1); 2739 } 2740 2741 case ARMISD::VLD2LN_UPD: { 2742 unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd16Pseudo_UPD, 2743 ARM::VLD2LNd32Pseudo_UPD }; 2744 unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD, 2745 ARM::VLD2LNq32Pseudo_UPD }; 2746 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes); 2747 } 2748 2749 case ARMISD::VLD3LN_UPD: { 2750 unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd16Pseudo_UPD, 2751 ARM::VLD3LNd32Pseudo_UPD }; 2752 
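    // Note (added commentary): the Q-register lane tables have no 8-bit
    // entry; byte-element lane accesses on 128-bit vectors are presumably
    // funneled through the D-register forms earlier, so the Q tables start
    // at 16-bit elements (see the OpcodeIndex switch in SelectVLDSTLane).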
unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD, 2753 ARM::VLD3LNq32Pseudo_UPD }; 2754 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes); 2755 } 2756 2757 case ARMISD::VLD4LN_UPD: { 2758 unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd16Pseudo_UPD, 2759 ARM::VLD4LNd32Pseudo_UPD }; 2760 unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD, 2761 ARM::VLD4LNq32Pseudo_UPD }; 2762 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes); 2763 } 2764 2765 case ARMISD::VST1_UPD: { 2766 unsigned DOpcodes[] = { ARM::VST1d8_UPD, ARM::VST1d16_UPD, 2767 ARM::VST1d32_UPD, ARM::VST1d64_UPD }; 2768 unsigned QOpcodes[] = { ARM::VST1q8Pseudo_UPD, ARM::VST1q16Pseudo_UPD, 2769 ARM::VST1q32Pseudo_UPD, ARM::VST1q64Pseudo_UPD }; 2770 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0); 2771 } 2772 2773 case ARMISD::VST2_UPD: { 2774 unsigned DOpcodes[] = { ARM::VST2d8Pseudo_UPD, ARM::VST2d16Pseudo_UPD, 2775 ARM::VST2d32Pseudo_UPD, ARM::VST1q64Pseudo_UPD }; 2776 unsigned QOpcodes[] = { ARM::VST2q8Pseudo_UPD, ARM::VST2q16Pseudo_UPD, 2777 ARM::VST2q32Pseudo_UPD }; 2778 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0); 2779 } 2780 2781 case ARMISD::VST3_UPD: { 2782 unsigned DOpcodes[] = { ARM::VST3d8Pseudo_UPD, ARM::VST3d16Pseudo_UPD, 2783 ARM::VST3d32Pseudo_UPD, ARM::VST1d64TPseudo_UPD }; 2784 unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD, 2785 ARM::VST3q16Pseudo_UPD, 2786 ARM::VST3q32Pseudo_UPD }; 2787 unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD, 2788 ARM::VST3q16oddPseudo_UPD, 2789 ARM::VST3q32oddPseudo_UPD }; 2790 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1); 2791 } 2792 2793 case ARMISD::VST4_UPD: { 2794 unsigned DOpcodes[] = { ARM::VST4d8Pseudo_UPD, ARM::VST4d16Pseudo_UPD, 2795 ARM::VST4d32Pseudo_UPD, ARM::VST1d64QPseudo_UPD }; 2796 unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD, 2797 ARM::VST4q16Pseudo_UPD, 2798 ARM::VST4q32Pseudo_UPD }; 2799 unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD, 2800 ARM::VST4q16oddPseudo_UPD, 2801 ARM::VST4q32oddPseudo_UPD }; 2802 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1); 2803 } 2804 2805 case ARMISD::VST2LN_UPD: { 2806 unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd16Pseudo_UPD, 2807 ARM::VST2LNd32Pseudo_UPD }; 2808 unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD, 2809 ARM::VST2LNq32Pseudo_UPD }; 2810 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes); 2811 } 2812 2813 case ARMISD::VST3LN_UPD: { 2814 unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd16Pseudo_UPD, 2815 ARM::VST3LNd32Pseudo_UPD }; 2816 unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD, 2817 ARM::VST3LNq32Pseudo_UPD }; 2818 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes); 2819 } 2820 2821 case ARMISD::VST4LN_UPD: { 2822 unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd16Pseudo_UPD, 2823 ARM::VST4LNd32Pseudo_UPD }; 2824 unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD, 2825 ARM::VST4LNq32Pseudo_UPD }; 2826 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes); 2827 } 2828 2829 case ISD::INTRINSIC_VOID: 2830 case ISD::INTRINSIC_W_CHAIN: { 2831 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 2832 switch (IntNo) { 2833 default: 2834 break; 2835 2836 case Intrinsic::arm_ldrexd: { 2837 SDValue MemAddr = N->getOperand(2); 2838 DebugLoc dl = N->getDebugLoc(); 2839 SDValue Chain = N->getOperand(0); 2840 2841 unsigned NewOpc = ARM::LDREXD; 2842 if (Subtarget->isThumb() && Subtarget->hasThumb2()) 2843 NewOpc = ARM::t2LDREXD; 2844 2845 // arm_ldrexd returns a 
// 64-bit (i64) value in {i32, i32}
2846      std::vector<EVT> ResTys;
2847      ResTys.push_back(MVT::i32);
2848      ResTys.push_back(MVT::i32);
2849      ResTys.push_back(MVT::Other);
2850
2851      // place arguments in the right order
2852      SmallVector<SDValue, 7> Ops;
2853      Ops.push_back(MemAddr);
2854      Ops.push_back(getAL(CurDAG));
2855      Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2856      Ops.push_back(Chain);
2857      SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
2858                                          Ops.size());
2859      // Transfer memoperands.
2860      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2861      MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2862      cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
2863
2864      // Until there's support for specifying explicit register constraints
2865      // like the use of even/odd register pairs, hardcode ldrexd to always
2866      // use the pair [R0, R1] to hold the load result.
2867      Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
2868                                   SDValue(Ld, 0), SDValue(0, 0));
2869      Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
2870                                   SDValue(Ld, 1), Chain.getValue(1));
2871
2872      // Remap uses.
2873      SDValue Glue = Chain.getValue(1);
2874      if (!SDValue(N, 0).use_empty()) {
2875        SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2876                                                ARM::R0, MVT::i32, Glue);
2877        Glue = Result.getValue(2);
2878        ReplaceUses(SDValue(N, 0), Result);
2879      }
2880      if (!SDValue(N, 1).use_empty()) {
2881        SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2882                                                ARM::R1, MVT::i32, Glue);
2883        Glue = Result.getValue(2);
2884        ReplaceUses(SDValue(N, 1), Result);
2885      }
2886
2887      ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
2888      return NULL;
2889    }
2890
2891    case Intrinsic::arm_strexd: {
2892      DebugLoc dl = N->getDebugLoc();
2893      SDValue Chain = N->getOperand(0);
2894      SDValue Val0 = N->getOperand(2);
2895      SDValue Val1 = N->getOperand(3);
2896      SDValue MemAddr = N->getOperand(4);
2897
2898      // Until there's support for specifying explicit register constraints
2899      // like the use of even/odd register pairs, hardcode strexd to always
2900      // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
2901      Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
2902                                   SDValue(0, 0));
2903      Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));
2904
2905      SDValue Glue = Chain.getValue(1);
2906      Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2907                                    ARM::R2, MVT::i32, Glue);
2908      Glue = Val0.getValue(1);
2909      Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2910                                    ARM::R3, MVT::i32, Glue);
2911
2912      // Store exclusive double returns an i32 value which is the return
2913      // status of the issued store.
2914      std::vector<EVT> ResTys;
2915      ResTys.push_back(MVT::i32);
2916      ResTys.push_back(MVT::Other);
2917
2918      // place arguments in the right order
2919      SmallVector<SDValue, 7> Ops;
2920      Ops.push_back(Val0);
2921      Ops.push_back(Val1);
2922      Ops.push_back(MemAddr);
2923      Ops.push_back(getAL(CurDAG));
2924      Ops.push_back(CurDAG->getRegister(0, MVT::i32));
2925      Ops.push_back(Chain);
2926
2927      unsigned NewOpc = ARM::STREXD;
2928      if (Subtarget->isThumb() && Subtarget->hasThumb2())
2929        NewOpc = ARM::t2STREXD;
2930
2931      SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
2932                                          Ops.size());
2933      // Transfer memoperands.
2934 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2935 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand(); 2936 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1); 2937 2938 return St; 2939 } 2940 2941 case Intrinsic::arm_neon_vld1: { 2942 unsigned DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16, 2943 ARM::VLD1d32, ARM::VLD1d64 }; 2944 unsigned QOpcodes[] = { ARM::VLD1q8Pseudo, ARM::VLD1q16Pseudo, 2945 ARM::VLD1q32Pseudo, ARM::VLD1q64Pseudo }; 2946 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0); 2947 } 2948 2949 case Intrinsic::arm_neon_vld2: { 2950 unsigned DOpcodes[] = { ARM::VLD2d8Pseudo, ARM::VLD2d16Pseudo, 2951 ARM::VLD2d32Pseudo, ARM::VLD1q64Pseudo }; 2952 unsigned QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo, 2953 ARM::VLD2q32Pseudo }; 2954 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0); 2955 } 2956 2957 case Intrinsic::arm_neon_vld3: { 2958 unsigned DOpcodes[] = { ARM::VLD3d8Pseudo, ARM::VLD3d16Pseudo, 2959 ARM::VLD3d32Pseudo, ARM::VLD1d64TPseudo }; 2960 unsigned QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD, 2961 ARM::VLD3q16Pseudo_UPD, 2962 ARM::VLD3q32Pseudo_UPD }; 2963 unsigned QOpcodes1[] = { ARM::VLD3q8oddPseudo, 2964 ARM::VLD3q16oddPseudo, 2965 ARM::VLD3q32oddPseudo }; 2966 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1); 2967 } 2968 2969 case Intrinsic::arm_neon_vld4: { 2970 unsigned DOpcodes[] = { ARM::VLD4d8Pseudo, ARM::VLD4d16Pseudo, 2971 ARM::VLD4d32Pseudo, ARM::VLD1d64QPseudo }; 2972 unsigned QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD, 2973 ARM::VLD4q16Pseudo_UPD, 2974 ARM::VLD4q32Pseudo_UPD }; 2975 unsigned QOpcodes1[] = { ARM::VLD4q8oddPseudo, 2976 ARM::VLD4q16oddPseudo, 2977 ARM::VLD4q32oddPseudo }; 2978 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1); 2979 } 2980 2981 case Intrinsic::arm_neon_vld2lane: { 2982 unsigned DOpcodes[] = { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd16Pseudo, 2983 ARM::VLD2LNd32Pseudo }; 2984 unsigned QOpcodes[] = { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq32Pseudo }; 2985 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes); 2986 } 2987 2988 case Intrinsic::arm_neon_vld3lane: { 2989 unsigned DOpcodes[] = { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd16Pseudo, 2990 ARM::VLD3LNd32Pseudo }; 2991 unsigned QOpcodes[] = { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq32Pseudo }; 2992 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes); 2993 } 2994 2995 case Intrinsic::arm_neon_vld4lane: { 2996 unsigned DOpcodes[] = { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd16Pseudo, 2997 ARM::VLD4LNd32Pseudo }; 2998 unsigned QOpcodes[] = { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq32Pseudo }; 2999 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes); 3000 } 3001 3002 case Intrinsic::arm_neon_vst1: { 3003 unsigned DOpcodes[] = { ARM::VST1d8, ARM::VST1d16, 3004 ARM::VST1d32, ARM::VST1d64 }; 3005 unsigned QOpcodes[] = { ARM::VST1q8Pseudo, ARM::VST1q16Pseudo, 3006 ARM::VST1q32Pseudo, ARM::VST1q64Pseudo }; 3007 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0); 3008 } 3009 3010 case Intrinsic::arm_neon_vst2: { 3011 unsigned DOpcodes[] = { ARM::VST2d8Pseudo, ARM::VST2d16Pseudo, 3012 ARM::VST2d32Pseudo, ARM::VST1q64Pseudo }; 3013 unsigned QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo, 3014 ARM::VST2q32Pseudo }; 3015 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0); 3016 } 3017 3018 case Intrinsic::arm_neon_vst3: { 3019 unsigned DOpcodes[] = { ARM::VST3d8Pseudo, ARM::VST3d16Pseudo, 3020 ARM::VST3d32Pseudo, ARM::VST1d64TPseudo }; 3021 unsigned QOpcodes0[] = { ARM::VST3q8Pseudo_UPD, 3022 ARM::VST3q16Pseudo_UPD, 3023 
ARM::VST3q32Pseudo_UPD }; 3024 unsigned QOpcodes1[] = { ARM::VST3q8oddPseudo, 3025 ARM::VST3q16oddPseudo, 3026 ARM::VST3q32oddPseudo }; 3027 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1); 3028 } 3029 3030 case Intrinsic::arm_neon_vst4: { 3031 unsigned DOpcodes[] = { ARM::VST4d8Pseudo, ARM::VST4d16Pseudo, 3032 ARM::VST4d32Pseudo, ARM::VST1d64QPseudo }; 3033 unsigned QOpcodes0[] = { ARM::VST4q8Pseudo_UPD, 3034 ARM::VST4q16Pseudo_UPD, 3035 ARM::VST4q32Pseudo_UPD }; 3036 unsigned QOpcodes1[] = { ARM::VST4q8oddPseudo, 3037 ARM::VST4q16oddPseudo, 3038 ARM::VST4q32oddPseudo }; 3039 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1); 3040 } 3041 3042 case Intrinsic::arm_neon_vst2lane: { 3043 unsigned DOpcodes[] = { ARM::VST2LNd8Pseudo, ARM::VST2LNd16Pseudo, 3044 ARM::VST2LNd32Pseudo }; 3045 unsigned QOpcodes[] = { ARM::VST2LNq16Pseudo, ARM::VST2LNq32Pseudo }; 3046 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes); 3047 } 3048 3049 case Intrinsic::arm_neon_vst3lane: { 3050 unsigned DOpcodes[] = { ARM::VST3LNd8Pseudo, ARM::VST3LNd16Pseudo, 3051 ARM::VST3LNd32Pseudo }; 3052 unsigned QOpcodes[] = { ARM::VST3LNq16Pseudo, ARM::VST3LNq32Pseudo }; 3053 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes); 3054 } 3055 3056 case Intrinsic::arm_neon_vst4lane: { 3057 unsigned DOpcodes[] = { ARM::VST4LNd8Pseudo, ARM::VST4LNd16Pseudo, 3058 ARM::VST4LNd32Pseudo }; 3059 unsigned QOpcodes[] = { ARM::VST4LNq16Pseudo, ARM::VST4LNq32Pseudo }; 3060 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes); 3061 } 3062 } 3063 break; 3064 } 3065 3066 case ISD::INTRINSIC_WO_CHAIN: { 3067 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3068 switch (IntNo) { 3069 default: 3070 break; 3071 3072 case Intrinsic::arm_neon_vtbl2: 3073 return SelectVTBL(N, false, 2, ARM::VTBL2Pseudo); 3074 case Intrinsic::arm_neon_vtbl3: 3075 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo); 3076 case Intrinsic::arm_neon_vtbl4: 3077 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo); 3078 3079 case Intrinsic::arm_neon_vtbx2: 3080 return SelectVTBL(N, true, 2, ARM::VTBX2Pseudo); 3081 case Intrinsic::arm_neon_vtbx3: 3082 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo); 3083 case Intrinsic::arm_neon_vtbx4: 3084 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo); 3085 } 3086 break; 3087 } 3088 3089 case ARMISD::VTBL1: { 3090 DebugLoc dl = N->getDebugLoc(); 3091 EVT VT = N->getValueType(0); 3092 SmallVector<SDValue, 6> Ops; 3093 3094 Ops.push_back(N->getOperand(0)); 3095 Ops.push_back(N->getOperand(1)); 3096 Ops.push_back(getAL(CurDAG)); // Predicate 3097 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register 3098 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size()); 3099 } 3100 case ARMISD::VTBL2: { 3101 DebugLoc dl = N->getDebugLoc(); 3102 EVT VT = N->getValueType(0); 3103 3104 // Form a REG_SEQUENCE to force register allocation. 
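    // Note (added commentary): VTBL2's two table registers must end up in
    // consecutive D registers; pairing them into a single v16i8 (Q-sized)
    // value makes the register allocator honor that constraint.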
3105 SDValue V0 = N->getOperand(0); 3106 SDValue V1 = N->getOperand(1); 3107 SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0); 3108 3109 SmallVector<SDValue, 6> Ops; 3110 Ops.push_back(RegSeq); 3111 Ops.push_back(N->getOperand(2)); 3112 Ops.push_back(getAL(CurDAG)); // Predicate 3113 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register 3114 return CurDAG->getMachineNode(ARM::VTBL2Pseudo, dl, VT, 3115 Ops.data(), Ops.size()); 3116 } 3117 3118 case ISD::CONCAT_VECTORS: 3119 return SelectConcatVector(N); 3120 3121 case ARMISD::ATOMOR64_DAG: 3122 return SelectAtomic64(N, ARM::ATOMOR6432); 3123 case ARMISD::ATOMXOR64_DAG: 3124 return SelectAtomic64(N, ARM::ATOMXOR6432); 3125 case ARMISD::ATOMADD64_DAG: 3126 return SelectAtomic64(N, ARM::ATOMADD6432); 3127 case ARMISD::ATOMSUB64_DAG: 3128 return SelectAtomic64(N, ARM::ATOMSUB6432); 3129 case ARMISD::ATOMNAND64_DAG: 3130 return SelectAtomic64(N, ARM::ATOMNAND6432); 3131 case ARMISD::ATOMAND64_DAG: 3132 return SelectAtomic64(N, ARM::ATOMAND6432); 3133 case ARMISD::ATOMSWAP64_DAG: 3134 return SelectAtomic64(N, ARM::ATOMSWAP6432); 3135 case ARMISD::ATOMCMPXCHG64_DAG: 3136 return SelectAtomic64(N, ARM::ATOMCMPXCHG6432); 3137 } 3138 3139 return SelectCode(N); 3140} 3141 3142bool ARMDAGToDAGISel:: 3143SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, 3144 std::vector<SDValue> &OutOps) { 3145 assert(ConstraintCode == 'm' && "unexpected asm memory constraint"); 3146 // Require the address to be in a register. That is safe for all ARM 3147 // variants and it is hard to do anything much smarter without knowing 3148 // how the operand is used. 3149 OutOps.push_back(Op); 3150 return false; 3151} 3152 3153/// createARMISelDag - This pass converts a legalized DAG into a 3154/// ARM-specific DAG, ready for instruction scheduling. 3155/// 3156FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM, 3157 CodeGenOpt::Level OptLevel) { 3158 return new ARMDAGToDAGISel(TM, OptLevel); 3159} 3160
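//===----------------------------------------------------------------------===//
//
// Usage sketch (added commentary; illustrative only, the exact code lives in
// ARMTargetMachine.cpp rather than in this file): the target machine installs
// this pass during instruction selection roughly as follows.
//
//   bool ARMBaseTargetMachine::addInstSelector(PassManagerBase &PM,
//                                              CodeGenOpt::Level OptLevel) {
//     PM.add(createARMISelDag(*this, OptLevel));
//     return false;
//   }
//
//===----------------------------------------------------------------------===//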