TargetLowering.cpp revision eb57ea7ea2378b77bc995371c1888193b960cd03
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::NEG_F32] = "__negsf2";
  Names[RTLIB::NEG_F64] = "__negdf2";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}

TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= 156 &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadXActions, 0, sizeof(LoadXActions));
  memset(&StoreXActions, 0, sizeof(StoreXActions));
  memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(&ConvertActions, 0, sizeof(ConvertActions));

  // Set all indexed load / store to expand.
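  // Targets that actually support indexed addressing override these entries
  // for the modes and value types they handle.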
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
    }
  }

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  SetCCResultContents = UndefinedSetCCResult;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);

  // Tell Legalize whether the assembler supports DEBUG_LOC.
  if (!TM.getTargetAsmInfo()->hasDotLocAndDotFile())
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}

TargetLowering::~TargetLowering() {}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (MVT::ValueType ExpandedReg = LargestIntReg + 1;
       MVT::isInteger(ExpandedReg); ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = LargestIntReg;
    TransformToType[ExpandedReg] = ExpandedReg - 1;
    ValueTypeActions.setTypeAction(ExpandedReg, Expand);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  MVT::ValueType LegalIntReg = LargestIntReg;
  for (MVT::ValueType IntReg = LargestIntReg - 1;
       IntReg >= MVT::i1; --IntReg) {
    if (isTypeLegal(IntReg)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] = LegalIntReg;
      ValueTypeActions.setTypeAction(IntReg, Promote);
    }
  }

  // ppcf128 type is really two f64's.
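  // When the target has no native ppcf128 registers, expand it into a pair of
  // f64 values.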
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32. If the target does not have native support for
  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (MVT::ValueType i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    if (!isTypeLegal(i)) {
      MVT::ValueType IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(i,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;
      TransformToType[i] = MVT::Other; // this isn't actually used
      ValueTypeActions.setTypeAction(i, Expand);
    }
  }
}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT::ValueType VT,
                                                MVT::ValueType &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT::ValueType &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = MVT::getVectorNumElements(VT);
  MVT::ValueType EltTy = MVT::getVectorElementType(VT);

  unsigned NumVectorRegs = 1;

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 &&
         !isTypeLegal(MVT::getVectorType(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT::ValueType NewVT = MVT::getVectorType(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT::ValueType DestVT = getTypeToTransformTo(NewVT);
  RegisterVT = DestVT;
  if (DestVT < NewVT) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(MVT::getSizeInBits(NewVT)/MVT::getSizeInBits(DestVT));
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                               uint64_t Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if ((~Demanded & C->getValue()) != 0) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded & C->getValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller).  The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                          uint64_t &KnownZero,
                                          uint64_t &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.

  // The masks are not wide enough to represent this type!  Should use APInt.
  if (Op.getValueType() == MVT::i128)
    return false;

  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedMask to all bits.
    DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS.  Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t LHSZero, LHSOne;
      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), DemandedMask,
                                LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~KnownZero2 & KnownOne) == (DemandedMask & ~KnownZero2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownZero & KnownOne2) == (DemandedMask & ~KnownZero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~KnownZero) & KnownOne2) ==
        (DemandedMask & (~KnownZero)))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & (~KnownZero2) & KnownOne) ==
        (DemandedMask & (~KnownZero2)))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & KnownZero) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & KnownZero2) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
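    // A ZeroOrOneSetCCResult target guarantees that only bit 0 of the setcc
    // result can ever be set.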
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getValue();
      SDOperand InOp = Op.getOperand(0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (DemandedMask & ((1ULL << ShAmt)-1)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          MVT::ValueType VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> ShAmt,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);
      unsigned VTSize = MVT::getSizeInBits(VT);
      SDOperand InOp = Op.getOperand(0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (DemandedMask & (~0ULL << (VTSize-ShAmt))) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= VTSize - ShAmt;
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      uint64_t InDemandedMask = (DemandedMask << ShAmt) & TypeMask;

      // If any of the demanded bits are produced by the sign extension, we also
      // demand the input sign bit.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      if (HighBits & DemandedMask)
        InDemandedMask |= MVT::getIntVTSignBit(VT);

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne  &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne  >>= ShAmt;

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= ShAmt;  // Adjust to where it is now in the mask.

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne & SignBit) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                       // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    break;
  }
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne  = 0;
    break;
  }
  case ISD::LOAD: {
    if (ISD::isZEXTLoad(Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(Op);
      MVT::ValueType VT = LD->getLoadedVT();
      KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());

    // If none of the top bits are demanded, convert this into an any_extend.
    uint64_t NewBits = (~InMask) & DemandedMask;
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT::ValueType InVT = Op.getOperand(0).getValueType();
    uint64_t InMask    = MVT::getIntVTBitMask(InVT);
    uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
    uint64_t NewBits   = (~InMask) & DemandedMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    uint64_t InDemandedBits = DemandedMask & InMask;
    InDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero & InSignBit)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne & InSignBit) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the known
    // zero/one bits live out.
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;

    // If the input is only used by this truncate, see if we can shrink it based
    // on the known demanded bits.
    if (Op.getOperand(0).Val->hasOneUse()) {
      SDOperand In = Op.getOperand(0);
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))) {
          uint64_t HighBits = MVT::getIntVTBitMask(In.getValueType());
          HighBits &= ~MVT::getIntVTBitMask(Op.getValueType());
          HighBits >>= ShAmt->getValue();

          if (ShAmt->getValue() < MVT::getSizeInBits(Op.getValueType()) &&
              (DemandedMask & HighBits) == 0) {
            // None of the shifted in bits are needed.  Add a truncate of the
            // shift input, then shift it.
            SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
                                                 Op.getValueType(),
                                                 In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, Op.getValueType(),
                                                     NewTrunc, In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
    KnownZero &= OutMask;
    KnownOne &= OutMask;
    break;
  }
  case ISD::AssertZext: {
    MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    uint64_t InMask = MVT::getIntVTBitMask(VT);
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & DemandedMask;
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Just use ComputeMaskedBits to compute output bits.
    TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}

/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
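/// The default implementation knows nothing about target-specific nodes, so
/// it conservatively reports that no bits are known.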
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    uint64_t Mask,
                                                    uint64_t &KnownZero,
                                                    uint64_t &KnownOne,
                                                    const SelectionDAG &DAG,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = 0;
  KnownOne = 0;
}

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}


/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc.  If it is unable to simplify it, return a null SDOperand.
SDOperand
TargetLowering::SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
    uint64_t C1 = N1C->getValue();
    if (isa<ConstantSDNode>(N0.Val)) {
      return DAG.FoldSetCC(VT, N0, N1, Cond);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero.
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(MVT::getSizeInBits(N0.getValueType()))) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDOperand Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }

      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = MVT::getSizeInBits(N0.getOperand(0).getValueType());

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
        if (C1 & (~0ULL << InSize)) {
          unsigned VSize = MVT::getSizeInBits(N0.getValueType());
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) != 0, VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant((C1 & (1ULL << (VSize-1))) == 0, VT);
          default:
            break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(C1, N0.getOperand(0).getValueType()),
                              Cond);
        default:
          break;   // todo, be more careful with signed comparisons
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT::ValueType ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = MVT::getSizeInBits(ExtSrcTy);
        MVT::ValueType ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = MVT::getSizeInBits(ExtDstTy);

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal.  In other words, they have to be all ones or all
        // zeros.
        uint64_t ExtBits =
          (~0ULL >> (64-ExtSrcTyBits)) & (~0ULL << (ExtDstTyBits-1));
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDOperand ZextOp;
        MVT::ValueType Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          int64_t Imm = ~0ULL >> (64-ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.Val);
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(VT, ZextOp,
                            DAG.getConstant(C1 & (~0ULL>>(64-ExtSrcTyBits)),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->getValue() == 0 || N1C->getValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {

        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                                    MVT::isInteger(N0.getOperand(0).getValueType()));
          return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
          // can only do this if the top bits are known zero.
          if (DAG.MaskedValueIsZero(N0,
                                    MVT::getIntVTBitMask(N0.getValueType())-1)) {
            // Okay, get the un-inverted input value.
            SDOperand Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1 -> X & 1
              Val = DAG.getNode(ISD::AND, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
            return DAG.getSetCC(VT, Val, N1,
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          }
        }
      }

      uint64_t MinVal, MaxVal;
      unsigned OperandBitSize = MVT::getSizeInBits(N1C->getValueType(0));
      if (ISD::isSignedIntSetCC(Cond)) {
        MinVal = 1ULL << (OperandBitSize-1);
        if (OperandBitSize != 1)   // Avoid X >> 64, which is undefined.
          MaxVal = ~0ULL >> (65-OperandBitSize);
        else
          MaxVal = 0;
      } else {
        MinVal = 0;
        MaxVal = ~0ULL >> (64-OperandBitSize);
      }

      // Canonicalize GE/LE comparisons to use GT/LT comparisons.
      if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
        if (C1 == MinVal) return DAG.getConstant(1, VT);   // X >= MIN --> true
        --C1;                                  // X >= C0 --> X > (C0-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                            (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
      }

      if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
        if (C1 == MaxVal) return DAG.getConstant(1, VT);   // X <= MAX --> true
        ++C1;                                  // X <= C0 --> X < (C0+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1, N1.getValueType()),
                            (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
      }

      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
        return DAG.getConstant(0, VT);      // X < MIN --> false
      if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
        return DAG.getConstant(1, VT);      // X >= MIN --> true
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
        return DAG.getConstant(0, VT);      // X > MAX --> false
      if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
        return DAG.getConstant(1, VT);      // X <= MAX --> true

      // Canonicalize setgt X, Min --> setne X, Min
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
      // Canonicalize setlt X, Max --> setne X, Max
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

      // If we have setult X, 1, turn it into seteq X, 0
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                            ISD::SETEQ);
      // If we have setugt X, Max-1, turn it into seteq X, Max
      else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                            ISD::SETEQ);

      // If we have "setcc X, C0", check to see if we can shrink the immediate
      // by changing cc.

      // SETUGT X, SINTMAX  -> SETLT X, 0
      if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
          C1 == (~0ULL >> (65-OperandBitSize)))
        return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                            ISD::SETLT);

      // FIXME: Implement the rest of these.

      // Fold bit comparisons when we can.
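      // e.g. (X & 8) != 0 is rewritten below as (X & 8) >> 3 when the AND mask
      // is a single bit.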
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
        if (ConstantSDNode *AndRHS =
                    dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0  -->  (X & 8) >> 3
            // Perform the xform if the AND RHS is a single bit.
            if (isPowerOf2_64(AndRHS->getValue())) {
              return DAG.getNode(ISD::SRL, VT, N0,
                                 DAG.getConstant(Log2_64(AndRHS->getValue()),
                                                 getShiftAmountTy()));
            }
          } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
            // (X & 8) == 8  -->  (X & 8) >> 3
            // Perform the xform if C1 is a single bit.
            if (isPowerOf2_64(C1)) {
              return DAG.getNode(ISD::SRL, VT, N0,
                                 DAG.getConstant(Log2_64(C1), getShiftAmountTy()));
            }
          }
        }
    }
  } else if (isa<ConstantSDNode>(N0.Val)) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.Val)) {
    // Constant fold or commute setcc.
    SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.Val) return O;
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (MVT::isInteger(N0.getValueType()))
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      MVT::isInteger(N0.getValueType())) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getValue()-LHSR->getValue(),
                                                N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
              return DAG.getSetCC(VT, N0.getOperand(0),
                                  DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
                                                  N0.getValueType()), Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(1),
                                DAG.getConstant(SUBC->getValue()-RHSC->getValue(),
                                                N0.getValueType()), Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else if (N0.Val->hasOneUse()) {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                     N1,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else if (N1.Val->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }

  // Fold away ALL boolean setcc's.
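  // With i1 operands every comparison can be rewritten as AND/OR/XOR of the
  // (possibly inverted) inputs, which is what the cases below do.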
  SDOperand Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -> (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETLT:  // X <s Y  --> X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y  --> X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETULE: // X <=u Y  --> X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  --> X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETUGE: // X >=u Y  --> X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  --> X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.Val);
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDOperand();
}

SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                  char ConstraintLetter,
                                                  std::vector<SDOperand> &Ops,
                                                  SelectionDAG &DAG) {
  switch (ConstraintLetter) {
  default: break;
  case 'X':    // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C are missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 Op.getValueType(), Offs));
        return;
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        Ops.push_back(DAG.getTargetConstant(C->getValue(), Op.getValueType()));
        return;
      }
    }
    break;
  }
  }
}

std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}


std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.

std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT::ValueType VT) const {
  return std::vector<unsigned>();
}

std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT::ValueType VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const MRegisterInfo *RI = TM.getRegisterInfo();
  for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).Name))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation accepts only a conservative, RISC-like set of
  // r+r and r+i addressing modes.

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r and a few simple scaled forms.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:                             // Any larger scale is not supported.
    return false;
  }

  return true;
}
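
// Under this default, for example, "reg", "reg + reg", and "reg + 1024" are
// all accepted, while "reg + reg + 8" (scale 1 with both a base register and
// an offset), "2*reg + reg", and any mode using a global as the base are
// rejected; a scale of 2 with no base register or offset is treated as the
// reg + reg form.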

// Magic for divide replacement.

struct ms {
  int64_t m;  // magic number
  int64_t s;  // shift amount
};

struct mu {
  uint64_t m; // magic number
  int64_t a;  // add indicator
  int64_t s;  // shift amount
};

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = abs(d);
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 31;               // initialize p
  q1 = two31/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two31 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two31/ad;        // initialize q2 = 2p/abs(d)
  r2 = two31 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1); // make sure to sign extend
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 32;            // resulting shift
  return mag;
}

/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;               // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 31;                   // initialize p
  q1 = 0x80000000/nc;       // initialize q1 = 2p/nc
  r1 = 0x80000000 - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFF/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFF - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;        // update q1
      r1 = 2*r1 - nc;       // update r1
    } else {
      q1 = 2*q1;            // update q1
      r1 = 2*r1;            // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;        // update q2
      r2 = 2*r2 + 1 - d;    // update r2
    } else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;            // update q2
      r2 = 2*r2 + 1;        // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;          // resulting magic number
  magu.s = p - 32;          // resulting shift
  return magu;
}
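
// Worked example (the classic divide-by-7 constants, as tabulated in
// Hacker's Delight):
//   magic32(7)  returns m = 0x92492493 (negative as an i32) and s = 2
//   magicu32(7) returns m = 0x24924925, a = 1 and s = 3
// BuildSDIV and BuildUDIV below turn these into a high-half multiply plus the
// add/subtract and shift fix-ups selected by the signs of d and m and by the
// "add" indicator.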

/// magic - calculate the magic numbers required to codegen an integer sdiv as
/// a sequence of multiply and shifts.  Requires that the divisor not be 0, 1,
/// or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL; // 2^63
  struct ms mag;

  ad = d >= 0 ? d : -d;
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;   // absolute value of nc
  p = 63;               // initialize p
  q1 = two63/anc;       // initialize q1 = 2p/abs(nc)
  r1 = two63 - q1*anc;  // initialize r1 = rem(2p,abs(nc))
  q2 = two63/ad;        // initialize q2 = 2p/abs(d)
  r2 = two63 - q2*ad;   // initialize r2 = rem(2p,abs(d))
  do {
    p = p + 1;
    q1 = 2*q1;          // update q1 = 2p/abs(nc)
    r1 = 2*r1;          // update r1 = rem(2p/abs(nc))
    if (r1 >= anc) {    // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;          // update q2 = 2p/abs(d)
    r2 = 2*r2;          // update r2 = rem(2p/abs(d))
    if (r2 >= ad) {     // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d < 0) mag.m = -mag.m; // resulting magic number
  mag.s = p - 64;            // resulting shift
  return mag;
}

/// magicu - calculate the magic numbers required to codegen an integer udiv as
/// a sequence of multiply, add and shifts.  Requires that the divisor not be 0.
static mu magicu64(uint64_t d) {
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;                          // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 63;                              // initialize p
  q1 = 0x8000000000000000ull/nc;       // initialize q1 = 2p/nc
  r1 = 0x8000000000000000ull - q1*nc;  // initialize r1 = rem(2p,nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;        // initialize q2 = (2p-1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;   // initialize r2 = rem((2p-1),d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;                   // update q1
      r1 = 2*r1 - nc;                  // update r1
    } else {
      q1 = 2*q1;                       // update q1
      r1 = 2*r1;                       // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;                   // update q2
      r2 = 2*r2 + 1 - d;               // update r2
    } else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;                       // update q2
      r2 = 2*r2 + 1;                   // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;                     // resulting magic number
  magu.s = p - 64;                     // resulting shift
  return magu;
}

/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildSDIV only operates on i32 or i64.

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
  ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q;
  if (isOperationLegal(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
    Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
                              N->getOperand(0),
                              DAG.getConstant(magics.m, VT)).Val, 1);
  else
    return SDOperand();       // No mulhs or equivalent.
  // If d > 0 and m < 0, add the numerator.
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Shift right algebraic if the shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Extract the sign bit and add it to the quotient.
  SDOperand T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(MVT::getSizeInBits(VT)-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.Val);
  return DAG.getNode(ISD::ADD, VT, Q, T);
}
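
// Illustrative sketch: for (sdiv i32 %n, 7) on a target where MULHS is legal,
// and using the magic32(7) values m = 0x92492493 and s = 2, the sequence
// built above is roughly
//   %q1 = mulhs %n, 0x92492493      ; high 32 bits of the 64-bit product
//   %q2 = add   %q1, %n             ; d > 0 and m < 0, so add the numerator
//   %q3 = sra   %q2, 2              ; shift right by magics.s
//   %t  = srl   %q3, 31             ; extract the sign bit of the quotient
//   result = add %q3, %t            ; round the quotient toward zero
// with each intermediate node also appended to *Created when a vector is
// supplied.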

/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT::ValueType VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildUDIV only operates on i32 or i64.

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q;
  if (isOperationLegal(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
    Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
                              N->getOperand(0),
                              DAG.getConstant(magics.m, VT)).Val, 1);
  else
    return SDOperand();       // No mulhu or equivalent.
  if (Created)
    Created->push_back(Q.Val);

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.Val);
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
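
// Illustrative sketch: for (udiv i32 %n, 7), the magicu32(7) values are
// m = 0x24924925, a = 1 and s = 3, so the "add" path above builds roughly
//   %q   = mulhu %n, 0x24924925
//   %npq = sub   %n, %q
//   %npq = srl   %npq, 1            ; halve without overflowing n + q
//   %npq = add   %npq, %q
//   result = srl %npq, 2            ; magics.s - 1
// When the add indicator is clear the expansion is just the mulhu followed by
// a single srl by magics.s.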