TargetLowering.cpp revision 83ec4b6711980242ef3c55a4fa36b2d7a39c1bfb
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names) {
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
}
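// Illustration (added for exposition, not part of this revision): a target
// whose runtime provides different routine names can override these defaults
// from its TargetLowering subclass constructor.  A minimal sketch, assuming a
// hypothetical MyTargetLowering class and a hypothetical "__mylib_divsi3"
// routine; setLibcallName and setCmpLibcallCC are the setters for the tables
// initialized above.
#if 0
MyTargetLowering::MyTargetLowering(TargetMachine &TM) : TargetLowering(TM) {
  // Replace the default __divsi3 with the target runtime's divide helper.
  setLibcallName(RTLIB::SDIV_I32, "__mylib_divsi3");    // hypothetical name
  // If the f32 equality routine returns nonzero (rather than 0) on equality,
  // tell Legalize how to test its result.
  setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
}
#endif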
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  assert(ISD::BUILTIN_OP_END <= OpActionsCapacity &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadXActions, 0, sizeof(LoadXActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(ConvertActions, 0, sizeof(ConvertActions));

  // Set default actions for various operations.
  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
    }

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use
  // addLegalFPImmediate to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  IsLittleEndian = TD->isLittleEndian();
  UsesGlobalOffsetTable = false;
  ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType());
  ShiftAmtHandling = Undefined;
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  ExceptionPointerRegister = 0;
  ExceptionSelectorRegister = 0;
  SetCCResultContents = UndefinedSetCCResult;
  SchedPreferenceInfo = SchedulingForLatency;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  IfCvtBlockSizeLimit = 2;
  IfCvtDupBlockSizeLimit = 0;
  PrefLoopAlignment = 0;

  InitLibcallNames(LibcallRoutineNames);
  InitCmpLibcallCCs(CmpLibcallCCs);

  // Tell Legalize whether the assembler supports DEBUG_LOC.
  if (!TM.getTargetAsmInfo()->hasDotLocAndDotFile())
    setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}

TargetLowering::~TargetLowering() {}
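// Illustration (added for exposition, not part of this revision): the
// defaults set above are refined by each backend's subclass constructor.
// A minimal sketch, assuming a hypothetical MyTargetLowering and a
// hypothetical MyTarget::GPRRegisterClass; addRegisterClass,
// setOperationAction, setLoadXAction and computeRegisterProperties are the
// TargetLowering hooks being invoked.
#if 0
MyTargetLowering::MyTargetLowering(TargetMachine &TM) : TargetLowering(TM) {
  // Declare which types live in registers; everything else gets legalized.
  addRegisterClass(MVT::i32, MyTarget::GPRRegisterClass);  // hypothetical RC

  // This target has no native ctpop; Legalize expands it to shifts/adds.
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  // SELECT is handled by custom target code during legalization.
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  // No sign-extending i8 loads: expand them into a load plus shifts.
  setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);

  // Derive NumRegistersForVT / TransformToType etc. from the classes added.
  computeRegisterProperties();
}
#endif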
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
    MVT EVT = (MVT::SimpleValueType)ExpandedReg;
    if (!EVT.isInteger())
      break;
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction(EVT, Expand);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, Promote);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
    TransformToType[MVT::ppcf128] = MVT::f64;
    ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
  }

  // Decide how to handle f64.  If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, Expand);
  }

  // Decide how to handle f32.  If the target does not have native support for
  // f32, promote it to f64 if it is legal.  Otherwise, expand it to i32.
  if (!isTypeLegal(MVT::f32)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
      TransformToType[MVT::f32] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::f32, Promote);
    } else {
      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
      TransformToType[MVT::f32] = MVT::i32;
      ValueTypeActions.setTypeAction(MVT::f32, Expand);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (!isTypeLegal(VT)) {
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] =
        getVectorTypeBreakdown(VT,
                               IntermediateVT, NumIntermediates,
                               RegisterVT);
      RegisterTypeForVT[i] = RegisterVT;
      TransformToType[i] = MVT::Other; // this isn't actually used
      ValueTypeActions.setTypeAction(VT, Expand);
    }
  }
}
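// Worked example (added for exposition): on a hypothetical 32-bit target that
// only added a register class for i32, the loops above leave
// NumRegistersForVT[MVT::i64] == 2 with RegisterTypeForVT[MVT::i64] == MVT::i32
// (i64 is Expanded into two i32 halves), mark i1/i8/i16 as Promoted to i32,
// and, with no FP register classes, mark f64 as Expanded to i64 and f32 as
// Expanded to i32, which later turns FP arithmetic into soft-float libcalls.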
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}


MVT TargetLowering::getSetCCResultType(const SDOperand &) const {
  return getValueType(TD->getIntPtrType());
}


/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLowering::getVectorTypeBreakdown(MVT VT,
                                                MVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getTypeToTransformTo(NewVT);
  RegisterVT = DestVT;
  if (DestVT < NewVT) {
    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
  } else {
    // Otherwise, promotion or legal types use the same number of registers as
    // the vector decimated to the appropriate level.
    return NumVectorRegs;
  }

  return 1;
}
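// Worked example (added for exposition): querying the breakdown of an illegal
// vector type.  A minimal sketch, assuming TLI is a TargetLowering reference
// for a target where v4f32 is the widest legal vector type (an SSE1-like
// target), so v8f32 splits into two v4f32 intermediates.
#if 0
static void BreakdownExample(const TargetLowering &TLI) {
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(MVT::v8f32, IntermediateVT,
                                                NumIntermediates, RegisterVT);
  // Here NumRegs == 2 and NumIntermediates == 2, with IntermediateVT and
  // RegisterVT both MVT::v4f32, matching the doc comment above.
  (void)NumRegs;
}
#endif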
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
  return TD->getCallFrameTypeAlignment(Ty);
}

SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
                                                   SelectionDAG &DAG) const {
  if (usesGlobalOffsetTable())
    return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
  return Table;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                        const APInt &Demanded) {
  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (C->getAPIntValue().intersects(~Demanded)) {
        MVT VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded &
                                                    C->getAPIntValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}
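// Worked example (added for exposition): if Op is (or X, 0xFF00FF) but the
// caller only demands the low 8 bits (Demanded == 0xFF), the constant
// intersects ~Demanded, so the node is rewritten as (or X, 0xFF); the smaller
// immediate may then fit directly in the target's instruction encoding.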
/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of KnownOne and
/// KnownZero bits for the expression (used to simplify the caller).  The
/// KnownZero/One bits may only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDOperand Op,
                                          const APInt &DemandedMask,
                                          APInt &KnownZero,
                                          APInt &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  unsigned BitWidth = DemandedMask.getBitWidth();
  assert(Op.getValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");
  APInt NewMask = DemandedMask;

  // Don't know anything.
  KnownZero = KnownOne = APInt(BitWidth, 0);

  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the NewMask to all bits.
    NewMask = APInt::getAllOnesValue(BitWidth);
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {  // Limit search depth.
    return false;
  }

  APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
    KnownZero = ~KnownOne & NewMask;
    return false;   // Don't fall through, will infinitely loop.
  case ISD::AND:
    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS.  Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt LHSZero, LHSOne;
      TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
                                LHSZero, LHSOne, Depth+1);
      // If the LHS already has zeros where RHSC does, this and is dead.
      if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
        return TLO.CombineTo(Op, Op.getOperand(0));
      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
        return true;
    }

    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or'.
    if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'xor'.
    if ((KnownZero & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((KnownZero2 & NewMask) == NewMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // If all of the unknown bits are known to be zero on one side or the
    // other (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                               Op.getOperand(0),
                                               Op.getOperand(1)));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.  For XOR, we
    // prefer to force bits to 1 if they will make a -1; if we can't force
    // bits, try to shrink the constant.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      APInt Expanded = C->getAPIntValue() | (~NewMask);
      // If we can expand it to have all bits set, do it.
      if (Expanded.isAllOnesValue()) {
        if (Expanded != C->getAPIntValue()) {
          MVT VT = Op.getValueType();
          SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                          TLO.DAG.getConstant(Expanded, VT));
          return TLO.CombineTo(Op, New);
        }
        // If it already has all the bits set, there is nothing to change,
        // but don't shrink either!
      } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) {
        return true;
      }
    }

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, NewMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getValue();
      SDOperand InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      if (InOp.getOpcode() == ISD::SRL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SHL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SRL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          MVT VT = Op.getValueType();
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne  <<= SA->getValue();
      // low bits known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getValue());
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();
      unsigned VTSize = VT.getSizeInBits();
      SDOperand InOp = Op.getOperand(0);

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift.  We can do this if the top bits (which are shifted out)
      // are never demanded.
      if (InOp.getOpcode() == ISD::SHL &&
          isa<ConstantSDNode>(InOp.getOperand(1))) {
        if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
          unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
          unsigned Opc = ISD::SRL;
          int Diff = ShAmt-C1;
          if (Diff < 0) {
            Diff = -Diff;
            Opc = ISD::SHL;
          }

          SDOperand NewSA =
            TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType());
          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT,
                                                   InOp.getOperand(0), NewSA));
        }
      }

      // Compute the new bits that are at the top now.
      if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        break;

      APInt InDemandedMask = (NewMask << ShAmt);

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      if (HighBits.intersects(NewMask))
        InDemandedMask |= APInt::getSignBit(VT.getSizeInBits());

      if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bit, adjusted to where it is now in the mask.
      APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne.intersects(SignBit)) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth,
                                          BitWidth - EVT.getSizeInBits()) &
                    NewMask;

    // If none of the extended bits are demanded, eliminate the sextinreg.
    if (NewBits == 0)
      return TLO.CombineTo(Op, Op.getOperand(0));

    APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits());
    InSignBit.zext(BitWidth);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
                                                   EVT.getSizeInBits()) &
                              NewMask;

    // Since the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, convert this into a zero extension.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op,
                           TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));

    if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                               // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::ZERO_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);

    // If none of the top bits are demanded, convert this into an any_extend.
    APInt NewBits =
      APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
    if (!NewBits.intersects(NewMask))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    break;
  }
  case ISD::SIGN_EXTEND: {
    MVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getSizeInBits();
    APInt InMask    = APInt::getLowBitsSet(BitWidth, InBits);
    APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
    APInt NewBits   = ~InMask & NewMask;

    // If none of the top bits are demanded, convert this into an any_extend.
    if (NewBits == 0)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // Since some of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    APInt InDemandedBits = InMask & NewMask;
    InDemandedBits |= InSignBit;
    InDemandedBits.trunc(InBits);

    if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit is known zero, convert this to a zero extend.
    if (KnownZero.intersects(InSignBit))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
                                               Op.getValueType(),
                                               Op.getOperand(0)));

    // If the sign bit is known one, the top bits match.
    if (KnownOne.intersects(InSignBit)) {
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {   // Otherwise, top bits aren't known.
      KnownOne &= ~NewBits;
      KnownZero &= ~NewBits;
    }
    break;
  }
  case ISD::ANY_EXTEND: {
    unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
    APInt InMask = NewMask;
    InMask.trunc(OperandBitWidth);
    if (SimplifyDemandedBits(Op.getOperand(0), InMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    // Simplify the input, using demanded bit information, and compute the
    // known zero/one bits live out.
    APInt TruncMask = NewMask;
    TruncMask.zext(Op.getOperand(0).getValueSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    KnownZero.trunc(BitWidth);
    KnownOne.trunc(BitWidth);

    // If the input is only used by this truncate, see if we can shrink it
    // based on the known demanded bits.
    if (Op.getOperand(0).Val->hasOneUse()) {
      SDOperand In = Op.getOperand(0);
      unsigned InBitWidth = In.getValueSizeInBits();
      switch (In.getOpcode()) {
      default: break;
      case ISD::SRL:
        // Shrink SRL by a constant if none of the high bits shifted in are
        // demanded.
        if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
          APInt HighBits = APInt::getHighBitsSet(InBitWidth,
                                                 InBitWidth - BitWidth);
          HighBits = HighBits.lshr(ShAmt->getValue());
          HighBits.trunc(BitWidth);

          if (ShAmt->getValue() < BitWidth && !(HighBits & NewMask)) {
            // None of the shifted in bits are needed.  Add a truncate of the
            // shift input, then shift it.
            SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
                                                 Op.getValueType(),
                                                 In.getOperand(0));
            return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,
                                                     Op.getValueType(),
                                                     NewTrunc,
                                                     In.getOperand(1)));
          }
        }
        break;
      }
    }

    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    break;
  }
  case ISD::AssertZext: {
    MVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth,
                                        VT.getSizeInBits());
    if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero |= ~InMask & NewMask;
    break;
  }
  case ISD::BIT_CONVERT:
#if 0
    // If this is an FP->Int bitcast and if the sign bit is the only thing that
    // is demanded, turn this into a FGETSIGN.
    if (NewMask == MVT::getIntegerVTSignBit(Op.getValueType()) &&
        MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
        !MVT::isVector(Op.getOperand(0).getValueType())) {
      // Only do this xform if FGETSIGN is valid or if before legalize.
      if (!TLO.AfterLegalize ||
          isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
        // Make a FGETSIGN + SHL to move the sign bit into the appropriate
        // place.  We expect the SHL to be eliminated by other optimizations.
        SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
                                         Op.getOperand(0));
        unsigned ShVal = Op.getValueType().getSizeInBits()-1;
        SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
                                                 Sign, ShAmt));
      }
    }
#endif
    break;
  default:
    // Just use ComputeMaskedBits to compute output bits.
    TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
    break;
  }

  // If we know the value of all of the demanded bits, return this as a
  // constant.
  if ((NewMask & (KnownZero|KnownOne)) == NewMask)
    return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));

  return false;
}
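// Illustration (added for exposition): SimplifyDemandedBits is typically
// driven from a target's PerformDAGCombine hook.  A minimal sketch, assuming
// a hypothetical MyTargetLowering, a 32-bit node N whose low 16 result bits
// alone are demanded, and the two-argument TargetLoweringOpt constructor of
// this revision; CommitTargetLoweringOpt adopts the TLO.Old -> TLO.New
// replacement in the combiner.
#if 0
SDOperand MyTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SDOperand Op(N, 0);
  TargetLoweringOpt TLO(DCI.DAG, /*AfterLegalize=*/false);
  // Assume (for illustration) only bits 0..15 of this i32 value are used.
  APInt Demanded = APInt::getLowBitsSet(32, 16);
  APInt KnownZero, KnownOne;
  if (SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);   // a simplified node was built
  return SDOperand();
}
#endif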
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    const APInt &Mask,
                                                    APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const SelectionDAG &DAG,
                                                    unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
}

/// ComputeNumSignBitsForTargetNode - This method can be implemented by
/// targets that want to expose additional information about sign bits to the
/// DAG Combiner.
unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op,
                                                         unsigned Depth) const {
  assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
          Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_VOID) &&
         "Should use ComputeNumSignBits if you don't know whether Op"
         " is a target node!");
  return 1;
}
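// Illustration (added for exposition): a backend overrides this hook to teach
// the DAG combiner about the results of its own nodes.  A minimal sketch,
// assuming a hypothetical MyTargetLowering and a hypothetical
// MYISD::SETCC_CARRY node that is known to produce only 0 or 1, so every bit
// above bit 0 can be reported as known zero.
#if 0
void MyTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                      const APInt &Mask,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  if (Op.getOpcode() == MYISD::SETCC_CARRY)        // hypothetical opcode
    KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
                                      Mask.getBitWidth() - 1);
}
#endif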
/// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc.  If it is unable to simplify it, return a null SDOperand.
SDOperand
TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1,
                              ISD::CondCode Cond, bool foldBooleans,
                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return DAG.getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return DAG.getConstant(1, VT);
  }

  if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val)) {
    const APInt &C1 = N1C->getAPIntValue();
    if (isa<ConstantSDNode>(N0.Val)) {
      return DAG.FoldSetCC(VT, N0, N1, Cond);
    } else {
      // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an
      // equality comparison, then we're just comparing whether X itself is
      // zero.
      if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) &&
          N0.getOperand(0).getOpcode() == ISD::CTLZ &&
          N0.getOperand(1).getOpcode() == ISD::Constant) {
        unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
        if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
            ShAmt == Log2_32(N0.getValueType().getSizeInBits())) {
          if ((C1 == 0) == (Cond == ISD::SETEQ)) {
            // (srl (ctlz x), 5) == 0  -> X != 0
            // (srl (ctlz x), 5) != 1  -> X != 0
            Cond = ISD::SETNE;
          } else {
            // (srl (ctlz x), 5) != 0  -> X == 0
            // (srl (ctlz x), 5) == 1  -> X == 0
            Cond = ISD::SETEQ;
          }
          SDOperand Zero = DAG.getConstant(0, N0.getValueType());
          return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0),
                              Zero, Cond);
        }
      }

      // If the LHS is a ZERO_EXTEND, perform the comparison on the input.
      if (N0.getOpcode() == ISD::ZERO_EXTEND) {
        unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits();

        // If the comparison constant has bits in the upper part, the
        // zero-extended value could never match.
        if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(),
                                                C1.getBitWidth() - InSize))) {
          switch (Cond) {
          case ISD::SETUGT:
          case ISD::SETUGE:
          case ISD::SETEQ: return DAG.getConstant(0, VT);
          case ISD::SETULT:
          case ISD::SETULE:
          case ISD::SETNE: return DAG.getConstant(1, VT);
          case ISD::SETGT:
          case ISD::SETGE:
            // True if the sign bit of C1 is set.
            return DAG.getConstant(C1.isNegative(), VT);
          case ISD::SETLT:
          case ISD::SETLE:
            // True if the sign bit of C1 isn't set.
            return DAG.getConstant(C1.isNonNegative(), VT);
          default:
            break;
          }
        }

        // Otherwise, we can perform the comparison with the low bits.
        switch (Cond) {
        case ISD::SETEQ:
        case ISD::SETNE:
        case ISD::SETUGT:
        case ISD::SETUGE:
        case ISD::SETULT:
        case ISD::SETULE:
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(APInt(C1).trunc(InSize),
                                              N0.getOperand(0).getValueType()),
                              Cond);
        default:
          break;   // todo, be more careful with signed comparisons
        }
      } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
        MVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
        unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
        MVT ExtDstTy = N0.getValueType();
        unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();

        // If the extended part has any inconsistent bits, it cannot ever
        // compare equal.  In other words, they have to be all ones or all
        // zeros.
        APInt ExtBits =
          APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
        if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
          return DAG.getConstant(Cond == ISD::SETNE, VT);

        SDOperand ZextOp;
        MVT Op0Ty = N0.getOperand(0).getValueType();
        if (Op0Ty == ExtSrcTy) {
          ZextOp = N0.getOperand(0);
        } else {
          APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
          ZextOp = DAG.getNode(ISD::AND, Op0Ty, N0.getOperand(0),
                               DAG.getConstant(Imm, Op0Ty));
        }
        if (!DCI.isCalledByLegalizer())
          DCI.AddToWorklist(ZextOp.Val);
        // Otherwise, make this a use of a zext.
        return DAG.getSetCC(VT, ZextOp,
                            DAG.getConstant(C1 & APInt::getLowBitsSet(
                                                                ExtDstTyBits,
                                                                ExtSrcTyBits),
                                            ExtDstTy),
                            Cond);
      } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) &&
                 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {

        // SETCC (SETCC), [0|1], [EQ|NE]  -> SETCC
        if (N0.getOpcode() == ISD::SETCC) {
          bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getValue() != 1);
          if (TrueWhenTrue)
            return N0;

          // Invert the condition.
          ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
          CC = ISD::getSetCCInverse(CC,
                                   N0.getOperand(0).getValueType().isInteger());
          return DAG.getSetCC(VT, N0.getOperand(0), N0.getOperand(1), CC);
        }

        if ((N0.getOpcode() == ISD::XOR ||
             (N0.getOpcode() == ISD::AND &&
              N0.getOperand(0).getOpcode() == ISD::XOR &&
              N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
            isa<ConstantSDNode>(N0.getOperand(1)) &&
            cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) {
          // If this is (X^1) == 0/1, swap the RHS and eliminate the xor.  We
          // can only do this if the top bits are known zero.
          unsigned BitWidth = N0.getValueSizeInBits();
          if (DAG.MaskedValueIsZero(N0,
                                    APInt::getHighBitsSet(BitWidth,
                                                          BitWidth-1))) {
            // Okay, get the un-inverted input value.
            SDOperand Val;
            if (N0.getOpcode() == ISD::XOR)
              Val = N0.getOperand(0);
            else {
              assert(N0.getOpcode() == ISD::AND &&
                     N0.getOperand(0).getOpcode() == ISD::XOR);
              // ((X^1)&1)^1 -> X & 1
              Val = DAG.getNode(ISD::AND, N0.getValueType(),
                                N0.getOperand(0).getOperand(0),
                                N0.getOperand(1));
            }
            return DAG.getSetCC(VT, Val, N1,
                                Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
          }
        }
      }

      APInt MinVal, MaxVal;
      unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits();
      if (ISD::isSignedIntSetCC(Cond)) {
        MinVal = APInt::getSignedMinValue(OperandBitSize);
        MaxVal = APInt::getSignedMaxValue(OperandBitSize);
      } else {
        MinVal = APInt::getMinValue(OperandBitSize);
        MaxVal = APInt::getMaxValue(OperandBitSize);
      }

      // Canonicalize GE/LE comparisons to use GT/LT comparisons.
      if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
        if (C1 == MinVal) return DAG.getConstant(1, VT);  // X >= MIN --> true
        // X >= C0 --> X > (C0-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1-1, N1.getValueType()),
                            (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT);
      }

      if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
        if (C1 == MaxVal) return DAG.getConstant(1, VT);  // X <= MAX --> true
        // X <= C0 --> X < (C0+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(C1+1, N1.getValueType()),
                            (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT);
      }

      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal)
        return DAG.getConstant(0, VT);      // X < MIN --> false
      if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal)
        return DAG.getConstant(1, VT);      // X >= MIN --> true
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal)
        return DAG.getConstant(0, VT);      // X > MAX --> false
      if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal)
        return DAG.getConstant(1, VT);      // X <= MAX --> true

      // Canonicalize setgt X, Min --> setne X, Min
      if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);
      // Canonicalize setlt X, Max --> setne X, Max
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal)
        return DAG.getSetCC(VT, N0, N1, ISD::SETNE);

      // If we have setult X, 1, turn it into seteq X, 0
      if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MinVal, N0.getValueType()),
                            ISD::SETEQ);
      // If we have setugt X, Max-1, turn it into seteq X, Max
      else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1)
        return DAG.getSetCC(VT, N0, DAG.getConstant(MaxVal, N0.getValueType()),
                            ISD::SETEQ);

      // If we have "setcc X, C0", check to see if we can shrink the immediate
      // by changing cc.

      // SETUGT X, SINTMAX  -> SETLT X, 0
      if (Cond == ISD::SETUGT && OperandBitSize != 1 &&
          C1 == (~0ULL >> (65-OperandBitSize)))
        return DAG.getSetCC(VT, N0, DAG.getConstant(0, N1.getValueType()),
                            ISD::SETLT);

      // FIXME: Implement the rest of these.

      // Fold bit comparisons when we can.
      if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
          VT == N0.getValueType() && N0.getOpcode() == ISD::AND)
        if (ConstantSDNode *AndRHS =
                    dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3
            // Perform the xform if the AND RHS is a single bit.
            if (isPowerOf2_64(AndRHS->getValue())) {
              return DAG.getNode(ISD::SRL, VT, N0,
                                 DAG.getConstant(Log2_64(AndRHS->getValue()),
                                                 getShiftAmountTy()));
            }
          } else if (Cond == ISD::SETEQ && C1 == AndRHS->getValue()) {
            // (X & 8) == 8  -->  (X & 8) >> 3
            // Perform the xform if C1 is a single bit.
            if (C1.isPowerOf2()) {
              return DAG.getNode(ISD::SRL, VT, N0,
                                 DAG.getConstant(C1.logBase2(),
                                                 getShiftAmountTy()));
            }
          }
        }
    }
  } else if (isa<ConstantSDNode>(N0.Val)) {
    // Ensure that the constant occurs on the RHS.
    return DAG.getSetCC(VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
  }

  if (isa<ConstantFPSDNode>(N0.Val)) {
    // Constant fold or commute setcc.
    SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
    if (O.Val) return O;
  } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) {
    // If the RHS of an FP comparison is a constant, simplify it away in
    // some cases.
    if (CFP->getValueAPF().isNaN()) {
      // If an operand is known to be a nan, we can fold it.
      switch (ISD::getUnorderedFlavor(Cond)) {
      default: assert(0 && "Unknown flavor!");
      case 0:  // Known false.
        return DAG.getConstant(0, VT);
      case 1:  // Known true.
        return DAG.getConstant(1, VT);
      case 2:  // Undefined.
        return DAG.getNode(ISD::UNDEF, VT);
      }
    }

    // Otherwise, we know the RHS is not a NaN.  Simplify the node to drop the
    // constant if knowing that the operand is non-nan is enough.  We prefer to
    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
    // materialize 0.0.
    if (Cond == ISD::SETO || Cond == ISD::SETUO)
      return DAG.getSetCC(VT, N0, N0, Cond);
  }

  if (N0 == N1) {
    // We can always fold X == X for integer setcc's.
    if (N0.getValueType().isInteger())
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    unsigned UOF = ISD::getUnorderedFlavor(Cond);
    if (UOF == 2)   // FP operators that are undefined on NaNs.
      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
      return DAG.getConstant(UOF, VT);
    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
    // if it is not already.
    ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO;
    if (NewCond != Cond)
      return DAG.getSetCC(VT, N0, N1, NewCond);
  }

  if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
      N0.getValueType().isInteger()) {
    if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB ||
        N0.getOpcode() == ISD::XOR) {
      // Simplify (X+Y) == (X+Z) -->  Y == Z
      if (N0.getOpcode() == N1.getOpcode()) {
        if (N0.getOperand(0) == N1.getOperand(0))
          return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(1), Cond);
        if (N0.getOperand(1) == N1.getOperand(1))
          return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(0), Cond);
        if (DAG.isCommutativeBinOp(N0.getOpcode())) {
          // If X op Y == Y op X, try other combinations.
          if (N0.getOperand(0) == N1.getOperand(1))
            return DAG.getSetCC(VT, N0.getOperand(1), N1.getOperand(0), Cond);
          if (N0.getOperand(1) == N1.getOperand(0))
            return DAG.getSetCC(VT, N0.getOperand(0), N1.getOperand(1), Cond);
        }
      }

      if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) {
        if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
          // Turn (X+C1) == C2 --> X == C2-C1
          if (N0.getOpcode() == ISD::ADD && N0.Val->hasOneUse()) {
            return DAG.getSetCC(VT, N0.getOperand(0),
                                DAG.getConstant(RHSC->getValue()-
                                                  LHSR->getValue(),
                                                N0.getValueType()), Cond);
          }

          // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0.
          if (N0.getOpcode() == ISD::XOR)
            // If we know that all of the inverted bits are zero, don't bother
            // performing the inversion.
            if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
              return
                DAG.getSetCC(VT, N0.getOperand(0),
                             DAG.getConstant(LHSR->getAPIntValue() ^
                                               RHSC->getAPIntValue(),
                                             N0.getValueType()),
                             Cond);
        }

        // Turn (C1-X) == C2 --> X == C1-C2
        if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
          if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
            return
              DAG.getSetCC(VT, N0.getOperand(1),
                           DAG.getConstant(SUBC->getAPIntValue() -
                                             RHSC->getAPIntValue(),
                                           N0.getValueType()),
                           Cond);
          }
        }
      }

      // Simplify (X+Z) == X -->  Z == 0
      if (N0.getOperand(0) == N1)
        return DAG.getSetCC(VT, N0.getOperand(1),
                            DAG.getConstant(0, N0.getValueType()), Cond);
      if (N0.getOperand(1) == N1) {
        if (DAG.isCommutativeBinOp(N0.getOpcode()))
          return DAG.getSetCC(VT, N0.getOperand(0),
                              DAG.getConstant(0, N0.getValueType()), Cond);
        else if (N0.Val->hasOneUse()) {
          assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!");
          // (Z-X) == X  --> Z == X<<1
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(),
                                     N1,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, N0.getOperand(0), SH, Cond);
        }
      }
    }

    if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB ||
        N1.getOpcode() == ISD::XOR) {
      // Simplify  X == (X+Z) -->  Z == 0
      if (N1.getOperand(0) == N0) {
        return DAG.getSetCC(VT, N1.getOperand(1),
                            DAG.getConstant(0, N1.getValueType()), Cond);
      } else if (N1.getOperand(1) == N0) {
        if (DAG.isCommutativeBinOp(N1.getOpcode())) {
          return DAG.getSetCC(VT, N1.getOperand(0),
                              DAG.getConstant(0, N1.getValueType()), Cond);
        } else if (N1.Val->hasOneUse()) {
          assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!");
          // X == (Z-X)  --> X<<1 == Z
          SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0,
                                     DAG.getConstant(1, getShiftAmountTy()));
          if (!DCI.isCalledByLegalizer())
            DCI.AddToWorklist(SH.Val);
          return DAG.getSetCC(VT, SH, N1.getOperand(0), Cond);
        }
      }
    }
  }

  // Fold away ALL boolean setcc's.
  SDOperand Temp;
  if (N0.getValueType() == MVT::i1 && foldBooleans) {
    switch (Cond) {
    default: assert(0 && "Unknown integer setcc!");
    case ISD::SETEQ:  // X == Y  -> (X^Y)^1
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      N0 = DAG.getNode(ISD::XOR, MVT::i1, Temp, DAG.getConstant(1, MVT::i1));
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETNE:  // X != Y  -->  (X^Y)
      N0 = DAG.getNode(ISD::XOR, MVT::i1, N0, N1);
      break;
    case ISD::SETGT:  // X >s Y  -->  X == 0 & Y == 1  -->  X^1 & Y
    case ISD::SETULT: // X <u Y  -->  X == 0 & Y == 1  -->  X^1 & Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETLT:  // X <s Y  -->  X == 1 & Y == 0  -->  Y^1 & X
    case ISD::SETUGT: // X >u Y  -->  X == 1 & Y == 0  -->  Y^1 & X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::AND, MVT::i1, N0, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETULE: // X <=u Y  -->  X == 0 | Y == 1  -->  X^1 | Y
    case ISD::SETGE:  // X >=s Y  -->  X == 0 | Y == 1  -->  X^1 | Y
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N0, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N1, Temp);
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(Temp.Val);
      break;
    case ISD::SETUGE: // X >=u Y  -->  X == 1 | Y == 0  -->  Y^1 | X
    case ISD::SETLE:  // X <=s Y  -->  X == 1 | Y == 0  -->  Y^1 | X
      Temp = DAG.getNode(ISD::XOR, MVT::i1, N1, DAG.getConstant(1, MVT::i1));
      N0 = DAG.getNode(ISD::OR, MVT::i1, N0, Temp);
      break;
    }
    if (VT != MVT::i1) {
      if (!DCI.isCalledByLegalizer())
        DCI.AddToWorklist(N0.Val);
      // FIXME: If running after legalize, we probably can't do this.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, VT, N0);
    }
    return N0;
  }

  // Could not fold it.
  return SDOperand();
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
                                    int64_t &Offset) const {
  if (isa<GlobalAddressSDNode>(N)) {
    GA = cast<GlobalAddressSDNode>(N)->getGlobal();
    return true;
  }

  if (N->getOpcode() == ISD::ADD) {
    SDOperand N1 = N->getOperand(0);
    SDOperand N2 = N->getOperand(1);
    if (isGAPlusOffset(N1.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
      ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
      if (V) {
        Offset += V->getSignExtended();
        return true;
      }
    }
  }
  return false;
}
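// Worked example (added for exposition): for a node of the form
// (add (add (GlobalAddress @buf), (Constant 16)), (Constant 8)), the
// recursion above sets GA to @buf and accumulates Offset == 24; a bare
// GlobalAddressSDNode returns true with Offset left unchanged.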
/// isConsecutiveLoad - Return true if LD (which must be a LoadSDNode) is
/// loading 'Bytes' bytes from a location that is 'Dist' units away from the
/// location that the 'Base' load is loading from.
bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base,
                                       unsigned Bytes, int Dist,
                                       const MachineFrameInfo *MFI) const {
  if (LD->getOperand(0).Val != Base->getOperand(0).Val)
    return false;
  MVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDOperand Loc = LD->getOperand(1);
  SDOperand BaseLoc = Base->getOperand(1);
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS  = MFI->getObjectSize(FI);
    int BFS = MFI->getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
  }

  GlobalValue *GV1 = NULL;
  GlobalValue *GV2 = NULL;
  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
  bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist*Bytes);
  return false;
}


SDOperand TargetLowering::
PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  // Default implementation: no optimization.
  return SDOperand();
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//


TargetLowering::ConstraintType
TargetLowering::getConstraintType(const std::string &Constraint) const {
  // FIXME: lots more standard ones to handle.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'r': return C_RegisterClass;
    case 'm':    // memory
    case 'o':    // offsetable
    case 'V':    // not offsetable
      return C_Memory;
    case 'i':    // Simple Integer or Relocatable Constant
    case 'n':    // Simple Integer
    case 's':    // Relocatable Constant
    case 'X':    // Allow ANY value.
    case 'I':    // Target registers.
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'O':
    case 'P':
      return C_Other;
    }
  }

  if (Constraint.size() > 1 && Constraint[0] == '{' &&
      Constraint[Constraint.size()-1] == '}')
    return C_Register;
  return C_Unknown;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const {
  if (ConstraintVT.isInteger())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "f";      // works for many targets
  return 0;
}
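
// Illustrative sketch, not part of the original revision: the single-letter
// classification used by getConstraintType above, restated as a standalone
// function so the letter groups are easy to see.  ClassifyConstraintLetter is
// a hypothetical name; brace-enclosed "{name}" constraints are classified
// separately, as in getConstraintType itself.
static const char *ClassifyConstraintLetter(char C) {
  switch (C) {
  case 'r':                                return "C_RegisterClass";
  case 'm': case 'o': case 'V':            return "C_Memory";
  case 'i': case 'n': case 's': case 'X':  return "C_Other";
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O': case 'P':  return "C_Other";
  default:                                 return "C_Unknown";
  }
}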
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
                                                  char ConstraintLetter,
                                                  std::vector<SDOperand> &Ops,
                                                  SelectionDAG &DAG) const {
  switch (ConstraintLetter) {
  default: break;
  case 'X':     // Allows any operand; labels (basic block) use this.
    if (Op.getOpcode() == ISD::BasicBlock) {
      Ops.push_back(Op);
      return;
    }
    // fall through
  case 'i':    // Simple Integer or Relocatable Constant
  case 'n':    // Simple Integer
  case 's': {  // Relocatable Constant
    // These operands accept values of the form (GV+C), where C may be folded
    // in as an offset of GV, or it may be explicitly added.  It is also
    // possible, and fine, for either GV or C to be missing.
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);

    // If we have "(add GV, C)", pull out GV/C.
    if (Op.getOpcode() == ISD::ADD) {
      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C == 0 || GA == 0) {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
      }
      if (C == 0 || GA == 0)
        C = 0, GA = 0;
    }

    // If we find a valid operand, map to the TargetXXX version so that the
    // value itself doesn't get selected.
    if (GA) {   // Either &GV or &GV+C
      if (ConstraintLetter != 'n') {
        int64_t Offs = GA->getOffset();
        if (C) Offs += C->getValue();
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
                                                 Op.getValueType(), Offs));
        return;
      }
    }
    if (C) {   // just C, no GV.
      // Simple constants are not allowed for 's'.
      if (ConstraintLetter != 's') {
        Ops.push_back(DAG.getTargetConstant(C->getValue(), Op.getValueType()));
        return;
      }
    }
    break;
  }
  }
}

std::vector<unsigned> TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  return std::vector<unsigned>();
}


std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint[0] != '{')
    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");

  // Remove the braces from around the name.
  std::string RegName(Constraint.begin()+1, Constraint.end()-1);

  // Figure out which register class contains this reg.
  const TargetRegisterInfo *RI = TM.getRegisterInfo();
  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
       E = RI->regclass_end(); RCI != E; ++RCI) {
    const TargetRegisterClass *RC = *RCI;

    // If none of the value types for this register class are valid, we
    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
    bool isLegal = false;
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (isTypeLegal(*I)) {
        isLegal = true;
        break;
      }
    }

    if (!isLegal) continue;

    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
         I != E; ++I) {
      if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
        return std::make_pair(*I, RC);
    }
  }

  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
}
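
// Illustrative sketch, not part of the original revision: the name match in
// getRegForInlineAsmConstraint is case-insensitive (via StringsEqualNoCase
// from llvm/ADT/StringExtras.h), so "{EAX}" and "{eax}" resolve to the same
// register.  A minimal self-contained equivalent of that comparison for
// ASCII register names (RegNamesEqualNoCase is a hypothetical helper):
static bool RegNamesEqualNoCase(const char *A, const char *B) {
  for (; *A && *B; ++A, ++B) {
    char CA = (*A >= 'A' && *A <= 'Z') ? char(*A - 'A' + 'a') : *A;
    char CB = (*B >= 'A' && *B <= 'Z') ? char(*B - 'A' + 'a') : *B;
    if (CA != CB)
      return false;
  }
  return *A == *B;   // equal only if both names end together
}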
//===----------------------------------------------------------------------===//
//  Constraint Selection.

/// getConstraintGenerality - Return an integer indicating how general CT
/// is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  default: assert(0 && "Unknown constraint type!");
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
    return 3;
  }
}

/// ChooseConstraint - If there are multiple different constraints that we
/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
/// This is somewhat tricky: constraints fall into four classes:
///    Other         -> immediates and magic values
///    Register      -> one specific register
///    RegisterClass -> a group of regs
///    Memory        -> memory
/// Ideally, we would pick the most specific constraint possible: if we have
/// something that fits into a register, we would pick it.  The problem here
/// is that if we have something that could either be in a register or in
/// memory, then picking the register could cause selection of *other*
/// operands to fail: they might only succeed if we pick memory.  Because of
/// this the heuristic we use is:
///
///  1) If there is an 'other' constraint, and if the operand is valid for
///     that constraint, use it.  This makes us take advantage of 'i'
///     constraints when available.
///  2) Otherwise, pick the most general constraint present.  This prefers
///     'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering &TLI,
                             SDOperand Op, SelectionDAG *DAG) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
      TLI.getConstraintType(OpInfo.Codes[i]);

    // If this is an 'other' constraint, see if the operand is valid for it.
    // For example, on X86 we might have an 'rI' constraint.  If the operand
    // is an integer in the range [0..31] we want to use I (saving a load
    // of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other && Op.Val) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      std::vector<SDOperand> ResultOps;
      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
                                       ResultOps, *DAG);
      if (!ResultOps.empty()) {
        BestType = CType;
        BestIdx = i;
        break;
      }
    }

    // If this constraint letter is more general than the previous one,
    // use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}
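
// Illustrative sketch, not part of the original revision: step 2 of the
// heuristic above in isolation.  Given the multi-option code "imr" split into
// single letters, this picks 'm' (C_Memory, generality 3) over 'r'
// (C_RegisterClass, 2) and 'i' (C_Other, 0).  Both helpers are hypothetical
// names used only for illustration.
static unsigned ToyGeneralityOf(char C) {
  switch (C) {
  case 'r':                     return 2;   // C_RegisterClass
  case 'm': case 'o': case 'V': return 3;   // C_Memory
  default:                      return 0;   // C_Other / immediates
  }
}

static char PickMostGeneralLetter(const std::string &Codes) {
  char Best = Codes[0];
  for (unsigned i = 1, e = Codes.size(); i != e; ++i)
    if (ToyGeneralityOf(Codes[i]) > ToyGeneralityOf(Best))
      Best = Codes[i];
  return Best;   // PickMostGeneralLetter("imr") == 'm'
}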
/// ComputeConstraintToUse - Determines the constraint code and constraint
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                            SDOperand Op,
                                            SelectionDAG *DAG) const {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
  } else {
    ChooseConstraint(OpInfo, *this, Op, DAG);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).
    if (isa<BasicBlock>(OpInfo.CallOperandVal) ||
        isa<ConstantInt>(OpInfo.CallOperandVal))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  // The default implementation supports only a conservative RISC-style
  // addressing mode: r+r and r+i.

  // Allow only a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  }

  return true;
}
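
// Illustrative sketch, not part of the original revision: the default policy
// above restated over a plain struct so it can be exercised standalone.  The
// field names mirror TargetLowering::AddrMode; ToyAddrMode and
// ToyIsLegalAddrMode are hypothetical names.  For example,
// {1024, true, 0, false} ("r + 1024") and {0, true, 1, false} ("r + r") are
// legal, while {1024, true, 1, false} ("r + r + 1024") is not.
struct ToyAddrMode {
  long long BaseOffs;   // constant displacement
  bool HasBaseReg;      // is a base register present?
  int Scale;            // multiplier on the index register, 0 if none
  bool HasBaseGV;       // is a global-variable base present?
};

static bool ToyIsLegalAddrMode(const ToyAddrMode &AM) {
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;                       // displacement must fit in 16 bits
  if (AM.HasBaseGV)
    return false;                       // no global bases
  if (AM.Scale == 1 && AM.HasBaseReg && AM.BaseOffs)
    return false;                       // "r+r+i" is not allowed
  if (AM.Scale == 2 && (AM.HasBaseReg || AM.BaseOffs))
    return false;                       // only bare 2*r, treated as r+r
  return true;  // like the switch above, other scales fall through as legal
}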
// Magic for divide replacement

struct ms {
  int64_t m;  // magic number
  int64_t s;  // shift amount
};

struct mu {
  uint64_t m; // magic number
  int64_t a;  // add indicator
  int64_t s;  // shift amount
};

/// magic32 - calculate the magic numbers required to codegen an integer sdiv
/// as a sequence of multiplies and shifts.  Requires that the divisor not be
/// 0, 1, or -1.
static ms magic32(int32_t d) {
  int32_t p;
  uint32_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint32_t two31 = 0x80000000U;
  struct ms mag;

  ad = d >= 0 ? (uint32_t)d : -(uint32_t)d;  // |d|; avoids abs() overflow on INT_MIN
  t = two31 + ((uint32_t)d >> 31);
  anc = t - 1 - t%ad;       // absolute value of nc
  p = 31;                   // initialize p
  q1 = two31/anc;           // initialize q1 = 2^p/|nc|
  r1 = two31 - q1*anc;      // initialize r1 = rem(2^p, |nc|)
  q2 = two31/ad;            // initialize q2 = 2^p/|d|
  r2 = two31 - q2*ad;       // initialize r2 = rem(2^p, |d|)
  do {
    p = p + 1;
    q1 = 2*q1;              // update q1 = 2^p/|nc|
    r1 = 2*r1;              // update r1 = rem(2^p, |nc|)
    if (r1 >= anc) {        // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;              // update q2 = 2^p/|d|
    r2 = 2*r2;              // update r2 = rem(2^p, |d|)
    if (r2 >= ad) {         // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = (int32_t)(q2 + 1);  // make sure to sign extend
  if (d < 0) mag.m = -mag.m;  // resulting magic number
  mag.s = p - 32;             // resulting shift
  return mag;
}

/// magicu32 - calculate the magic numbers required to codegen an integer udiv
/// as a sequence of multiplies, adds and shifts.  Requires that the divisor
/// not be 0.
static mu magicu32(uint32_t d) {
  int32_t p;
  uint32_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;                 // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 31;                     // initialize p
  q1 = 0x80000000/nc;         // initialize q1 = 2^p/nc
  r1 = 0x80000000 - q1*nc;    // initialize r1 = rem(2^p, nc)
  q2 = 0x7FFFFFFF/d;          // initialize q2 = (2^p - 1)/d
  r2 = 0x7FFFFFFF - q2*d;     // initialize r2 = rem(2^p - 1, d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;          // update q1
      r1 = 2*r1 - nc;         // update r1
    } else {
      q1 = 2*q1;              // update q1
      r1 = 2*r1;              // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFF) magu.a = 1;
      q2 = 2*q2 + 1;          // update q2
      r2 = 2*r2 + 1 - d;      // update r2
    } else {
      if (q2 >= 0x80000000) magu.a = 1;
      q2 = 2*q2;              // update q2
      r2 = 2*r2 + 1;          // update r2
    }
    delta = d - 1 - r2;
  } while (p < 64 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;            // resulting magic number
  magu.s = p - 32;            // resulting shift
  return magu;
}
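
// Illustrative sketch, not part of the original revision: using the magic32
// result the way BuildSDIV (below) emits code.  For d == 7, magic32 computes
// the classic Hacker's Delight constants m == 0x92492493 and s == 2; since
// d > 0 and m < 0 the numerator is added back, then an arithmetic shift and a
// sign-bit correction produce the exact truncating quotient.  SDiv7ViaMagic
// is a hypothetical helper used only for illustration.
static int32_t SDiv7ViaMagic(int32_t X) {
  const int32_t M = (int32_t)0x92492493U;          // magic32(7).m
  int32_t Q = (int32_t)(((int64_t)M * X) >> 32);   // MULHS: high 32 bits
  Q += X;                                          // d > 0, m < 0: add numerator
  Q >>= 2;                                         // SRA by magic32(7).s
  Q += (uint32_t)Q >> 31;                          // add the sign bit (SRL+ADD)
  return Q;                                        // equals X/7 for all X
}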
/// magic64 - calculate the magic numbers required to codegen an integer sdiv
/// as a sequence of multiplies and shifts.  Requires that the divisor not be
/// 0, 1, or -1.
static ms magic64(int64_t d) {
  int64_t p;
  uint64_t ad, anc, delta, q1, r1, q2, r2, t;
  const uint64_t two63 = 9223372036854775808ULL;   // 2^63
  struct ms mag;

  ad = d >= 0 ? (uint64_t)d : -(uint64_t)d;  // |d|; avoids overflow on INT64_MIN
  t = two63 + ((uint64_t)d >> 63);
  anc = t - 1 - t%ad;       // absolute value of nc
  p = 63;                   // initialize p
  q1 = two63/anc;           // initialize q1 = 2^p/|nc|
  r1 = two63 - q1*anc;      // initialize r1 = rem(2^p, |nc|)
  q2 = two63/ad;            // initialize q2 = 2^p/|d|
  r2 = two63 - q2*ad;       // initialize r2 = rem(2^p, |d|)
  do {
    p = p + 1;
    q1 = 2*q1;              // update q1 = 2^p/|nc|
    r1 = 2*r1;              // update r1 = rem(2^p, |nc|)
    if (r1 >= anc) {        // must be unsigned comparison
      q1 = q1 + 1;
      r1 = r1 - anc;
    }
    q2 = 2*q2;              // update q2 = 2^p/|d|
    r2 = 2*r2;              // update r2 = rem(2^p, |d|)
    if (r2 >= ad) {         // must be unsigned comparison
      q2 = q2 + 1;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));

  mag.m = q2 + 1;
  if (d < 0) mag.m = -mag.m;  // resulting magic number
  mag.s = p - 64;             // resulting shift
  return mag;
}

/// magicu64 - calculate the magic numbers required to codegen an integer udiv
/// as a sequence of multiplies, adds and shifts.  Requires that the divisor
/// not be 0.
static mu magicu64(uint64_t d) {
  int64_t p;
  uint64_t nc, delta, q1, r1, q2, r2;
  struct mu magu;
  magu.a = 0;                            // initialize "add" indicator
  nc = -1 - (-d)%d;
  p = 63;                                // initialize p
  q1 = 0x8000000000000000ull/nc;         // initialize q1 = 2^p/nc
  r1 = 0x8000000000000000ull - q1*nc;    // initialize r1 = rem(2^p, nc)
  q2 = 0x7FFFFFFFFFFFFFFFull/d;          // initialize q2 = (2^p - 1)/d
  r2 = 0x7FFFFFFFFFFFFFFFull - q2*d;     // initialize r2 = rem(2^p - 1, d)
  do {
    p = p + 1;
    if (r1 >= nc - r1) {
      q1 = 2*q1 + 1;                     // update q1
      r1 = 2*r1 - nc;                    // update r1
    } else {
      q1 = 2*q1;                         // update q1
      r1 = 2*r1;                         // update r1
    }
    if (r2 + 1 >= d - r2) {
      if (q2 >= 0x7FFFFFFFFFFFFFFFull) magu.a = 1;
      q2 = 2*q2 + 1;                     // update q2
      r2 = 2*r2 + 1 - d;                 // update r2
    } else {
      if (q2 >= 0x8000000000000000ull) magu.a = 1;
      q2 = 2*q2;                         // update q2
      r2 = 2*r2 + 1;                     // update r2
    }
    delta = d - 1 - r2;
  } while (p < 128 && (q1 < delta || (q1 == delta && r1 == 0)));
  magu.m = q2 + 1;                       // resulting magic number
  magu.s = p - 64;                       // resulting shift
  return magu;
}
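
// Illustrative sketch, not part of the original revision: using the magicu32
// result the way BuildUDIV (below) emits code when the "add" indicator is
// set.  For d == 7, magicu32 computes m == 0x24924925, a == 1, s == 3, so the
// raw magic product needs the extra subtract/halve/add fixup before the final
// shift by s-1.  UDiv7ViaMagic is a hypothetical helper used only for
// illustration.
static uint32_t UDiv7ViaMagic(uint32_t X) {
  const uint32_t M = 0x24924925U;                    // magicu32(7).m
  uint32_t Q = (uint32_t)(((uint64_t)M * X) >> 32);  // MULHU: high 32 bits
  uint32_t NPQ = (X - Q) >> 1;                       // SUB, then SRL by 1
  return (NPQ + Q) >> 2;                             // ADD, then SRL by s-1
}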
/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildSDIV only operates on i32 or i64

  int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
  ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q;
  if (isOperationLegal(ISD::MULHS, VT))
    Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::SMUL_LOHI, VT))
    Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
                              N->getOperand(0),
                              DAG.getConstant(magics.m, VT)).Val, 1);
  else
    return SDOperand();       // No mulhs or equivalent.
  // If d > 0 and m < 0, add the numerator.
  if (d > 0 && magics.m < 0) {
    Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // If d < 0 and m > 0, subtract the numerator.
  if (d < 0 && magics.m > 0) {
    Q = DAG.getNode(ISD::SUB, VT, Q, N->getOperand(0));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Shift right algebraic if shift value is nonzero.
  if (magics.s > 0) {
    Q = DAG.getNode(ISD::SRA, VT, Q,
                    DAG.getConstant(magics.s, getShiftAmountTy()));
    if (Created)
      Created->push_back(Q.Val);
  }
  // Extract the sign bit and add it to the quotient.
  SDOperand T =
    DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
                                                 getShiftAmountTy()));
  if (Created)
    Created->push_back(T.Val);
  return DAG.getNode(ISD::ADD, VT, Q, T);
}

/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant, return
/// a DAG expression to select that will generate the same value by multiplying
/// by a magic number.  See:
/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                    std::vector<SDNode*>* Created) const {
  MVT VT = N->getValueType(0);

  // Check to see if we can do this.
  if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
    return SDOperand();       // BuildUDIV only operates on i32 or i64

  uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
  mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);

  // Multiply the numerator (operand 0) by the magic value.
  SDOperand Q;
  if (isOperationLegal(ISD::MULHU, VT))
    Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
                    DAG.getConstant(magics.m, VT));
  else if (isOperationLegal(ISD::UMUL_LOHI, VT))
    Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
                              N->getOperand(0),
                              DAG.getConstant(magics.m, VT)).Val, 1);
  else
    return SDOperand();       // No mulhu or equivalent.
  if (Created)
    Created->push_back(Q.Val);

  if (magics.a == 0) {
    return DAG.getNode(ISD::SRL, VT, Q,
                       DAG.getConstant(magics.s, getShiftAmountTy()));
  } else {
    SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q);
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::SRL, VT, NPQ,
                      DAG.getConstant(1, getShiftAmountTy()));
    if (Created)
      Created->push_back(NPQ.Val);
    NPQ = DAG.getNode(ISD::ADD, VT, NPQ, Q);
    if (Created)
      Created->push_back(NPQ.Val);
    return DAG.getNode(ISD::SRL, VT, NPQ,
                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
  }
}
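
// Illustrative sketch, not part of the original revision: a brute-force spot
// check of the two worked examples above (SDiv7ViaMagic and UDiv7ViaMagic).
// Comparing the magic-number sequence against the compiler's own divide over
// a range of inputs is the usual way such constants are sanity-tested.
// CheckMagicDiv7Samples is a hypothetical helper used only for illustration.
static bool CheckMagicDiv7Samples() {
  for (int32_t X = -100000; X <= 100000; ++X)
    if (SDiv7ViaMagic(X) != X / 7)   // signed divide truncates toward zero
      return false;
  for (uint32_t X = 0; X != 200000; ++X)
    if (UDiv7ViaMagic(X) != X / 7)
      return false;
  return true;
}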