TargetLowering.cpp revision 4217ca8dc175f7268a4335c8406dedd901e8e631
//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// TargetLowering constructor - Initialize all of the target-independent
/// lowering parameters to conservative defaults; target backends override
/// these after construction (e.g. via setOperationAction / addRegisterClass).
TargetLowering::TargetLowering(TargetMachine &tm)
  : TM(tm), TD(TM.getTargetData()) {
  // OpActions is a fixed-size table indexed by ISD opcode; make sure every
  // opcode fits.
  assert(ISD::BUILTIN_OP_END <= 128 &&
         "Fixed size array in TargetLowering is not large enough!");
  // All operations default to being supported (action value 0).
  memset(OpActions, 0, sizeof(OpActions));

  IsLittleEndian = TD.isLittleEndian();
  // Shift amounts, setcc results, and pointers all default to the target's
  // native pointer-sized integer type.
  ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD.getIntPtrType());
  ShiftAmtHandling = Undefined;
  // No register classes are registered yet; targets fill this table in.
  memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  // Default expansion thresholds for memset/memcpy/memmove lowering.
  maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
  allowUnalignedMemoryAccesses = false;
  UseUnderscoreSetJmpLongJmp = false;
  IntDivIsCheap = false;
  Pow2DivIsCheap = false;
  StackPointerRegisterToSaveRestore = 0;
  SchedPreferenceInfo = SchedulingForLatency;
}

TargetLowering::~TargetLowering() {}

/// setValueTypeAction - Set the action for a particular value type.  This
/// assumes an action has not already been set for this value type.
static void SetValueTypeAction(MVT::ValueType VT,
                               TargetLowering::LegalizeAction Action,
                               TargetLowering &TLI,
                               MVT::ValueType *TransformToType,
                             TargetLowering::ValueTypeActionImpl &ValueTypeActions) {
  ValueTypeActions.setTypeAction(VT, Action);
  if (Action == TargetLowering::Promote) {
    // Pick the type this VT gets promoted to.
    MVT::ValueType PromoteTo;
    if (VT == MVT::f32)
      PromoteTo = MVT::f64;
    else {
      // Walk up the (ordered) MVT enum until we find a legal register type.
      // This relies on integer MVTs being consecutive and sorted by size.
      unsigned LargerReg = VT+1;
      while (!TLI.isTypeLegal((MVT::ValueType)LargerReg)) {
        ++LargerReg;
        assert(MVT::isInteger((MVT::ValueType)LargerReg) &&
               "Nothing to promote to??");
      }
      PromoteTo = (MVT::ValueType)LargerReg;
    }

    assert(MVT::isInteger(VT) == MVT::isInteger(PromoteTo) &&
           MVT::isFloatingPoint(VT) == MVT::isFloatingPoint(PromoteTo) &&
           "Can only promote from int->int or fp->fp!");
    assert(VT < PromoteTo && "Must promote to a larger type!");
    TransformToType[VT] = PromoteTo;
  } else if (Action == TargetLowering::Expand) {
    assert((VT == MVT::Vector || MVT::isInteger(VT)) && VT > MVT::i8 &&
           "Cannot expand this type: target must support SOME integer reg!");
    // Expand to the next smaller integer type!  (Again relies on the MVT
    // enum ordering: VT-1 is the next smaller integer type.)
    TransformToType[VT] = (MVT::ValueType)(VT-1);
  }
}


/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
  // ValueTypeActions packs two bits per VT into a fixed-size word.
  assert(MVT::LAST_VALUETYPE <= 32 &&
         "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to one register per value.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i)
    NumElementsForVT[i] = 1;

  // Find the largest integer register class by scanning down from i128.
  unsigned LargestIntReg = MVT::i128;
  for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  unsigned ExpandedReg = LargestIntReg; ++LargestIntReg;
  for (++ExpandedReg; MVT::isInteger((MVT::ValueType)ExpandedReg);++ExpandedReg)
    NumElementsForVT[ExpandedReg] = 2*NumElementsForVT[ExpandedReg-1];

  // Inspect all of the ValueType's possible, deciding how to process them.
  for (unsigned IntReg = MVT::i1; IntReg <= MVT::i128; ++IntReg)
    // If we are expanding this type, expand it!
    if (getNumElements((MVT::ValueType)IntReg) != 1)
      SetValueTypeAction((MVT::ValueType)IntReg, Expand, *this, TransformToType,
                         ValueTypeActions);
    else if (!isTypeLegal((MVT::ValueType)IntReg))
      // Otherwise, if we don't have native support, we must promote to a
      // larger type.
      SetValueTypeAction((MVT::ValueType)IntReg, Promote, *this,
                         TransformToType, ValueTypeActions);
    else
      TransformToType[(MVT::ValueType)IntReg] = (MVT::ValueType)IntReg;

  // If the target does not have native support for F32, promote it to F64.
  if (!isTypeLegal(MVT::f32))
    SetValueTypeAction(MVT::f32, Promote, *this,
                       TransformToType, ValueTypeActions);
  else
    TransformToType[MVT::f32] = MVT::f32;

  // Set MVT::Vector to always be Expanded
  SetValueTypeAction(MVT::Vector, Expand, *this, TransformToType,
                     ValueTypeActions);

  // F64 must be natively supported; there is nothing to promote it to.
  assert(isTypeLegal(MVT::f64) && "Target does not support FP?");
  TransformToType[MVT::f64] = MVT::f64;
}

/// getTargetNodeName - Return a human-readable name for a target-specific
/// DAG node, or null.  Targets with custom ISD nodes override this.
const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return NULL;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer.  If so, check to see if there
/// are any bits set in the constant that are not demanded.  If so, shrink the
/// constant and return true.
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
                                                        uint64_t Demanded) {
  // FIXME: ISD::SELECT
  switch(Op.getOpcode()) {
  default: break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    // Only handles a constant on the RHS (operand 1).
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if ((~Demanded & C->getValue()) != 0) {
        // The constant has bits set that nobody demands; rebuild the node
        // with those bits masked off.
        MVT::ValueType VT = Op.getValueType();
        SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
                                    DAG.getConstant(Demanded & C->getValue(),
                                                    VT));
        return CombineTo(Op, New);
      }
    break;
  }
  return false;
}

/// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream.  If we can
/// use this information to simplify Op, create a new simplified DAG node and
/// return true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller).  The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                          uint64_t &KnownZero,
                                          uint64_t &KnownOne,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  // Other users may use these bits.
  if (!Op.Val->hasOneUse()) {
    if (Depth != 0) {
      // If not at the root, Just compute the KnownZero/KnownOne bits to
      // simplify things downstream.
      ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedMask to all bits.
    DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
  } else if (DemandedMask == 0) {
    // Not demanding any bits from Op: replace it with UNDEF.
    if (Op.getOpcode() != ISD::UNDEF)
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::UNDEF, Op.getValueType()));
    return false;
  } else if (Depth == 6) {        // Limit search depth.
    return false;
  }

  uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return false;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    // If something is known zero on the RHS, the bits aren't demanded on the
    // LHS.
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~KnownZero2 & KnownOne)==(DemandedMask & ~KnownZero2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownZero & KnownOne2)==(DemandedMask & ~KnownZero))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
      return true;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  case ISD::OR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    // Bits known one on the RHS are not demanded on the LHS.
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
                             KnownZero2, KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~KnownZero) & KnownOne2) ==
        (DemandedMask & (~KnownZero)))
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & (~KnownZero2) & KnownOne) ==
        (DemandedMask & (~KnownZero2)))
      return TLO.CombineTo(Op, Op.getOperand(1));
    // If the RHS is a constant, see if we can simplify it.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  case ISD::XOR:
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & KnownZero) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(0));
    if ((DemandedMask & KnownZero2) == DemandedMask)
      return TLO.CombineTo(Op, Op.getOperand(1));

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (uint64_t UnknownBits = DemandedMask & ~(KnownZeroOut|KnownOneOut))
      if ((UnknownBits & (KnownZero|KnownZero2)) == UnknownBits)
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
                                                 Op.getOperand(0),
                                                 Op.getOperand(1)));
    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
      if ((KnownOne & KnownOne2) == KnownOne) {
        MVT::ValueType VT = Op.getValueType();
        SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
                                                 ANDC));
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    KnownZero = KnownZeroOut;
    KnownOne = KnownOneOut;
    break;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    break;
  case ISD::SELECT:
    // Operands 1 and 2 are the true/false values; the condition (operand 0)
    // is not simplified here.
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
                             KnownOne, TLO, Depth+1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
                             KnownOne2, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
      return true;

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case ISD::SHL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // Shifting left discards high source bits: demand the source bits
      // shifted right by the shift amount.
      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> SA->getValue(),
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      KnownZero <<= SA->getValue();
      KnownOne <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    break;
  case ISD::SRL:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      if (SimplifyDemandedBits(Op.getOperand(0),
                               (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne &= TypeMask;
      KnownZero >>= ShAmt;
      KnownOne >>= ShAmt;
      KnownZero |= HighBits;  // high bits known zero.
    }
    break;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      MVT::ValueType VT = Op.getValueType();
      unsigned ShAmt = SA->getValue();

      // Compute the new bits that are at the top now.
      uint64_t HighBits = (1ULL << ShAmt)-1;
      HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
      uint64_t TypeMask = MVT::getIntVTBitMask(VT);

      if (SimplifyDemandedBits(Op.getOperand(0),
                               (DemandedMask << ShAmt) & TypeMask,
                               KnownZero, KnownOne, TLO, Depth+1))
        return true;
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero &= TypeMask;
      KnownOne &= TypeMask;
      KnownZero >>= SA->getValue();
      KnownOne >>= SA->getValue();

      // Handle the sign bits.
      uint64_t SignBit = MVT::getIntVTSignBit(VT);
      SignBit >>= SA->getValue();  // Adjust to where it is now in the mask.

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
                                                 Op.getOperand(1)));
      } else if (KnownOne & SignBit) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    MVT::ValueType VT = Op.getValueType();
    MVT::ValueType EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

    // Sign or Zero extension.  Compute the bits in the result that are not
    // present in the input.
    uint64_t NotIn = ~MVT::getIntVTBitMask(EVT);
    uint64_t NewBits = MVT::getIntVTBitMask(VT) & NotIn;

    // Sign extension.
    uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
    int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);

    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if (NewBits & DemandedMask)
      InputDemandedBits |= InSignBit;

    if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits,
                             KnownZero, KnownOne, TLO, Depth+1))
      return true;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded
    // convert this into a zero extension.
    if ((KnownZero & InSignBit) || (NewBits & ~DemandedMask) == NewBits) {
      return TLO.CombineTo(Op, Op.getOperand(0));
    } else if (KnownOne & InSignBit) {    // Input sign bit known set
      KnownOne |= NewBits;
      KnownZero &= ~NewBits;
    } else {                              // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne &= ~NewBits;
    }
    break;
  }
  case ISD::ADD:
    if (ConstantSDNode *AA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero,
                               KnownOne, TLO, Depth+1))
        return true;
      // Compute the KnownOne/KnownZero masks for the constant, so we can set
      // KnownZero appropriately if we're adding a constant that has all low
      // bits cleared.
      ComputeMaskedBits(Op.getOperand(1),
                        MVT::getIntVTBitMask(Op.getValueType()),
                        KnownZero2, KnownOne2, Depth+1);

      // An add cannot create carries into the common low known-zero bits, so
      // that many low result bits are known zero.  (NOTE: this local
      // KnownZeroOut intentionally shadows the outer declaration.)
      uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
                                       CountTrailingZeros_64(~KnownZero2));
      KnownZero = (1ULL << KnownZeroOut) - 1;
      KnownOne = 0;

      SDOperand SH = Op.getOperand(0);
      // fold (add (shl x, c1), (shl c2, c1)) -> (shl (add x, c2), c1)
      if (KnownZero && SH.getOpcode() == ISD::SHL && SH.Val->hasOneUse() &&
          Op.Val->hasOneUse()) {
        if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(SH.getOperand(1))) {
          MVT::ValueType VT = Op.getValueType();
          unsigned ShiftAmt = SA->getValue();
          uint64_t AddAmt = AA->getValue();
          uint64_t AddShr = AddAmt >> ShiftAmt;
          // Only fold if the add constant is an exact multiple of 1<<ShiftAmt.
          if (AddAmt == (AddShr << ShiftAmt)) {
            SDOperand ADD = TLO.DAG.getNode(ISD::ADD, VT, SH.getOperand(0),
                                            TLO.DAG.getConstant(AddShr, VT));
            SDOperand SHL = TLO.DAG.getNode(ISD::SHL, VT, ADD,SH.getOperand(1));
            return TLO.CombineTo(Op, SHL);
          }
        }
      }
    }
    break;
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    // Bit counts are in the range [0, bitwidth]; all higher bits are zero.
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne = 0;
    break;
  }
  }
  return false;
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
bool TargetLowering::MaskedValueIsZero(SDOperand Op, uint64_t Mask,
                                       unsigned Depth) const {
  uint64_t KnownZero, KnownOne;
  ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
                                       uint64_t &KnownZero, uint64_t &KnownOne,
                                       unsigned Depth) const {
  KnownZero = KnownOne = 0;   // Don't know anything.
  if (Depth == 6 || Mask == 0)
    return;  // Limit search depth.

  uint64_t KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    Mask &= ~KnownZero;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    // Bits known one on the RHS don't need to be analyzed on the LHS.
    Mask &= ~KnownOne;
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    uint64_t KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::SELECT:
    // Operands 1/2 are the two selected values; a bit is known only if it is
    // known the same way in both.
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    // For select_cc the selected values are operands 2 and 3.
    ComputeMaskedBits(Op.getOperand(3), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
      KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Mask >>= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= SA->getValue();
      KnownOne <<= SA->getValue();
      KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t HighBits = (1ULL << SA->getValue())-1;
      HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
      Mask <<= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero >>= SA->getValue();
      KnownOne >>= SA->getValue();
      KnownZero |= HighBits;  // high bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      uint64_t HighBits = (1ULL << SA->getValue())-1;
      HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
      Mask <<= SA->getValue();
      ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
      KnownZero >>= SA->getValue();
      KnownOne >>= SA->getValue();

      // Handle the sign bits: an arithmetic shift replicates the (shifted)
      // sign bit into the vacated high bits.
      uint64_t SignBit = 1ULL << (MVT::getSizeInBits(Op.getValueType())-1);
      SignBit >>= SA->getValue();  // Adjust to where it is now in the mask.

      if (KnownZero & SignBit) {       // New bits are known zero.
        KnownZero |= HighBits;
      } else if (KnownOne & SignBit) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    return;
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP: {
    // Bit counts are in the range [0, bitwidth]; all higher bits are zero.
    MVT::ValueType VT = Op.getValueType();
    unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
    KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
    KnownOne = 0;
    return;
  }
  case ISD::ZEXTLOAD: {
    // A zero-extending load clears all bits above the loaded width.
    unsigned SrcBits =
      MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(3))->getVT());
    KnownZero |= ~((1ULL << SrcBits)-1);
    return;
  }
  case ISD::ZERO_EXTEND: {
    unsigned SrcBits =
      MVT::getSizeInBits(Op.getOperand(0).getValueType());
    KnownZero |= ~((1ULL << SrcBits)-1);
    return;
  }
  case ISD::ANY_EXTEND: {
    // The extended bits are undefined: drop any knowledge about them.
    unsigned SrcBits =
      MVT::getSizeInBits(Op.getOperand(0).getValueType());
    KnownZero &= ((1ULL << SrcBits)-1);
    KnownOne &= ((1ULL << SrcBits)-1);
    return;
  }
  case ISD::AssertZext: {
    unsigned SrcBits =
      MVT::getSizeInBits(cast<VTSDNode>(Op.getOperand(1))->getVT());
    KnownZero |= ~((1ULL << SrcBits)-1);
    return;
  }
  case ISD::ADD: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), Mask, KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS;
    uint64_t KnownZeroOut = std::min(CountTrailingZeros_64(~KnownZero),
                                     CountTrailingZeros_64(~KnownZero2));

    KnownZero = (1ULL << KnownZeroOut) - 1;
    KnownOne = 0;
    return;
  }
  case ISD::SUB:
    // We know that the top bits of C-X are clear if X contains less bits
    // than C (i.e. no wrap-around can happen).  For example, 20-X is
    // positive if we can prove that X is >= 0 and < 16.
    return;
  default:
    // Allow the target to implement this method for its nodes.
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
    return;
  }
}

/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.  The default implementation claims no
/// knowledge; targets with custom nodes override this.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
                                                    uint64_t Mask,
                                                    uint64_t &KnownZero,
                                                    uint64_t &KnownOne,
                                                    unsigned Depth) const {
  assert(Op.getOpcode() >= ISD::BUILTIN_OP_END &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");
  KnownZero = 0;
  KnownOne = 0;
}

//===----------------------------------------------------------------------===//
//  Inline Assembler Implementation Methods
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
TargetLowering::getConstraintType(char ConstraintLetter) const {
  // FIXME: lots more standard ones to handle.
718 switch (ConstraintLetter) { 719 default: return C_Unknown; 720 case 'r': return C_RegisterClass; 721 case 'i': // Simple Integer or Relocatable Constant 722 case 'n': // Simple Integer 723 case 's': // Relocatable Constant 724 case 'I': // Target registers. 725 case 'J': 726 case 'K': 727 case 'L': 728 case 'M': 729 case 'N': 730 case 'O': 731 case 'P': return C_Other; 732 } 733} 734 735bool TargetLowering::isOperandValidForConstraint(SDOperand Op, 736 char ConstraintLetter) { 737 switch (ConstraintLetter) { 738 default: return false; 739 case 'i': // Simple Integer or Relocatable Constant 740 case 'n': // Simple Integer 741 case 's': // Relocatable Constant 742 return true; // FIXME: not right. 743 } 744} 745 746 747std::vector<unsigned> TargetLowering:: 748getRegForInlineAsmConstraint(const std::string &Constraint, 749 MVT::ValueType VT) const { 750 // Not a physreg, must not be a register reference or something. 751 if (Constraint[0] != '{') return std::vector<unsigned>(); 752 assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?"); 753 754 // Remove the braces from around the name. 755 std::string RegName(Constraint.begin()+1, Constraint.end()-1); 756 757 // Scan to see if this constraint is a register name. 758 const MRegisterInfo *RI = TM.getRegisterInfo(); 759 for (unsigned i = 1, e = RI->getNumRegs(); i != e; ++i) { 760 if (const char *Name = RI->get(i).Name) 761 if (StringsEqualNoCase(RegName, Name)) 762 return std::vector<unsigned>(1, i); 763 } 764 765 // Unknown physreg. 766 return std::vector<unsigned>(); 767} 768 769