// DAGCombiner.cpp -- revision 43da6c7f13aedcc11530f9d81dbbb2ee07ad226a
//===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dagcombine"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");

namespace {
  static cl::opt<bool>
    CombinerAA("combiner-alias-analysis", cl::Hidden,
               cl::desc("Turn on alias analysis during testing"));

  static cl::opt<bool>
    CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
               cl::desc("Include global information in alias analysis"));

//------------------------------ DAGCombiner ---------------------------------//

  class DAGCombiner {
    SelectionDAG &DAG;          // The DAG being combined.
    const TargetLowering &TLI;  // Target hooks; taken from DAG's target.
    CombineLevel Level;         // Current combine phase; set by Run().
    CodeGenOpt::Level OptLevel; // Optimization level for this run.
    bool LegalOperations;       // True when running at/after op legalization.
    bool LegalTypes;            // True when running at/after type legalization.

    // Worklist of all of the nodes that need to be simplified.
    //
    // This has the semantics that when adding to the worklist,
    // the item added must be next to be processed. It should
    // also only appear once. The naive approach to this takes
    // linear time.
    //
    // To reduce the insert/remove time to logarithmic, we use
    // a set and a vector to maintain our worklist.
    //
    // The set contains the items on the worklist, but does not
    // maintain the order they should be visited.
    //
    // The vector maintains the order nodes should be visited, but may
    // contain duplicate or removed nodes. When choosing a node to
    // visit, we pop off the order stack until we find an item that is
    // also in the contents set. All operations are O(log N).
    SmallPtrSet<SDNode*, 64> WorkListContents;
    SmallVector<SDNode*, 64> WorkListOrder;

    // AA - Used for DAG load/store alias analysis.
    AliasAnalysis &AA;

    /// AddUsersToWorkList - When an instruction is simplified, add all users of
    /// the instruction to the work lists because they might get more simplified
    /// now.
    ///
    void AddUsersToWorkList(SDNode *N) {
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           UI != UE; ++UI)
        AddToWorkList(*UI);
    }

    /// visit - call the node-specific routine that knows how to fold each
    /// particular type of node.
    SDValue visit(SDNode *N);

  public:
    /// AddToWorkList - Add to the work list making sure its instance is at the
    /// back (next to be processed.)
    void AddToWorkList(SDNode *N) {
      WorkListContents.insert(N);
      WorkListOrder.push_back(N);
    }

    /// removeFromWorkList - remove all instances of N from the worklist.
    ///
    void removeFromWorkList(SDNode *N) {
      // Erasing from the contents set is sufficient: stale entries left in
      // WorkListOrder are skipped when popping, because they are no longer
      // present in WorkListContents.
      WorkListContents.erase(N);
    }

    /// CombineTo - Replace all of N's values with the NumTo values in To
    /// (defined out-of-line below).
    SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                      bool AddTo = true);

    /// CombineTo - Single-result convenience overload.
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) {
      return CombineTo(N, &Res, 1, AddTo);
    }

    /// CombineTo - Two-result convenience overload.
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true) {
      SDValue To[] = { Res0, Res1 };
      return CombineTo(N, To, 2, AddTo);
    }

    void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);

  private:

    /// SimplifyDemandedBits - Check the specified integer node value to see if
    /// it can be simplified or if things it uses can be simplified by bit
    /// propagation. If so, return true.
    bool SimplifyDemandedBits(SDValue Op) {
      // Demand every bit of the scalar type and defer to the masked overload.
      unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
      APInt Demanded = APInt::getAllOnesValue(BitWidth);
      return SimplifyDemandedBits(Op, Demanded);
    }

    bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded);

    bool CombineToPreIndexedLoadStore(SDNode *N);
    bool CombineToPostIndexedLoadStore(SDNode *N);

    void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
    SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
    SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
    SDValue PromoteIntBinOp(SDValue Op);
    SDValue PromoteIntShiftOp(SDValue Op);
    SDValue PromoteExtend(SDValue Op);
    bool PromoteLoad(SDValue Op);

    // NOTE(review): SetCCs is passed by value; a const reference would avoid
    // copying the SmallVector -- confirm no caller relies on the copy.
    void ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs,
                         SDValue Trunc, SDValue ExtLoad, DebugLoc DL,
                         ISD::NodeType ExtType);

    /// combine - call the node-specific routine that knows how to fold each
    /// particular type of node. If that doesn't do anything, try the
    /// target-specific DAG combines.
    SDValue combine(SDNode *N);

    // Visitation implementation - Implement dag node combining for different
    // node types.  The semantics are as follows:
    // Return Value:
    //   SDValue.getNode() == 0 - No change was made
    //   SDValue.getNode() == N - N was replaced, is dead and has been handled.
    //   otherwise              - N should be replaced by the returned Operand.
    //
    SDValue visitTokenFactor(SDNode *N);
    SDValue visitMERGE_VALUES(SDNode *N);
    SDValue visitADD(SDNode *N);
    SDValue visitSUB(SDNode *N);
    SDValue visitADDC(SDNode *N);
    SDValue visitSUBC(SDNode *N);
    SDValue visitADDE(SDNode *N);
    SDValue visitSUBE(SDNode *N);
    SDValue visitMUL(SDNode *N);
    SDValue visitSDIV(SDNode *N);
    SDValue visitUDIV(SDNode *N);
    SDValue visitSREM(SDNode *N);
    SDValue visitUREM(SDNode *N);
    SDValue visitMULHU(SDNode *N);
    SDValue visitMULHS(SDNode *N);
    SDValue visitSMUL_LOHI(SDNode *N);
    SDValue visitUMUL_LOHI(SDNode *N);
    SDValue visitSMULO(SDNode *N);
    SDValue visitUMULO(SDNode *N);
    SDValue visitSDIVREM(SDNode *N);
    SDValue visitUDIVREM(SDNode *N);
    SDValue visitAND(SDNode *N);
    SDValue visitOR(SDNode *N);
    SDValue visitXOR(SDNode *N);
    SDValue SimplifyVBinOp(SDNode *N);
    SDValue visitSHL(SDNode *N);
    SDValue visitSRA(SDNode *N);
    SDValue visitSRL(SDNode *N);
    SDValue visitCTLZ(SDNode *N);
    SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTTZ(SDNode *N);
    SDValue visitCTTZ_ZERO_UNDEF(SDNode *N);
    SDValue visitCTPOP(SDNode *N);
    SDValue visitSELECT(SDNode *N);
    SDValue visitSELECT_CC(SDNode *N);
    SDValue visitSETCC(SDNode *N);
    SDValue visitSIGN_EXTEND(SDNode *N);
    SDValue visitZERO_EXTEND(SDNode *N);
    SDValue visitANY_EXTEND(SDNode *N);
    SDValue visitSIGN_EXTEND_INREG(SDNode *N);
    SDValue visitTRUNCATE(SDNode *N);
    SDValue visitBITCAST(SDNode *N);
    SDValue visitBUILD_PAIR(SDNode *N);
    SDValue visitFADD(SDNode *N);
    SDValue visitFSUB(SDNode *N);
    SDValue visitFMUL(SDNode *N);
    SDValue visitFMA(SDNode *N);
    SDValue visitFDIV(SDNode *N);
    SDValue visitFREM(SDNode *N);
    SDValue visitFCOPYSIGN(SDNode *N);
    SDValue visitSINT_TO_FP(SDNode *N);
    SDValue visitUINT_TO_FP(SDNode *N);
    SDValue visitFP_TO_SINT(SDNode *N);
    SDValue visitFP_TO_UINT(SDNode *N);
    SDValue visitFP_ROUND(SDNode *N);
    SDValue visitFP_ROUND_INREG(SDNode *N);
    SDValue visitFP_EXTEND(SDNode *N);
    SDValue visitFNEG(SDNode *N);
    SDValue visitFABS(SDNode *N);
    SDValue visitFCEIL(SDNode *N);
    SDValue visitFTRUNC(SDNode *N);
    SDValue visitFFLOOR(SDNode *N);
    SDValue visitBRCOND(SDNode *N);
    SDValue visitBR_CC(SDNode *N);
    SDValue visitLOAD(SDNode *N);
    SDValue visitSTORE(SDNode *N);
    SDValue visitINSERT_VECTOR_ELT(SDNode *N);
    SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
    SDValue visitBUILD_VECTOR(SDNode *N);
    SDValue visitCONCAT_VECTORS(SDNode *N);
    SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
    SDValue visitVECTOR_SHUFFLE(SDNode *N);
    SDValue visitMEMBARRIER(SDNode *N);

    SDValue XformToShuffleWithZero(SDNode *N);
    SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);

    SDValue visitShiftByConstant(SDNode *N, unsigned Amt);

    bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
    SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
    SDValue SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2);
    SDValue SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2,
                             SDValue N3, ISD::CondCode CC,
                             bool NotExtCompare = false);
    SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                          DebugLoc DL, bool foldBooleans = true);
    SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                       unsigned HiOp);
    SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
    SDValue BuildSDIV(SDNode *N);
    SDValue BuildUDIV(SDNode *N);
    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                               bool DemandHighBits = true);
    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
    SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
    SDValue ReduceLoadWidth(SDNode *N);
    SDValue ReduceLoadOpStoreWidth(SDNode *N);
    SDValue TransformFPLoadStorePair(SDNode *N);

    SDValue GetDemandedBits(SDValue V, const APInt &Mask);

    /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
    /// looking for aliasing nodes and adding them to the Aliases vector.
    void GatherAllAliases(SDNode *N, SDValue OriginalChain,
                          SmallVector<SDValue, 8> &Aliases);

    /// isAlias - Return true if there is any possibility that the two addresses
    /// overlap.
    bool isAlias(SDValue Ptr1, int64_t Size1,
                 const Value *SrcValue1, int SrcValueOffset1,
                 unsigned SrcValueAlign1,
                 const MDNode *TBAAInfo1,
                 SDValue Ptr2, int64_t Size2,
                 const Value *SrcValue2, int SrcValueOffset2,
                 unsigned SrcValueAlign2,
                 const MDNode *TBAAInfo2) const;

    /// FindAliasInfo - Extracts the relevant alias information from the memory
    /// node.  Returns true if the operand was a load.
    bool FindAliasInfo(SDNode *N,
                       SDValue &Ptr, int64_t &Size,
                       const Value *&SrcValue, int &SrcValueOffset,
                       unsigned &SrcValueAlignment,
                       const MDNode *&TBAAInfo) const;

    /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
    /// looking for a better chain (aliasing node.)
    SDValue FindBetterChain(SDNode *N, SDValue Chain);

  public:
    DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
      : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
        OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}

    /// Run - runs the dag combiner on all nodes in the work list
    void Run(CombineLevel AtLevel);

    SelectionDAG &getDAG() const { return DAG; }

    /// getShiftAmountTy - Returns a type large enough to hold any valid
    /// shift amount - before type legalization these can be huge.
    EVT getShiftAmountTy(EVT LHSTy) {
      return LegalTypes ? TLI.getShiftAmountTy(LHSTy) : TLI.getPointerTy();
    }

    /// isTypeLegal - This method returns true if we are running before type
    /// legalization or if the specified VT is legal.
    bool isTypeLegal(const EVT &VT) {
      if (!LegalTypes) return true;
      return TLI.isTypeLegal(VT);
    }
  };
}


namespace {
/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorkListRemover : public SelectionDAG::DAGUpdateListener {
  DAGCombiner &DC;
public:
  explicit WorkListRemover(DAGCombiner &dc)
    : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {}

  virtual void NodeDeleted(SDNode *N, SDNode *E) {
    DC.removeFromWorkList(N);
  }
};
}

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

// The DAGCombinerInfo methods below are thin thunks: DC is an opaque pointer
// that is cast back to the DAGCombiner instance and forwarded to.

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->AddToWorkList(N);
}

void TargetLowering::DAGCombinerInfo::RemoveFromWorklist(SDNode *N) {
  ((DAGCombiner*)DC)->removeFromWorkList(N);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, const std::vector<SDValue> &To, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo);
}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo);
}


SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {
  return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo);
}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
  return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO);
}

377//===----------------------------------------------------------------------===// 378// Helper Functions 379//===----------------------------------------------------------------------===// 380 381/// isNegatibleForFree - Return 1 if we can compute the negated form of the 382/// specified expression for the same cost as the expression itself, or 2 if we 383/// can compute the negated form more cheaply than the expression itself. 384static char isNegatibleForFree(SDValue Op, bool LegalOperations, 385 const TargetLowering &TLI, 386 const TargetOptions *Options, 387 unsigned Depth = 0) { 388 // No compile time optimizations on this type. 389 if (Op.getValueType() == MVT::ppcf128) 390 return 0; 391 392 // fneg is removable even if it has multiple uses. 393 if (Op.getOpcode() == ISD::FNEG) return 2; 394 395 // Don't allow anything with multiple uses. 396 if (!Op.hasOneUse()) return 0; 397 398 // Don't recurse exponentially. 399 if (Depth > 6) return 0; 400 401 switch (Op.getOpcode()) { 402 default: return false; 403 case ISD::ConstantFP: 404 // Don't invert constant FP values after legalize. The negated constant 405 // isn't necessarily legal. 406 return LegalOperations ? 0 : 1; 407 case ISD::FADD: 408 // FIXME: determine better conditions for this xform. 409 if (!Options->UnsafeFPMath) return 0; 410 411 // After operation legalization, it might not be legal to create new FSUBs. 412 if (LegalOperations && 413 !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) 414 return 0; 415 416 // fold (fsub (fadd A, B)) -> (fsub (fneg A), B) 417 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 418 Options, Depth + 1)) 419 return V; 420 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 421 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 422 Depth + 1); 423 case ISD::FSUB: 424 // We can't turn -(A-B) into B-A when we honor signed zeros. 
425 if (!Options->UnsafeFPMath) return 0; 426 427 // fold (fneg (fsub A, B)) -> (fsub B, A) 428 return 1; 429 430 case ISD::FMUL: 431 case ISD::FDIV: 432 if (Options->HonorSignDependentRoundingFPMath()) return 0; 433 434 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y)) 435 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 436 Options, Depth + 1)) 437 return V; 438 439 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 440 Depth + 1); 441 442 case ISD::FP_EXTEND: 443 case ISD::FP_ROUND: 444 case ISD::FSIN: 445 return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options, 446 Depth + 1); 447 } 448} 449 450/// GetNegatedExpression - If isNegatibleForFree returns true, this function 451/// returns the newly negated expression. 452static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, 453 bool LegalOperations, unsigned Depth = 0) { 454 // fneg is removable even if it has multiple uses. 455 if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0); 456 457 // Don't allow anything with multiple uses. 458 assert(Op.hasOneUse() && "Unknown reuse!"); 459 460 assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree"); 461 switch (Op.getOpcode()) { 462 default: llvm_unreachable("Unknown code"); 463 case ISD::ConstantFP: { 464 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 465 V.changeSign(); 466 return DAG.getConstantFP(V, Op.getValueType()); 467 } 468 case ISD::FADD: 469 // FIXME: determine better conditions for this xform. 
470 assert(DAG.getTarget().Options.UnsafeFPMath); 471 472 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 473 if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 474 DAG.getTargetLoweringInfo(), 475 &DAG.getTarget().Options, Depth+1)) 476 return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 477 GetNegatedExpression(Op.getOperand(0), DAG, 478 LegalOperations, Depth+1), 479 Op.getOperand(1)); 480 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 481 return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 482 GetNegatedExpression(Op.getOperand(1), DAG, 483 LegalOperations, Depth+1), 484 Op.getOperand(0)); 485 case ISD::FSUB: 486 // We can't turn -(A-B) into B-A when we honor signed zeros. 487 assert(DAG.getTarget().Options.UnsafeFPMath); 488 489 // fold (fneg (fsub 0, B)) -> B 490 if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) 491 if (N0CFP->getValueAPF().isZero()) 492 return Op.getOperand(1); 493 494 // fold (fneg (fsub A, B)) -> (fsub B, A) 495 return DAG.getNode(ISD::FSUB, Op.getDebugLoc(), Op.getValueType(), 496 Op.getOperand(1), Op.getOperand(0)); 497 498 case ISD::FMUL: 499 case ISD::FDIV: 500 assert(!DAG.getTarget().Options.HonorSignDependentRoundingFPMath()); 501 502 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 503 if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 504 DAG.getTargetLoweringInfo(), 505 &DAG.getTarget().Options, Depth+1)) 506 return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 507 GetNegatedExpression(Op.getOperand(0), DAG, 508 LegalOperations, Depth+1), 509 Op.getOperand(1)); 510 511 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 512 return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 513 Op.getOperand(0), 514 GetNegatedExpression(Op.getOperand(1), DAG, 515 LegalOperations, Depth+1)); 516 517 case ISD::FP_EXTEND: 518 case ISD::FSIN: 519 return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), Op.getValueType(), 520 
GetNegatedExpression(Op.getOperand(0), DAG, 521 LegalOperations, Depth+1)); 522 case ISD::FP_ROUND: 523 return DAG.getNode(ISD::FP_ROUND, Op.getDebugLoc(), Op.getValueType(), 524 GetNegatedExpression(Op.getOperand(0), DAG, 525 LegalOperations, Depth+1), 526 Op.getOperand(1)); 527 } 528} 529 530 531// isSetCCEquivalent - Return true if this node is a setcc, or is a select_cc 532// that selects between the values 1 and 0, making it equivalent to a setcc. 533// Also, set the incoming LHS, RHS, and CC references to the appropriate 534// nodes based on the type of node we are checking. This simplifies life a 535// bit for the callers. 536static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, 537 SDValue &CC) { 538 if (N.getOpcode() == ISD::SETCC) { 539 LHS = N.getOperand(0); 540 RHS = N.getOperand(1); 541 CC = N.getOperand(2); 542 return true; 543 } 544 if (N.getOpcode() == ISD::SELECT_CC && 545 N.getOperand(2).getOpcode() == ISD::Constant && 546 N.getOperand(3).getOpcode() == ISD::Constant && 547 cast<ConstantSDNode>(N.getOperand(2))->getAPIntValue() == 1 && 548 cast<ConstantSDNode>(N.getOperand(3))->isNullValue()) { 549 LHS = N.getOperand(0); 550 RHS = N.getOperand(1); 551 CC = N.getOperand(4); 552 return true; 553 } 554 return false; 555} 556 557// isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only 558// one use. If this is true, it allows the users to invert the operation for 559// free when it is profitable to do so. 560static bool isOneUseSetCC(SDValue N) { 561 SDValue N0, N1, N2; 562 if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse()) 563 return true; 564 return false; 565} 566 567SDValue DAGCombiner::ReassociateOps(unsigned Opc, DebugLoc DL, 568 SDValue N0, SDValue N1) { 569 EVT VT = N0.getValueType(); 570 if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) { 571 if (isa<ConstantSDNode>(N1)) { 572 // reassoc. 
(op (op x, c1), c2) -> (op x, (op c1, c2)) 573 SDValue OpNode = 574 DAG.FoldConstantArithmetic(Opc, VT, 575 cast<ConstantSDNode>(N0.getOperand(1)), 576 cast<ConstantSDNode>(N1)); 577 return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode); 578 } 579 if (N0.hasOneUse()) { 580 // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use 581 SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT, 582 N0.getOperand(0), N1); 583 AddToWorkList(OpNode.getNode()); 584 return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1)); 585 } 586 } 587 588 if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) { 589 if (isa<ConstantSDNode>(N0)) { 590 // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2)) 591 SDValue OpNode = 592 DAG.FoldConstantArithmetic(Opc, VT, 593 cast<ConstantSDNode>(N1.getOperand(1)), 594 cast<ConstantSDNode>(N0)); 595 return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode); 596 } 597 if (N1.hasOneUse()) { 598 // reassoc. (op y, (op x, c1)) -> (op (op x, y), c1) iff x+c1 has one use 599 SDValue OpNode = DAG.getNode(Opc, N0.getDebugLoc(), VT, 600 N1.getOperand(0), N0); 601 AddToWorkList(OpNode.getNode()); 602 return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1)); 603 } 604 } 605 606 return SDValue(); 607} 608 609SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, 610 bool AddTo) { 611 assert(N->getNumValues() == NumTo && "Broken CombineTo call!"); 612 ++NodesCombined; 613 DEBUG(dbgs() << "\nReplacing.1 "; 614 N->dump(&DAG); 615 dbgs() << "\nWith: "; 616 To[0].getNode()->dump(&DAG); 617 dbgs() << " and " << NumTo-1 << " other values\n"; 618 for (unsigned i = 0, e = NumTo; i != e; ++i) 619 assert((!To[i].getNode() || 620 N->getValueType(i) == To[i].getValueType()) && 621 "Cannot combine value to value of different type!")); 622 WorkListRemover DeadNodes(*this); 623 DAG.ReplaceAllUsesWith(N, To); 624 if (AddTo) { 625 // Push the new nodes and any users onto the worklist 626 for (unsigned i = 0, e = NumTo; i 
!= e; ++i) { 627 if (To[i].getNode()) { 628 AddToWorkList(To[i].getNode()); 629 AddUsersToWorkList(To[i].getNode()); 630 } 631 } 632 } 633 634 // Finally, if the node is now dead, remove it from the graph. The node 635 // may not be dead if the replacement process recursively simplified to 636 // something else needing this node. 637 if (N->use_empty()) { 638 // Nodes can be reintroduced into the worklist. Make sure we do not 639 // process a node that has been replaced. 640 removeFromWorkList(N); 641 642 // Finally, since the node is now dead, remove it from the graph. 643 DAG.DeleteNode(N); 644 } 645 return SDValue(N, 0); 646} 647 648void DAGCombiner:: 649CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { 650 // Replace all uses. If any nodes become isomorphic to other nodes and 651 // are deleted, make sure to remove them from our worklist. 652 WorkListRemover DeadNodes(*this); 653 DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New); 654 655 // Push the new node and any (possibly new) users onto the worklist. 656 AddToWorkList(TLO.New.getNode()); 657 AddUsersToWorkList(TLO.New.getNode()); 658 659 // Finally, if the node is now dead, remove it from the graph. The node 660 // may not be dead if the replacement process recursively simplified to 661 // something else needing this node. 662 if (TLO.Old.getNode()->use_empty()) { 663 removeFromWorkList(TLO.Old.getNode()); 664 665 // If the operands of this node are only used by the node, they will now 666 // be dead. Make sure to visit them first to delete dead nodes early. 
667 for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands(); i != e; ++i) 668 if (TLO.Old.getNode()->getOperand(i).getNode()->hasOneUse()) 669 AddToWorkList(TLO.Old.getNode()->getOperand(i).getNode()); 670 671 DAG.DeleteNode(TLO.Old.getNode()); 672 } 673} 674 675/// SimplifyDemandedBits - Check the specified integer node value to see if 676/// it can be simplified or if things it uses can be simplified by bit 677/// propagation. If so, return true. 678bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) { 679 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations); 680 APInt KnownZero, KnownOne; 681 if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) 682 return false; 683 684 // Revisit the node. 685 AddToWorkList(Op.getNode()); 686 687 // Replace the old value with the new one. 688 ++NodesCombined; 689 DEBUG(dbgs() << "\nReplacing.2 "; 690 TLO.Old.getNode()->dump(&DAG); 691 dbgs() << "\nWith: "; 692 TLO.New.getNode()->dump(&DAG); 693 dbgs() << '\n'); 694 695 CommitTargetLoweringOpt(TLO); 696 return true; 697} 698 699void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) { 700 DebugLoc dl = Load->getDebugLoc(); 701 EVT VT = Load->getValueType(0); 702 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0)); 703 704 DEBUG(dbgs() << "\nReplacing.9 "; 705 Load->dump(&DAG); 706 dbgs() << "\nWith: "; 707 Trunc.getNode()->dump(&DAG); 708 dbgs() << '\n'); 709 WorkListRemover DeadNodes(*this); 710 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc); 711 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1)); 712 removeFromWorkList(Load); 713 DAG.DeleteNode(Load); 714 AddToWorkList(Trunc.getNode()); 715} 716 717SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) { 718 Replace = false; 719 DebugLoc dl = Op.getDebugLoc(); 720 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { 721 EVT MemVT = LD->getMemoryVT(); 722 ISD::LoadExtType ExtType = 
ISD::isNON_EXTLoad(LD) 723 ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD 724 : ISD::EXTLOAD) 725 : LD->getExtensionType(); 726 Replace = true; 727 return DAG.getExtLoad(ExtType, dl, PVT, 728 LD->getChain(), LD->getBasePtr(), 729 LD->getPointerInfo(), 730 MemVT, LD->isVolatile(), 731 LD->isNonTemporal(), LD->getAlignment()); 732 } 733 734 unsigned Opc = Op.getOpcode(); 735 switch (Opc) { 736 default: break; 737 case ISD::AssertSext: 738 return DAG.getNode(ISD::AssertSext, dl, PVT, 739 SExtPromoteOperand(Op.getOperand(0), PVT), 740 Op.getOperand(1)); 741 case ISD::AssertZext: 742 return DAG.getNode(ISD::AssertZext, dl, PVT, 743 ZExtPromoteOperand(Op.getOperand(0), PVT), 744 Op.getOperand(1)); 745 case ISD::Constant: { 746 unsigned ExtOpc = 747 Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 748 return DAG.getNode(ExtOpc, dl, PVT, Op); 749 } 750 } 751 752 if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT)) 753 return SDValue(); 754 return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op); 755} 756 757SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) { 758 if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT)) 759 return SDValue(); 760 EVT OldVT = Op.getValueType(); 761 DebugLoc dl = Op.getDebugLoc(); 762 bool Replace = false; 763 SDValue NewOp = PromoteOperand(Op, PVT, Replace); 764 if (NewOp.getNode() == 0) 765 return SDValue(); 766 AddToWorkList(NewOp.getNode()); 767 768 if (Replace) 769 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode()); 770 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp, 771 DAG.getValueType(OldVT)); 772} 773 774SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) { 775 EVT OldVT = Op.getValueType(); 776 DebugLoc dl = Op.getDebugLoc(); 777 bool Replace = false; 778 SDValue NewOp = PromoteOperand(Op, PVT, Replace); 779 if (NewOp.getNode() == 0) 780 return SDValue(); 781 AddToWorkList(NewOp.getNode()); 782 783 if (Replace) 784 ReplaceLoadWithPromotedLoad(Op.getNode(), 
NewOp.getNode()); 785 return DAG.getZeroExtendInReg(NewOp, dl, OldVT); 786} 787 788/// PromoteIntBinOp - Promote the specified integer binary operation if the 789/// target indicates it is beneficial. e.g. On x86, it's usually better to 790/// promote i16 operations to i32 since i16 instructions are longer. 791SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) { 792 if (!LegalOperations) 793 return SDValue(); 794 795 EVT VT = Op.getValueType(); 796 if (VT.isVector() || !VT.isInteger()) 797 return SDValue(); 798 799 // If operation type is 'undesirable', e.g. i16 on x86, consider 800 // promoting it. 801 unsigned Opc = Op.getOpcode(); 802 if (TLI.isTypeDesirableForOp(Opc, VT)) 803 return SDValue(); 804 805 EVT PVT = VT; 806 // Consult target whether it is a good idea to promote this operation and 807 // what's the right type to promote it to. 808 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 809 assert(PVT != VT && "Don't know what type to promote to!"); 810 811 bool Replace0 = false; 812 SDValue N0 = Op.getOperand(0); 813 SDValue NN0 = PromoteOperand(N0, PVT, Replace0); 814 if (NN0.getNode() == 0) 815 return SDValue(); 816 817 bool Replace1 = false; 818 SDValue N1 = Op.getOperand(1); 819 SDValue NN1; 820 if (N0 == N1) 821 NN1 = NN0; 822 else { 823 NN1 = PromoteOperand(N1, PVT, Replace1); 824 if (NN1.getNode() == 0) 825 return SDValue(); 826 } 827 828 AddToWorkList(NN0.getNode()); 829 if (NN1.getNode()) 830 AddToWorkList(NN1.getNode()); 831 832 if (Replace0) 833 ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode()); 834 if (Replace1) 835 ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode()); 836 837 DEBUG(dbgs() << "\nPromoting "; 838 Op.getNode()->dump(&DAG)); 839 DebugLoc dl = Op.getDebugLoc(); 840 return DAG.getNode(ISD::TRUNCATE, dl, VT, 841 DAG.getNode(Opc, dl, PVT, NN0, NN1)); 842 } 843 return SDValue(); 844} 845 846/// PromoteIntShiftOp - Promote the specified integer shift operation if the 847/// target indicates it is beneficial. e.g. 
On x86, it's usually better to 848/// promote i16 operations to i32 since i16 instructions are longer. 849SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) { 850 if (!LegalOperations) 851 return SDValue(); 852 853 EVT VT = Op.getValueType(); 854 if (VT.isVector() || !VT.isInteger()) 855 return SDValue(); 856 857 // If operation type is 'undesirable', e.g. i16 on x86, consider 858 // promoting it. 859 unsigned Opc = Op.getOpcode(); 860 if (TLI.isTypeDesirableForOp(Opc, VT)) 861 return SDValue(); 862 863 EVT PVT = VT; 864 // Consult target whether it is a good idea to promote this operation and 865 // what's the right type to promote it to. 866 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 867 assert(PVT != VT && "Don't know what type to promote to!"); 868 869 bool Replace = false; 870 SDValue N0 = Op.getOperand(0); 871 if (Opc == ISD::SRA) 872 N0 = SExtPromoteOperand(Op.getOperand(0), PVT); 873 else if (Opc == ISD::SRL) 874 N0 = ZExtPromoteOperand(Op.getOperand(0), PVT); 875 else 876 N0 = PromoteOperand(N0, PVT, Replace); 877 if (N0.getNode() == 0) 878 return SDValue(); 879 880 AddToWorkList(N0.getNode()); 881 if (Replace) 882 ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode()); 883 884 DEBUG(dbgs() << "\nPromoting "; 885 Op.getNode()->dump(&DAG)); 886 DebugLoc dl = Op.getDebugLoc(); 887 return DAG.getNode(ISD::TRUNCATE, dl, VT, 888 DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1))); 889 } 890 return SDValue(); 891} 892 893SDValue DAGCombiner::PromoteExtend(SDValue Op) { 894 if (!LegalOperations) 895 return SDValue(); 896 897 EVT VT = Op.getValueType(); 898 if (VT.isVector() || !VT.isInteger()) 899 return SDValue(); 900 901 // If operation type is 'undesirable', e.g. i16 on x86, consider 902 // promoting it. 903 unsigned Opc = Op.getOpcode(); 904 if (TLI.isTypeDesirableForOp(Opc, VT)) 905 return SDValue(); 906 907 EVT PVT = VT; 908 // Consult target whether it is a good idea to promote this operation and 909 // what's the right type to promote it to. 
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");
    // fold (aext (aext x)) -> (aext x)
    // fold (aext (zext x)) -> (zext x)
    // fold (aext (sext x)) -> (sext x)
    DEBUG(dbgs() << "\nPromoting ";
          Op.getNode()->dump(&DAG));
    // Rebuild the extension with the same opcode and source operand; the
    // folds above collapse nested extensions.
    return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), VT, Op.getOperand(0));
  }
  return SDValue();
}

/// PromoteLoad - Promote the specified load if the target indicates it is
/// beneficial.  Returns true iff the load was replaced in the DAG.
bool DAGCombiner::PromoteLoad(SDValue Op) {
  // Promotion only makes sense once operations have been legalized.
  if (!LegalOperations)
    return false;

  // Only scalar integer loads are candidates for promotion.
  EVT VT = Op.getValueType();
  if (VT.isVector() || !VT.isInteger())
    return false;

  // If operation type is 'undesirable', e.g. i16 on x86, consider
  // promoting it.
  unsigned Opc = Op.getOpcode();
  if (TLI.isTypeDesirableForOp(Opc, VT))
    return false;

  EVT PVT = VT;
  // Consult target whether it is a good idea to promote this operation and
  // what's the right type to promote it to.
  if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
    assert(PVT != VT && "Don't know what type to promote to!");

    DebugLoc dl = Op.getDebugLoc();
    SDNode *N = Op.getNode();
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT MemVT = LD->getMemoryVT();
    // A non-extending load becomes an extending load of the wider type;
    // prefer ZEXTLOAD when the target supports it, otherwise EXTLOAD.
    // Extending loads keep their existing extension kind.
    ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
      ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD
                                                  : ISD::EXTLOAD)
      : LD->getExtensionType();
    SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
                                   LD->getChain(), LD->getBasePtr(),
                                   LD->getPointerInfo(),
                                   MemVT, LD->isVolatile(),
                                   LD->isNonTemporal(), LD->getAlignment());
    // Truncate the promoted value back to the original type for the users.
    SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);

    DEBUG(dbgs() << "\nPromoting ";
          N->dump(&DAG);
          dbgs() << "\nTo: ";
          Result.getNode()->dump(&DAG);
          dbgs() << '\n');
    WorkListRemover DeadNodes(*this);
    // Rewire both the value result and the chain result of the old load.
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1));
    removeFromWorkList(N);
    DAG.DeleteNode(N);
    AddToWorkList(Result.getNode());
    return true;
  }
  return false;
}


//===----------------------------------------------------------------------===//
// Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(CombineLevel AtLevel) {
  // set the instance variables, so that the various visit routines may use it.
  Level = AtLevel;
  LegalOperations = Level >= AfterLegalizeVectorOps;
  LegalTypes = Level >= AfterLegalizeTypes;

  // Add all the dag nodes to the worklist.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ++I)
    AddToWorkList(I);

  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted, and tracking any
  // changes of the root.
  HandleSDNode Dummy(DAG.getRoot());

  // The root of the dag may dangle to deleted nodes until the dag combiner is
  // done.  Set it to null to avoid confusion.
  DAG.setRoot(SDValue());

  // while the worklist isn't empty, find a node and
  // try and combine it.
  while (!WorkListContents.empty()) {
    SDNode *N;
    // The WorkListOrder holds the SDNodes in order, but it may contain
    // duplicates.
    // In order to avoid a linear scan, we use a set (O(log N)) to hold what the
    // worklist *should* contain, and check that the node we want to visit
    // should actually be visited.
    do {
      N = WorkListOrder.pop_back_val();
    } while (!WorkListContents.erase(N));

    // If N has no uses, it is dead.  Make sure to revisit all N's operands once
    // N is deleted from the DAG, since they too may now be dead or may have a
    // reduced number of uses, allowing other xforms.
    if (N->use_empty() && N != &Dummy) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        AddToWorkList(N->getOperand(i).getNode());

      DAG.DeleteNode(N);
      continue;
    }

    SDValue RV = combine(N);

    if (RV.getNode() == 0)
      continue;

    ++NodesCombined;

    // If we get back the same node we passed in, rather than a new node or
    // zero, we know that the node must have defined multiple values and
    // CombineTo was used.  Since CombineTo takes care of the worklist
    // mechanics for us, we have no work to do in this case.
    if (RV.getNode() == N)
      continue;

    assert(N->getOpcode() != ISD::DELETED_NODE &&
           RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned new node!");

    DEBUG(dbgs() << "\nReplacing.3 ";
          N->dump(&DAG);
          dbgs() << "\nWith: ";
          RV.getNode()->dump(&DAG);
          dbgs() << '\n');

    // Transfer debug value.
    DAG.TransferDbgValues(SDValue(N, 0), RV);
    WorkListRemover DeadNodes(*this);
    if (N->getNumValues() == RV.getNode()->getNumValues())
      DAG.ReplaceAllUsesWith(N, RV.getNode());
    else {
      // The result count differs; this is only legal for single-value nodes
      // of matching type (e.g. the replacement is one value of a multi-value
      // node).
      assert(N->getValueType(0) == RV.getValueType() &&
             N->getNumValues() == 1 && "Type mismatch");
      SDValue OpV = RV;
      DAG.ReplaceAllUsesWith(N, &OpV);
    }

    // Push the new node and any users onto the worklist
    AddToWorkList(RV.getNode());
    AddUsersToWorkList(RV.getNode());

    // Add any uses of the old node to the worklist in case this node is the
    // last one that uses them.  They may become dead after this node is
    // deleted.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      AddToWorkList(N->getOperand(i).getNode());

    // Finally, if the node is now dead, remove it from the graph.  The node
    // may not be dead if the replacement process recursively simplified to
    // something else needing this node.
    if (N->use_empty()) {
      // Nodes can be reintroduced into the worklist.  Make sure we do not
      // process a node that has been replaced.
      removeFromWorkList(N);

      // Finally, since the node is now dead, remove it from the graph.
      DAG.DeleteNode(N);
    }
  }

  // If the root changed (e.g. it was a dead load), update the root.
  DAG.setRoot(Dummy.getValue());
  DAG.RemoveDeadNodes();
}

/// visit - Dispatch N to the visitXXX routine matching its opcode.  Returns
/// the replacement value, or a null SDValue if no fold applies.
SDValue DAGCombiner::visit(SDNode *N) {
  switch (N->getOpcode()) {
  default: break;
  case ISD::TokenFactor:        return visitTokenFactor(N);
  case ISD::MERGE_VALUES:       return visitMERGE_VALUES(N);
  case ISD::ADD:                return visitADD(N);
  case ISD::SUB:                return visitSUB(N);
  case ISD::ADDC:               return visitADDC(N);
  case ISD::SUBC:               return visitSUBC(N);
  case ISD::ADDE:               return visitADDE(N);
  case ISD::SUBE:               return visitSUBE(N);
  case ISD::MUL:                return visitMUL(N);
  case ISD::SDIV:               return visitSDIV(N);
  case ISD::UDIV:               return visitUDIV(N);
  case ISD::SREM:               return visitSREM(N);
  case ISD::UREM:               return visitUREM(N);
  case ISD::MULHU:              return visitMULHU(N);
  case ISD::MULHS:              return visitMULHS(N);
  case ISD::SMUL_LOHI:          return visitSMUL_LOHI(N);
  case ISD::UMUL_LOHI:          return visitUMUL_LOHI(N);
  case ISD::SMULO:              return visitSMULO(N);
  case ISD::UMULO:              return visitUMULO(N);
  case ISD::SDIVREM:            return visitSDIVREM(N);
  case ISD::UDIVREM:            return visitUDIVREM(N);
  case ISD::AND:                return visitAND(N);
  case ISD::OR:                 return visitOR(N);
  case ISD::XOR:                return visitXOR(N);
  case ISD::SHL:                return visitSHL(N);
  case ISD::SRA:                return visitSRA(N);
  case ISD::SRL:                return visitSRL(N);
  case ISD::CTLZ:               return visitCTLZ(N);
  case ISD::CTLZ_ZERO_UNDEF:    return visitCTLZ_ZERO_UNDEF(N);
  case ISD::CTTZ:               return visitCTTZ(N);
  case ISD::CTTZ_ZERO_UNDEF:    return visitCTTZ_ZERO_UNDEF(N);
  case ISD::CTPOP:              return visitCTPOP(N);
  case ISD::SELECT:             return visitSELECT(N);
  case ISD::SELECT_CC:          return visitSELECT_CC(N);
  case ISD::SETCC:              return visitSETCC(N);
  case ISD::SIGN_EXTEND:        return visitSIGN_EXTEND(N);
  case ISD::ZERO_EXTEND:        return visitZERO_EXTEND(N);
  case ISD::ANY_EXTEND:         return visitANY_EXTEND(N);
  case ISD::SIGN_EXTEND_INREG:  return visitSIGN_EXTEND_INREG(N);
  case ISD::TRUNCATE:           return visitTRUNCATE(N);
  case ISD::BITCAST:            return visitBITCAST(N);
  case ISD::BUILD_PAIR:         return visitBUILD_PAIR(N);
  case ISD::FADD:               return visitFADD(N);
  case ISD::FSUB:               return visitFSUB(N);
  case ISD::FMUL:               return visitFMUL(N);
  case ISD::FMA:                return visitFMA(N);
  case ISD::FDIV:               return visitFDIV(N);
  case ISD::FREM:               return visitFREM(N);
  case ISD::FCOPYSIGN:          return visitFCOPYSIGN(N);
  case ISD::SINT_TO_FP:         return visitSINT_TO_FP(N);
  case ISD::UINT_TO_FP:         return visitUINT_TO_FP(N);
  case ISD::FP_TO_SINT:         return visitFP_TO_SINT(N);
  case ISD::FP_TO_UINT:         return visitFP_TO_UINT(N);
  case ISD::FP_ROUND:           return visitFP_ROUND(N);
  case ISD::FP_ROUND_INREG:     return visitFP_ROUND_INREG(N);
  case ISD::FP_EXTEND:          return visitFP_EXTEND(N);
  case ISD::FNEG:               return visitFNEG(N);
  case ISD::FABS:               return visitFABS(N);
  case ISD::FFLOOR:             return visitFFLOOR(N);
  case ISD::FCEIL:              return visitFCEIL(N);
  case ISD::FTRUNC:             return visitFTRUNC(N);
  case ISD::BRCOND:             return visitBRCOND(N);
  case ISD::BR_CC:              return visitBR_CC(N);
  case ISD::LOAD:               return visitLOAD(N);
  case ISD::STORE:              return visitSTORE(N);
  case ISD::INSERT_VECTOR_ELT:  return visitINSERT_VECTOR_ELT(N);
  case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
  case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
  case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
  case ISD::EXTRACT_SUBVECTOR:  return visitEXTRACT_SUBVECTOR(N);
  case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
  case ISD::MEMBARRIER:         return visitMEMBARRIER(N);
  }
  return SDValue();
}

/// combine - Try all strategies to simplify N in order: the opcode-specific
/// visit routine, a target-specific combine, type promotion, and finally
/// commutation-driven CSE.  Returns the replacement or a null SDValue.
SDValue DAGCombiner::combine(SDNode *N) {
  SDValue RV = visit(N);

  // If nothing happened, try a target-specific DAG combine.
  if (RV.getNode() == 0) {
    assert(N->getOpcode() != ISD::DELETED_NODE &&
           "Node was deleted but visit returned NULL!");

    if (N->getOpcode() >= ISD::BUILTIN_OP_END ||
        TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) {

      // Expose the DAG combiner to the target combiner impls.
      TargetLowering::DAGCombinerInfo
        DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this);

      RV = TLI.PerformDAGCombine(N, DagCombineInfo);
    }
  }

  // If nothing happened still, try promoting the operation.
  if (RV.getNode() == 0) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
      RV = PromoteIntBinOp(SDValue(N, 0));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      RV = PromoteIntShiftOp(SDValue(N, 0));
      break;
    case ISD::SIGN_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::ANY_EXTEND:
      RV = PromoteExtend(SDValue(N, 0));
      break;
    case ISD::LOAD:
      // PromoteLoad rewrites uses itself; returning N signals "changed".
      if (PromoteLoad(SDValue(N, 0)))
        RV = SDValue(N, 0);
      break;
    }
  }

  // If N is a commutative binary node, try commuting it to enable more
  // sdisel CSE.
  if (RV.getNode() == 0 &&
      SelectionDAG::isCommutativeBinOp(N->getOpcode()) &&
      N->getNumValues() == 1) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);

    // Constant operands are canonicalized to RHS.
    if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
      SDValue Ops[] = { N1, N0 };
      SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(),
                                            Ops, 2);
      if (CSENode)
        return SDValue(CSENode, 0);
    }
  }

  return RV;
}

/// getInputChainForNode - Given a node, return its input chain if it has one,
/// otherwise return a null sd operand.
static SDValue getInputChainForNode(SDNode *N) {
  // Chains are conventionally the first or last operand; check those before
  // scanning the middle operands.
  if (unsigned NumOps = N->getNumOperands()) {
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDValue();
}

/// visitTokenFactor - Simplify a TokenFactor: drop redundant chains, strip
/// entry tokens, merge nested single-use token factors, and de-duplicate
/// operands.
SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
  if (N->getNumOperands() == 2) {
    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
      return N->getOperand(0);
    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
      return N->getOperand(1);
  }

  SmallVector<SDNode *, 8> TFs;   // List of token factors to visit.
  SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
  SmallPtrSet<SDNode*, 16> SeenOps;
  bool Changed = false;           // If we should replace this token factor.

  // Start out with this token factor.
  TFs.push_back(N);

  // Iterate through token factors.  The TFs grows when new token factors are
  // encountered.
  for (unsigned i = 0; i < TFs.size(); ++i) {
    SDNode *TF = TFs[i];

    // Check each of the operands.
    for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
      SDValue Op = TF->getOperand(i);

      switch (Op.getOpcode()) {
      case ISD::EntryToken:
        // Entry tokens don't need to be added to the list. They are
        // redundant.
        Changed = true;
        break;

      case ISD::TokenFactor:
        if (Op.hasOneUse() &&
            std::find(TFs.begin(), TFs.end(), Op.getNode()) == TFs.end()) {
          // Queue up for processing.
          TFs.push_back(Op.getNode());
          // Clean up in case the token factor is removed.
          AddToWorkList(Op.getNode());
          Changed = true;
          break;
        }
        // Fall thru

      default:
        // Only add if it isn't already in the list.
        if (SeenOps.insert(Op.getNode()))
          Ops.push_back(Op);
        else
          Changed = true;
        break;
      }
    }
  }

  SDValue Result;

  // If we've changed things around then replace token factor.
  if (Changed) {
    if (Ops.empty()) {
      // The entry token is the only possible outcome.
      Result = DAG.getEntryNode();
    } else {
      // New and improved token factor.
      Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                           MVT::Other, &Ops[0], Ops.size());
    }

    // Don't add users to work list.
    return CombineTo(N, Result, false);
  }

  return Result;
}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
  WorkListRemover DeadNodes(*this);
  // Replacing results may cause a different MERGE_VALUES to suddenly
  // be CSE'd with N, and carry its uses with it. Iterate until no
  // uses remain, to ensure that the node can be safely deleted.
  // First add the users of this node to the work list so that they
  // can be tried again once they have new operands.
  AddUsersToWorkList(N);
  do {
    // Forward each result of the MERGE_VALUES to its corresponding operand.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i));
  } while (!N->use_empty());
  removeFromWorkList(N);
  DAG.DeleteNode(N);
  return SDValue(N, 0);   // Return N so it doesn't get rechecked!
}

/// combineShlAddConstant - Helper for visitADD.  If N0 is (shl (add x, c1),
/// c2) with both shift amount and inner addend constant, reassociate so the
/// constants can fold: (add (shl (add x, c1), c2), y)
///   -> (add (add (shl x, c2), c1<<c2), y).
static
SDValue combineShlAddConstant(DebugLoc DL, SDValue N0, SDValue N1,
                              SelectionDAG &DAG) {
  EVT VT = N0.getValueType();
  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01);

  if (N01C && N00.getOpcode() == ISD::ADD && N00.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N00.getOperand(1))) {
    // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
    N0 = DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT,
                     DAG.getNode(ISD::SHL, N00.getDebugLoc(), VT,
                                 N00.getOperand(0), N01),
                     DAG.getNode(ISD::SHL, N01.getDebugLoc(), VT,
                                 N00.getOperand(1), N01));
    return DAG.getNode(ISD::ADD, DL, VT, N0, N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitADD(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (add x, undef) -> undef
  if (N0.getOpcode() == ISD::UNDEF)
    return N0;
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;
  // fold (add c1, c2) -> c1+c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::ADD, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0);
  // fold (add x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (add Sym, c) -> Sym+c
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
        GA->getOpcode() == ISD::GlobalAddress)
      return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
                                  GA->getOffset() +
                                  (uint64_t)N1C->getSExtValue());
  // fold ((c1-A)+c2) -> (c1+c2)-A
  if (N1C && N0.getOpcode() == ISD::SUB)
    if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                         DAG.getConstant(N1C->getAPIntValue()+
                                         N0C->getAPIntValue(), VT),
                         N0.getOperand(1));
  // reassociate add
  SDValue RADD = ReassociateOps(ISD::ADD, N->getDebugLoc(), N0, N1);
  if (RADD.getNode() != 0)
    return RADD;
  // fold ((0-A) + B) -> B-A
  if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
      cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1, N0.getOperand(1));
  // fold (A + (0-B)) -> A-B
  if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
      cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1.getOperand(1));
  // fold (A+(B-A)) -> B
  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
    return N1.getOperand(0);
  // fold ((B-A)+A) -> B
  if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1))
    return N0.getOperand(0);
  // fold (A+(B-(A+C))) to (B-C)
  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
      N0 == N1.getOperand(1).getOperand(0))
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
                       N1.getOperand(1).getOperand(1));
  // fold (A+(B-(C+A))) to (B-C)
  if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD &&
      N0 == N1.getOperand(1).getOperand(1))
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1.getOperand(0),
                       N1.getOperand(1).getOperand(0));
  // fold (A+((B-A)+or-C)) to (B+or-C)
  if ((N1.getOpcode() == ISD::SUB || N1.getOpcode() == ISD::ADD) &&
      N1.getOperand(0).getOpcode() == ISD::SUB &&
      N0 == N1.getOperand(0).getOperand(1))
    return DAG.getNode(N1.getOpcode(), N->getDebugLoc(), VT,
                       N1.getOperand(0).getOperand(0), N1.getOperand(1));

  // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant
  if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) {
    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);
    SDValue N10 = N1.getOperand(0);
    SDValue N11 = N1.getOperand(1);

    if (isa<ConstantSDNode>(N00) || isa<ConstantSDNode>(N10))
      return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                         DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, N00, N10),
                         DAG.getNode(ISD::ADD, N1.getDebugLoc(), VT, N01, N11));
  }

  if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (a+b) -> (a|b) iff a and b share no bits.
  if (VT.isInteger() && !VT.isVector()) {
    APInt LHSZero, LHSOne;
    APInt RHSZero, RHSOne;
    DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);

    if (LHSZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);

      // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
      // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
      if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
        return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1);
    }
  }

  // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), )
  if (N0.getOpcode() == ISD::SHL && N0.getNode()->hasOneUse()) {
    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N0, N1, DAG);
    if (Result.getNode()) return Result;
  }
  if (N1.getOpcode() == ISD::SHL && N1.getNode()->hasOneUse()) {
    SDValue Result = combineShlAddConstant(N->getDebugLoc(), N1, N0, DAG);
    if (Result.getNode()) return Result;
  }

  // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
  if (N1.getOpcode() == ISD::SHL &&
      N1.getOperand(0).getOpcode() == ISD::SUB)
    if (ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0)))
      if (C->getAPIntValue() == 0)
        return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0,
                           DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
                                       N1.getOperand(0).getOperand(1),
                                       N1.getOperand(1)));
  if (N0.getOpcode() == ISD::SHL &&
      N0.getOperand(0).getOpcode() == ISD::SUB)
    if (ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0)))
      if (C->getAPIntValue() == 0)
        return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N1,
                           DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
                                       N0.getOperand(0).getOperand(1),
                                       N0.getOperand(1)));

  if (N1.getOpcode() == ISD::AND) {
    SDValue AndOp0 = N1.getOperand(0);
    ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
    unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
    unsigned DestBits = VT.getScalarType().getSizeInBits();

    // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
    // and similar xforms where the inner op is either ~0 or 0.
    if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
      DebugLoc DL = N->getDebugLoc();
      return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
    }
  }

  // add (sext i1), X -> sub X, (zext i1)
  if (N0.getOpcode() == ISD::SIGN_EXTEND &&
      N0.getOperand(0).getValueType() == MVT::i1 &&
      !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) {
    DebugLoc DL = N->getDebugLoc();
    SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
    return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
  }

  return SDValue();
}

SDValue DAGCombiner::visitADDC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // If the flag result is dead, turn this into an ADD.
  if (!N->hasAnyUseOfValue(1))
    return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, N1),
                     DAG.getNode(ISD::CARRY_FALSE,
                                 N->getDebugLoc(), MVT::Glue));

  // canonicalize constant to RHS.
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N1, N0);

  // fold (addc x, 0) -> x + no carry out
  if (N1C && N1C->isNullValue())
    return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
                                        N->getDebugLoc(), MVT::Glue));

  // fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
  APInt LHSZero, LHSOne;
  APInt RHSZero, RHSOne;
  DAG.ComputeMaskedBits(N0, LHSZero, LHSOne);

  if (LHSZero.getBoolValue()) {
    DAG.ComputeMaskedBits(N1, RHSZero, RHSOne);

    // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
    // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
    if ((RHSZero & ~LHSZero) == ~LHSZero || (LHSZero & ~RHSZero) == ~RHSZero)
      return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
                       DAG.getNode(ISD::CARRY_FALSE,
                                   N->getDebugLoc(), MVT::Glue));
  }

  return SDValue();
}

SDValue DAGCombiner::visitADDE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue CarryIn = N->getOperand(2);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);

  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(),
                       N1, N0, CarryIn);

  // fold (adde x, y, false) -> (addc x, y)
  if (CarryIn.getOpcode() == ISD::CARRY_FALSE)
    return DAG.getNode(ISD::ADDC, N->getDebugLoc(), N->getVTList(), N0, N1);

  return SDValue();
}

// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(DebugLoc DL, const TargetLowering &TLI, EVT VT,
                             SelectionDAG &DAG, bool LegalOperations) {
  // Scalar zero is always representable.
  if (!VT.isVector()) {
    return DAG.getConstant(0, VT);
  }
  if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) {
    // Produce a vector of zeros.
    SDValue El = DAG.getConstant(0, VT.getVectorElementType());
    std::vector<SDValue> Ops(VT.getVectorNumElements(), El);
    return DAG.getNode(ISD::BUILD_VECTOR, DL, VT,
                       &Ops[0], Ops.size());
  }
  return SDValue();
}

SDValue DAGCombiner::visitSUB(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  // N1C1 is the constant C1 when N1 has the form (add A, C1), else null.
  ConstantSDNode *N1C1 = N1.getOpcode() != ISD::ADD ? 0 :
    dyn_cast<ConstantSDNode>(N1.getOperand(1).getNode());
  EVT VT = N0.getValueType();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (sub x, x) -> 0
  // FIXME: Refactor this and xor and other similar operations together.
  if (N0 == N1)
    return tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations);
  // fold (sub c1, c2) -> c1-c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SUB, VT, N0C, N1C);
  // fold (sub x, c) -> (add x, -c)
  if (N1C)
    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0,
                       DAG.getConstant(-N1C->getAPIntValue(), VT));
  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
  if (N0C && N0C->isAllOnesValue())
    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
  // fold A-(A-B) -> B
  if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
    return N1.getOperand(1);
  // fold (A+B)-A -> B
  if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
    return N0.getOperand(1);
  // fold (A+B)-B -> A
  if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1)
    return N0.getOperand(0);
  // fold C2-(A+C1) -> (C2-C1)-A
  if (N1.getOpcode() == ISD::ADD && N0C && N1C1) {
    SDValue NewC = DAG.getConstant((N0C->getAPIntValue() -
                                    N1C1->getAPIntValue()), VT);
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, NewC,
                       N1.getOperand(0));
  }
  // fold ((A+(B+or-C))-B) -> A+or-C
  if (N0.getOpcode() == ISD::ADD &&
      (N0.getOperand(1).getOpcode() == ISD::SUB ||
       N0.getOperand(1).getOpcode() == ISD::ADD) &&
      N0.getOperand(1).getOperand(0) == N1)
    return DAG.getNode(N0.getOperand(1).getOpcode(), N->getDebugLoc(), VT,
                       N0.getOperand(0), N0.getOperand(1).getOperand(1));
  // fold ((A+(C+B))-B) -> A+C
  if (N0.getOpcode() == ISD::ADD &&
      N0.getOperand(1).getOpcode() == ISD::ADD &&
      N0.getOperand(1).getOperand(1) == N1)
    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
                       N0.getOperand(0), N0.getOperand(1).getOperand(0));
  // fold ((A-(B-C))-C) -> A-B
  if (N0.getOpcode() == ISD::SUB &&
      N0.getOperand(1).getOpcode() == ISD::SUB &&
      N0.getOperand(1).getOperand(1) == N1)
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                       N0.getOperand(0), N0.getOperand(1).getOperand(0));

  // If either operand of a sub is undef, the result is undef
  if (N0.getOpcode() == ISD::UNDEF)
    return N0;
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;

  // If the relocation model supports it, consider symbol offsets.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
    if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
      // fold (sub Sym, c) -> Sym-c
      if (N1C && GA->getOpcode() == ISD::GlobalAddress)
        return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
                                    GA->getOffset() -
                                    (uint64_t)N1C->getSExtValue());
      // fold (sub Sym+c1, Sym+c2) -> c1-c2
      if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1))
        if (GA->getGlobal() == GB->getGlobal())
          return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(),
                                 VT);
    }

  return SDValue();
}

SDValue DAGCombiner::visitSUBC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // If the flag result is dead, turn this into a SUB.
1702 if (!N->hasAnyUseOfValue(1)) 1703 return CombineTo(N, DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, N1), 1704 DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1705 MVT::Glue)); 1706 1707 // fold (subc x, x) -> 0 + no borrow 1708 if (N0 == N1) 1709 return CombineTo(N, DAG.getConstant(0, VT), 1710 DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1711 MVT::Glue)); 1712 1713 // fold (subc x, 0) -> x + no borrow 1714 if (N1C && N1C->isNullValue()) 1715 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1716 MVT::Glue)); 1717 1718 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 1719 if (N0C && N0C->isAllOnesValue()) 1720 return CombineTo(N, DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0), 1721 DAG.getNode(ISD::CARRY_FALSE, N->getDebugLoc(), 1722 MVT::Glue)); 1723 1724 return SDValue(); 1725} 1726 1727SDValue DAGCombiner::visitSUBE(SDNode *N) { 1728 SDValue N0 = N->getOperand(0); 1729 SDValue N1 = N->getOperand(1); 1730 SDValue CarryIn = N->getOperand(2); 1731 1732 // fold (sube x, y, false) -> (subc x, y) 1733 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 1734 return DAG.getNode(ISD::SUBC, N->getDebugLoc(), N->getVTList(), N0, N1); 1735 1736 return SDValue(); 1737} 1738 1739SDValue DAGCombiner::visitMUL(SDNode *N) { 1740 SDValue N0 = N->getOperand(0); 1741 SDValue N1 = N->getOperand(1); 1742 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 1743 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 1744 EVT VT = N0.getValueType(); 1745 1746 // fold vector ops 1747 if (VT.isVector()) { 1748 SDValue FoldedVOp = SimplifyVBinOp(N); 1749 if (FoldedVOp.getNode()) return FoldedVOp; 1750 } 1751 1752 // fold (mul x, undef) -> 0 1753 if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) 1754 return DAG.getConstant(0, VT); 1755 // fold (mul c1, c2) -> c1*c2 1756 if (N0C && N1C) 1757 return DAG.FoldConstantArithmetic(ISD::MUL, VT, N0C, N1C); 1758 // canonicalize constant to RHS 1759 if (N0C && !N1C) 1760 return 
      DAG.getNode(ISD::MUL, N->getDebugLoc(), VT, N1, N0);
  // fold (mul x, 0) -> 0
  if (N1C && N1C->isNullValue())
    return N1;
  // fold (mul x, -1) -> 0-x
  if (N1C && N1C->isAllOnesValue())
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                       DAG.getConstant(0, VT), N0);
  // fold (mul x, (1 << c)) -> x << c
  if (N1C && N1C->getAPIntValue().isPowerOf2())
    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
                                       getShiftAmountTy(N0.getValueType())));
  // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
  if (N1C && (-N1C->getAPIntValue()).isPowerOf2()) {
    unsigned Log2Val = (-N1C->getAPIntValue()).logBase2();
    // FIXME: If the input is something that is easily negated (e.g. a
    // single-use add), we should put the negate there.
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                       DAG.getConstant(0, VT),
                       DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
                            DAG.getConstant(Log2Val,
                                      getShiftAmountTy(N0.getValueType()))));
  }
  // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
  // The shifted constant folds at compile time, leaving a single multiply.
  if (N1C && N0.getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(N0.getOperand(1))) {
    SDValue C3 = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
                             N1, N0.getOperand(1));
    AddToWorkList(C3.getNode());
    return DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
                       N0.getOperand(0), C3);
  }

  // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
  // use.
  {
    SDValue Sh(0,0), Y(0,0);
    // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
    if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) &&
        N0.getNode()->hasOneUse()) {
      Sh = N0; Y = N1;
    } else if (N1.getOpcode() == ISD::SHL &&
               isa<ConstantSDNode>(N1.getOperand(1)) &&
               N1.getNode()->hasOneUse()) {
      Sh = N1; Y = N0;
    }

    if (Sh.getNode()) {
      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
                                Sh.getOperand(0), Y);
      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT,
                         Mul, Sh.getOperand(1));
    }
  }

  // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
  if (N1C && N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() &&
      isa<ConstantSDNode>(N0.getOperand(1)))
    return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT,
                       DAG.getNode(ISD::MUL, N0.getDebugLoc(), VT,
                                   N0.getOperand(0), N1),
                       DAG.getNode(ISD::MUL, N1.getDebugLoc(), VT,
                                   N0.getOperand(1), N1));

  // reassociate mul
  SDValue RMUL = ReassociateOps(ISD::MUL, N->getDebugLoc(), N0, N1);
  if (RMUL.getNode() != 0)
    return RMUL;

  return SDValue();
}

SDValue DAGCombiner::visitSDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  EVT VT = N->getValueType(0);

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (sdiv c1, c2) -> c1/c2
  if (N0C && N1C && !N1C->isNullValue())
    return DAG.FoldConstantArithmetic(ISD::SDIV, VT, N0C, N1C);
  // fold (sdiv X, 1) -> X
  if (N1C && N1C->getAPIntValue() == 1LL)
    return N0;
  // fold (sdiv X, -1) -> 0-X
  if (N1C && N1C->isAllOnesValue())
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                       DAG.getConstant(0, VT), N0);
  // If we know the sign bits of both operands are zero, strength reduce to a
  // udiv instead.  Handles (X&15) /s 4 -> X&15 >> 2
  if (!VT.isVector()) {
    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UDIV, N->getDebugLoc(), N1.getValueType(),
                         N0, N1);
  }
  // fold (sdiv X, pow2) -> simple ops after legalize
  // Expands to sra plus a bias correction for negative dividends, so signed
  // division rounds toward zero like the ISD::SDIV node requires.
  if (N1C && !N1C->isNullValue() &&
      (N1C->getAPIntValue().isPowerOf2() ||
       (-N1C->getAPIntValue()).isPowerOf2())) {
    // If dividing by powers of two is cheap, then don't perform the following
    // fold.
    if (TLI.isPow2DivCheap())
      return SDValue();

    unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();

    // Splat the sign bit into the register
    SDValue SGN = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
                              DAG.getConstant(VT.getSizeInBits()-1,
                                       getShiftAmountTy(N0.getValueType())));
    AddToWorkList(SGN.getNode());

    // Add (N0 < 0) ? abs2 - 1 : 0;
    SDValue SRL = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, SGN,
                              DAG.getConstant(VT.getSizeInBits() - lg2,
                                       getShiftAmountTy(SGN.getValueType())));
    SDValue ADD = DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0, SRL);
    AddToWorkList(SRL.getNode());
    AddToWorkList(ADD.getNode());    // Divide by pow2
    SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, ADD,
                  DAG.getConstant(lg2, getShiftAmountTy(ADD.getValueType())));

    // If we're dividing by a positive value, we're done.  Otherwise, we must
    // negate the result.
    if (N1C->getAPIntValue().isNonNegative())
      return SRA;

    AddToWorkList(SRA.getNode());
    return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT,
                       DAG.getConstant(0, VT), SRA);
  }

  // if integer divide is expensive and we satisfy the requirements, emit an
  // alternate sequence.
  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
    // BuildSDIV lowers division by constant to multiply-by-magic-number.
    SDValue Op = BuildSDIV(N);
    if (Op.getNode()) return Op;
  }

  // undef / X -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // X / undef -> undef
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;

  return SDValue();
}

SDValue DAGCombiner::visitUDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  EVT VT = N->getValueType(0);

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (udiv c1, c2) -> c1/c2
  if (N0C && N1C && !N1C->isNullValue())
    return DAG.FoldConstantArithmetic(ISD::UDIV, VT, N0C, N1C);
  // fold (udiv x, (1 << c)) -> x >>u c
  if (N1C && N1C->getAPIntValue().isPowerOf2())
    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
                       DAG.getConstant(N1C->getAPIntValue().logBase2(),
                                       getShiftAmountTy(N0.getValueType())));
  // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
  if (N1.getOpcode() == ISD::SHL) {
    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
      if (SHC->getAPIntValue().isPowerOf2()) {
        // The add is done in the shift-amount type of the SHL node.
        EVT ADDVT = N1.getOperand(1).getValueType();
        SDValue Add = DAG.getNode(ISD::ADD, N->getDebugLoc(), ADDVT,
                                  N1.getOperand(1),
                                  DAG.getConstant(SHC->getAPIntValue()
                                                                .logBase2(),
                                                  ADDVT));
        AddToWorkList(Add.getNode());
        return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, Add);
      }
    }
  }
  // fold (udiv x, c) -> alternate
  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) {
    // BuildUDIV lowers division by constant to multiply-by-magic-number.
    SDValue Op = BuildUDIV(N);
    if (Op.getNode()) return Op;
  }

  // undef / X -> 0
  if
      (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // X / undef -> undef
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;

  return SDValue();
}

SDValue DAGCombiner::visitSREM(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (srem c1, c2) -> c1%c2
  if (N0C && N1C && !N1C->isNullValue())
    return DAG.FoldConstantArithmetic(ISD::SREM, VT, N0C, N1C);
  // If we know the sign bits of both operands are zero, strength reduce to a
  // urem instead.  Handles (X & 0x0FFFFFFF) %s 16 -> X&15
  if (!VT.isVector()) {
    if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UREM, N->getDebugLoc(), VT, N0, N1);
  }

  // If X/C can be simplified by the division-by-constant logic, lower
  // X%C to the equivalent of X-X/C*C.
  if (N1C && !N1C->isNullValue()) {
    // Build a speculative SDIV and see if the combiner can simplify it;
    // only keep the rewrite if the division actually combined to something
    // cheaper than a real divide.
    SDValue Div = DAG.getNode(ISD::SDIV, N->getDebugLoc(), VT, N0, N1);
    AddToWorkList(Div.getNode());
    SDValue OptimizedDiv = combine(Div.getNode());
    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
                                OptimizedDiv, N1);
      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
      AddToWorkList(Mul.getNode());
      return Sub;
    }
  }

  // undef % X -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // X % undef -> undef
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;

  return SDValue();
}

SDValue DAGCombiner::visitUREM(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (urem c1, c2) -> c1%c2
  if (N0C && N1C && !N1C->isNullValue())
    return DAG.FoldConstantArithmetic(ISD::UREM, VT, N0C, N1C);
  // fold (urem x, pow2) -> (and x, pow2-1)
  if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2())
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0,
                       DAG.getConstant(N1C->getAPIntValue()-1,VT));
  // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
  if (N1.getOpcode() == ISD::SHL) {
    if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
      if (SHC->getAPIntValue().isPowerOf2()) {
        // Adding all-ones is adding -1, i.e. computing (shl pow2, y) - 1.
        SDValue Add =
          DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1,
                 DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
                                 VT));
        AddToWorkList(Add.getNode());
        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, Add);
      }
    }
  }

  // If X/C can be simplified by the division-by-constant logic, lower
  // X%C to the equivalent of X-X/C*C.
  if (N1C && !N1C->isNullValue()) {
    // Same speculative-divide trick as in visitSREM, but unsigned.
    SDValue Div = DAG.getNode(ISD::UDIV, N->getDebugLoc(), VT, N0, N1);
    AddToWorkList(Div.getNode());
    SDValue OptimizedDiv = combine(Div.getNode());
    if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
      SDValue Mul = DAG.getNode(ISD::MUL, N->getDebugLoc(), VT,
                                OptimizedDiv, N1);
      SDValue Sub = DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, N0, Mul);
      AddToWorkList(Mul.getNode());
      return Sub;
    }
  }

  // undef % X -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // X % undef -> undef
  if (N1.getOpcode() == ISD::UNDEF)
    return N1;

  return SDValue();
}

SDValue DAGCombiner::visitMULHS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  // fold (mulhs x, 0) -> 0
  if (N1C && N1C->isNullValue())
    return N1;
  // fold (mulhs x, 1) -> (sra x, size(x)-1)
  // The high half of x*1 is just the sign extension of x.
  if (N1C && N1C->getAPIntValue() == 1)
    return DAG.getNode(ISD::SRA, N->getDebugLoc(), N0.getValueType(), N0,
                       DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
                                       getShiftAmountTy(N0.getValueType())));
  // fold (mulhs x, undef) -> 0
  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);

  // If the type twice as wide is legal, transform the mulhs to a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      // Sign-extend both operands, multiply in the double-width type, then
      // shift the high half down and truncate it back to VT.
      N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0);
      N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1);
      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
            DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitMULHU(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  // fold (mulhu x, 0) -> 0
  if (N1C && N1C->isNullValue())
    return N1;
  // fold (mulhu x, 1) -> 0
  // The high half of an unsigned x*1 is always zero.
  if (N1C && N1C->getAPIntValue() == 1)
    return DAG.getConstant(0, N0.getValueType());
  // fold (mulhu x, undef) -> 0
  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);

  // If the type twice as wide is legal, transform the mulhu to a wider multiply
  // plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      // Zero-extend both operands, multiply in the double-width type, then
      // shift the high half down and truncate it back to VT.
      N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0);
      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1);
      N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1);
      N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1,
            DAG.getConstant(SimpleSize, getShiftAmountTy(N1.getValueType())));
      return DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    }
  }

  return SDValue();
}

/// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that
/// compute two values. LoOp and HiOp give the opcodes for the two computations
/// that are being performed. Returns the combined result (via CombineTo) if a
/// simplification was made, otherwise a null SDValue.
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                                unsigned HiOp) {
  // If the high half is not needed, just compute the low half.
  bool HiExists = N->hasAnyUseOfValue(1);
  if (!HiExists &&
      (!LegalOperations ||
       TLI.isOperationLegal(LoOp, N->getValueType(0)))) {
    SDValue Res = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
                              N->op_begin(), N->getNumOperands());
    // Both results are replaced by Res; the dead high half has no uses.
    return CombineTo(N, Res, Res);
  }

  // If the low half is not needed, just compute the high half.
  bool LoExists = N->hasAnyUseOfValue(0);
  if (!LoExists &&
      (!LegalOperations ||
       TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
    SDValue Res = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
                              N->op_begin(), N->getNumOperands());
    return CombineTo(N, Res, Res);
  }

  // If both halves are used, return as it is.
  if (LoExists && HiExists)
    return SDValue();

  // If the two computed results can be simplified separately, separate them.
  if (LoExists) {
    // Speculatively build the low-half op and see if the combiner can
    // simplify it to something new; keep the rewrite only if it did.
    SDValue Lo = DAG.getNode(LoOp, N->getDebugLoc(), N->getValueType(0),
                             N->op_begin(), N->getNumOperands());
    AddToWorkList(Lo.getNode());
    SDValue LoOpt = combine(Lo.getNode());
    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
        (!LegalOperations ||
         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
      return CombineTo(N, LoOpt, LoOpt);
  }

  if (HiExists) {
    // Same speculative combine for the high-half op.
    SDValue Hi = DAG.getNode(HiOp, N->getDebugLoc(), N->getValueType(1),
                             N->op_begin(), N->getNumOperands());
    AddToWorkList(Hi.getNode());
    SDValue HiOpt = combine(Hi.getNode());
    if (HiOpt.getNode() && HiOpt != Hi &&
        (!LegalOperations ||
         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
      return CombineTo(N, HiOpt, HiOpt);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
  if (Res.getNode()) return Res;

  EVT VT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  // If the type twice as wide is legal, transform the smul_lohi to a wider
  // multiply plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
      SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
      // Compute the high part (result value 1) from the upper half of the
      // wide product.
      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
            DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
      // Compute the low part (result value 0) by truncating the wide product.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
      return CombineTo(N, Lo, Hi);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
  if (Res.getNode()) return Res;

  EVT VT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  // If the type twice as wide is legal, transform the umul_lohi to a wider
  // multiply plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0));
      SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1));
      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
      // Compute the high part (result value 1) from the upper half of the
      // wide product.
      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
            DAG.getConstant(SimpleSize, getShiftAmountTy(Lo.getValueType())));
      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
      // Compute the low part (result value 0) by truncating the wide product.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
      return CombineTo(N, Lo, Hi);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitSMULO(SDNode *N) {
  // (smulo x, 2) -> (saddo x, x)
  // x*2 overflows exactly when x+x overflows.
  if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    if (C2->getAPIntValue() == 2)
      return DAG.getNode(ISD::SADDO, N->getDebugLoc(), N->getVTList(),
                         N->getOperand(0), N->getOperand(0));

  return SDValue();
}

SDValue DAGCombiner::visitUMULO(SDNode *N) {
  // (umulo x, 2) -> (uaddo x, x)
  // x*2 overflows exactly when x+x overflows.
  if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    if (C2->getAPIntValue() == 2)
      return DAG.getNode(ISD::UADDO, N->getDebugLoc(), N->getVTList(),
                         N->getOperand(0), N->getOperand(0));

  return SDValue();
}

SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
  if (Res.getNode()) return Res;

  return SDValue();
}

SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
  SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
  if (Res.getNode()) return Res;

  return SDValue();
}

/// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with
/// two operands of the same opcode, try to simplify it.
SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  assert(N0.getOpcode() == N1.getOpcode() && "Bad input!");

  // Bail early if none of these transforms apply.
  if (N0.getNode()->getNumOperands() == 0) return SDValue();

  // For each of OP in AND/OR/XOR:
  // fold (OP (zext x), (zext y)) -> (zext (OP x, y))
  // fold (OP (sext x), (sext y)) -> (sext (OP x, y))
  // fold (OP (aext x), (aext y)) -> (aext (OP x, y))
  // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
  //
  // do not sink logical op inside of a vector extend, since it may combine
  // into a vsetcc.
  EVT Op0VT = N0.getOperand(0).getValueType();
  if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
       N0.getOpcode() == ISD::SIGN_EXTEND ||
       // Avoid infinite looping with PromoteIntBinOp.
       (N0.getOpcode() == ISD::ANY_EXTEND &&
        (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
       (N0.getOpcode() == ISD::TRUNCATE &&
        (!TLI.isZExtFree(VT, Op0VT) ||
         !TLI.isTruncateFree(Op0VT, VT)) &&
        TLI.isTypeLegal(Op0VT))) &&
      !VT.isVector() &&
      // Both hands must extend/truncate from the same source type.
      Op0VT == N1.getOperand(0).getValueType() &&
      (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
                                 N0.getOperand(0).getValueType(),
                                 N0.getOperand(0), N1.getOperand(0));
    AddToWorkList(ORNode.getNode());
    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, ORNode);
  }

  // For each of OP in SHL/SRL/SRA/AND...
  // fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
  // fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
  // fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
       N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
      N0.getOperand(1) == N1.getOperand(1)) {
    SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getDebugLoc(),
                                 N0.getOperand(0).getValueType(),
                                 N0.getOperand(0), N1.getOperand(0));
    AddToWorkList(ORNode.getNode());
    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
                       ORNode, N0.getOperand(1));
  }

  // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
  // Only perform this optimization after type legalization and before
  // LegalizeVectorOprs. LegalizeVectorOprs promotes vector operations by
  // adding bitcasts. For example (xor v4i32) is promoted to (v2i64), and
  // we don't want to undo this promotion.
  // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
  // on scalars.
  if ((N0.getOpcode() == ISD::BITCAST || N0.getOpcode() == ISD::SCALAR_TO_VECTOR)
      && Level == AfterLegalizeTypes) {
    SDValue In0 = N0.getOperand(0);
    SDValue In1 = N1.getOperand(0);
    EVT In0Ty = In0.getValueType();
    EVT In1Ty = In1.getValueType();
    // If both incoming values are integers, and the original types are the
    // same, perform the logical op on the inputs and re-cast the result.
    if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
      SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), In0Ty, In0, In1);
      SDValue BC = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, Op);
      AddToWorkList(Op.getNode());
      return BC;
    }
  }

  // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
  // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
  // If both shuffles use the same mask, and both shuffle within a single
  // vector, then it is worthwhile to move the swizzle after the operation.
  // The type-legalizer generates this pattern when loading illegal
  // vector types from memory. In many cases this allows additional shuffle
  // optimizations.
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      N0.getOperand(1).getOpcode() == ISD::UNDEF &&
      N1.getOperand(1).getOpcode() == ISD::UNDEF) {
    ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0);
    ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1);

    // NOTE(review): this compares N0's first input against N1's *second*
    // operand (the UNDEF checked above); presumably N1.getOperand(0) was
    // intended -- verify. Harmless here since a shuffle's two vector
    // operands share the same type, but the assert is weaker than it looks.
    assert(N0.getOperand(0).getValueType() == N1.getOperand(1).getValueType() &&
           "Inputs to shuffles are not the same type");

    unsigned NumElts = VT.getVectorNumElements();

    // Check that both shuffles use the same mask. The masks are known to be of
    // the same length because the result vector type is the same.
    bool SameMask = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx0 = SVN0->getMaskElt(i);
      int Idx1 = SVN1->getMaskElt(i);
      if (Idx0 != Idx1) {
        SameMask = false;
        break;
      }
    }

    if (SameMask) {
      // Apply the logical op to the unshuffled inputs, then redo the single
      // shared swizzle on the result.
      SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT,
                               N0.getOperand(0), N1.getOperand(0));
      AddToWorkList(Op.getNode());
      return DAG.getVectorShuffle(VT, N->getDebugLoc(), Op,
                                  DAG.getUNDEF(VT), &SVN0->getMask()[0]);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitAND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue LL, LR, RL, RR, CC0, CC1;
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N1.getValueType();
  unsigned BitWidth = VT.getScalarType().getSizeInBits();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (and x, undef) -> 0
  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // fold (and c1, c2) -> c1&c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::AND, VT, N0C, N1C);
  // canonicalize constant to RHS
  if (N0C && !N1C)
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N1, N0);
  // fold (and x, -1) -> x
  if (N1C && N1C->isAllOnesValue())
    return N0;
  // if (and x, c) is known to be zero, return 0
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(BitWidth)))
    return DAG.getConstant(0, VT);
  // reassociate and
  SDValue RAND = ReassociateOps(ISD::AND, N->getDebugLoc(), N0, N1);
  if (RAND.getNode() != 0)
    return RAND;
  // fold (and (or x, C), D) -> D if (C & D) == D
  if (N1C && N0.getOpcode() == ISD::OR)
    if (ConstantSDNode *ORI =
dyn_cast<ConstantSDNode>(N0.getOperand(1))) 2443 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue()) 2444 return N1; 2445 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 2446 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 2447 SDValue N0Op0 = N0.getOperand(0); 2448 APInt Mask = ~N1C->getAPIntValue(); 2449 Mask = Mask.trunc(N0Op0.getValueSizeInBits()); 2450 if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 2451 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), 2452 N0.getValueType(), N0Op0); 2453 2454 // Replace uses of the AND with uses of the Zero extend node. 2455 CombineTo(N, Zext); 2456 2457 // We actually want to replace all uses of the any_extend with the 2458 // zero_extend, to avoid duplicating things. This will later cause this 2459 // AND to be folded. 2460 CombineTo(N0.getNode(), Zext); 2461 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2462 } 2463 } 2464 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 2465 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 2466 // already be zero by virtue of the width of the base type of the load. 2467 // 2468 // the 'X' node here can either be nothing or an extract_vector_elt to catch 2469 // more cases. 2470 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 2471 N0.getOperand(0).getOpcode() == ISD::LOAD) || 2472 N0.getOpcode() == ISD::LOAD) { 2473 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 2474 N0 : N0.getOperand(0) ); 2475 2476 // Get the constant (if applicable) the zero'th operand is being ANDed with. 2477 // This can be a pure constant or a vector splat, in which case we treat the 2478 // vector as a scalar and use the splat value. 
2479 APInt Constant = APInt::getNullValue(1); 2480 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 2481 Constant = C->getAPIntValue(); 2482 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 2483 APInt SplatValue, SplatUndef; 2484 unsigned SplatBitSize; 2485 bool HasAnyUndefs; 2486 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 2487 SplatBitSize, HasAnyUndefs); 2488 if (IsSplat) { 2489 // Undef bits can contribute to a possible optimisation if set, so 2490 // set them. 2491 SplatValue |= SplatUndef; 2492 2493 // The splat value may be something like "0x00FFFFFF", which means 0 for 2494 // the first vector value and FF for the rest, repeating. We need a mask 2495 // that will apply equally to all members of the vector, so AND all the 2496 // lanes of the constant together. 2497 EVT VT = Vector->getValueType(0); 2498 unsigned BitWidth = VT.getVectorElementType().getSizeInBits(); 2499 Constant = APInt::getAllOnesValue(BitWidth); 2500 for (unsigned i = 0, n = VT.getVectorNumElements(); i < n; ++i) 2501 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 2502 } 2503 } 2504 2505 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 2506 // actually legal and isn't going to get expanded, else this is a false 2507 // optimisation. 2508 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 2509 Load->getMemoryVT()); 2510 2511 // Resize the constant to the same size as the original memory access before 2512 // extension. If it is still the AllOnesValue then this AND is completely 2513 // unneeded. 
2514 Constant = 2515 Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits()); 2516 2517 bool B; 2518 switch (Load->getExtensionType()) { 2519 default: B = false; break; 2520 case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 2521 case ISD::ZEXTLOAD: 2522 case ISD::NON_EXTLOAD: B = true; break; 2523 } 2524 2525 if (B && Constant.isAllOnesValue()) { 2526 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 2527 // preserve semantics once we get rid of the AND. 2528 SDValue NewLoad(Load, 0); 2529 if (Load->getExtensionType() == ISD::EXTLOAD) { 2530 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 2531 Load->getValueType(0), Load->getDebugLoc(), 2532 Load->getChain(), Load->getBasePtr(), 2533 Load->getOffset(), Load->getMemoryVT(), 2534 Load->getMemOperand()); 2535 // Replace uses of the EXTLOAD with the new ZEXTLOAD. 2536 if (Load->getNumValues() == 3) { 2537 // PRE/POST_INC loads have 3 values. 2538 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 2539 NewLoad.getValue(2) }; 2540 CombineTo(Load, To, 3, true); 2541 } else { 2542 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 2543 } 2544 } 2545 2546 // Fold the AND away, taking care not to fold to the old load node if we 2547 // replaced it. 2548 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 2549 2550 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2551 } 2552 } 2553 // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) 2554 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){ 2555 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get(); 2556 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get(); 2557 2558 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 && 2559 LL.getValueType().isInteger()) { 2560 // fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0) 2561 if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) { 2562 SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(), 2563 LR.getValueType(), LL, RL); 2564 AddToWorkList(ORNode.getNode()); 2565 return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1); 2566 } 2567 // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1) 2568 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) { 2569 SDValue ANDNode = DAG.getNode(ISD::AND, N0.getDebugLoc(), 2570 LR.getValueType(), LL, RL); 2571 AddToWorkList(ANDNode.getNode()); 2572 return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1); 2573 } 2574 // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1) 2575 if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) { 2576 SDValue ORNode = DAG.getNode(ISD::OR, N0.getDebugLoc(), 2577 LR.getValueType(), LL, RL); 2578 AddToWorkList(ORNode.getNode()); 2579 return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1); 2580 } 2581 } 2582 // canonicalize equivalent to ll == rl 2583 if (LL == RR && LR == RL) { 2584 Op1 = ISD::getSetCCSwappedOperands(Op1); 2585 std::swap(RL, RR); 2586 } 2587 if (LL == RL && LR == RR) { 2588 bool isInteger = LL.getValueType().isInteger(); 2589 ISD::CondCode Result = ISD::getSetCCAndOperation(Op0, Op1, isInteger); 2590 if (Result != ISD::SETCC_INVALID && 2591 (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType()))) 2592 return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(), 2593 LL, LR, Result); 2594 } 2595 } 2596 2597 // 
Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 2598 if (N0.getOpcode() == N1.getOpcode()) { 2599 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); 2600 if (Tmp.getNode()) return Tmp; 2601 } 2602 2603 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 2604 // fold (and (sra)) -> (and (srl)) when possible. 2605 if (!VT.isVector() && 2606 SimplifyDemandedBits(SDValue(N, 0))) 2607 return SDValue(N, 0); 2608 2609 // fold (zext_inreg (extload x)) -> (zextload x) 2610 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 2611 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2612 EVT MemVT = LN0->getMemoryVT(); 2613 // If we zero all the possible extended bits, then we can turn this into 2614 // a zextload if we are running before legalize or the operation is legal. 2615 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2616 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2617 BitWidth - MemVT.getScalarType().getSizeInBits())) && 2618 ((!LegalOperations && !LN0->isVolatile()) || 2619 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2620 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT, 2621 LN0->getChain(), LN0->getBasePtr(), 2622 LN0->getPointerInfo(), MemVT, 2623 LN0->isVolatile(), LN0->isNonTemporal(), 2624 LN0->getAlignment()); 2625 AddToWorkList(N); 2626 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2627 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2628 } 2629 } 2630 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 2631 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 2632 N0.hasOneUse()) { 2633 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 2634 EVT MemVT = LN0->getMemoryVT(); 2635 // If we zero all the possible extended bits, then we can turn this into 2636 // a zextload if we are running before legalize or the operation is legal. 
2637 unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits(); 2638 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 2639 BitWidth - MemVT.getScalarType().getSizeInBits())) && 2640 ((!LegalOperations && !LN0->isVolatile()) || 2641 TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) { 2642 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT, 2643 LN0->getChain(), 2644 LN0->getBasePtr(), LN0->getPointerInfo(), 2645 MemVT, 2646 LN0->isVolatile(), LN0->isNonTemporal(), 2647 LN0->getAlignment()); 2648 AddToWorkList(N); 2649 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 2650 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2651 } 2652 } 2653 2654 // fold (and (load x), 255) -> (zextload x, i8) 2655 // fold (and (extload x, i16), 255) -> (zextload x, i8) 2656 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 2657 if (N1C && (N0.getOpcode() == ISD::LOAD || 2658 (N0.getOpcode() == ISD::ANY_EXTEND && 2659 N0.getOperand(0).getOpcode() == ISD::LOAD))) { 2660 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 2661 LoadSDNode *LN0 = HasAnyExt 2662 ? cast<LoadSDNode>(N0.getOperand(0)) 2663 : cast<LoadSDNode>(N0); 2664 if (LN0->getExtensionType() != ISD::SEXTLOAD && 2665 LN0->isUnindexed() && N0.hasOneUse() && LN0->hasOneUse()) { 2666 uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits(); 2667 if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue())){ 2668 EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 2669 EVT LoadedVT = LN0->getMemoryVT(); 2670 2671 if (ExtVT == LoadedVT && 2672 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2673 EVT LoadResultTy = HasAnyExt ? 
LN0->getValueType(0) : VT; 2674 2675 SDValue NewLoad = 2676 DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy, 2677 LN0->getChain(), LN0->getBasePtr(), 2678 LN0->getPointerInfo(), 2679 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 2680 LN0->getAlignment()); 2681 AddToWorkList(N); 2682 CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 2683 return SDValue(N, 0); // Return N so it doesn't get rechecked! 2684 } 2685 2686 // Do not change the width of a volatile load. 2687 // Do not generate loads of non-round integer types since these can 2688 // be expensive (and would be wrong if the type is not byte sized). 2689 if (!LN0->isVolatile() && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() && 2690 (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { 2691 EVT PtrType = LN0->getOperand(1).getValueType(); 2692 2693 unsigned Alignment = LN0->getAlignment(); 2694 SDValue NewPtr = LN0->getBasePtr(); 2695 2696 // For big endian targets, we need to add an offset to the pointer 2697 // to load the correct bytes. For little endian systems, we merely 2698 // need to read fewer bytes from the same pointer. 2699 if (TLI.isBigEndian()) { 2700 unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 2701 unsigned EVTStoreBytes = ExtVT.getStoreSize(); 2702 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 2703 NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), PtrType, 2704 NewPtr, DAG.getConstant(PtrOff, PtrType)); 2705 Alignment = MinAlign(Alignment, PtrOff); 2706 } 2707 2708 AddToWorkList(NewPtr.getNode()); 2709 2710 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 2711 SDValue Load = 2712 DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy, 2713 LN0->getChain(), NewPtr, 2714 LN0->getPointerInfo(), 2715 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 2716 Alignment); 2717 AddToWorkList(N); 2718 CombineTo(LN0, Load, Load.getValue(1)); 2719 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2720 } 2721 } 2722 } 2723 } 2724 2725 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && 2726 VT.getSizeInBits() <= 64) { 2727 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2728 APInt ADDC = ADDI->getAPIntValue(); 2729 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 2730 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal 2731 // immediate for an add, but it is legal if its top c2 bits are set, 2732 // transform the ADD so the immediate doesn't need to be materialized 2733 // in a register. 2734 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { 2735 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 2736 SRLI->getZExtValue()); 2737 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { 2738 ADDC |= Mask; 2739 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 2740 SDValue NewAdd = 2741 DAG.getNode(ISD::ADD, N0.getDebugLoc(), VT, 2742 N0.getOperand(0), DAG.getConstant(ADDC, VT)); 2743 CombineTo(N0.getNode(), NewAdd); 2744 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
2745 }
2746 }
2747 }
2748 }
2749 }
2750 }
2751
2752
2753 return SDValue();
2754}
2755
// Matches the two-term halfword byteswap idiom on the low 16 bits of an OR.
/// MatchBSwapHWordLow - Match (a >> 8) | (a << 8) as (bswap a) >> 16.
/// N is the OR node; N0/N1 its operands. DemandHighBits indicates whether the
/// caller requires the bits above the low halfword to be preserved (zero).
/// Returns the replacement value, or an empty SDValue if the pattern fails.
2758SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
2759 bool DemandHighBits) {
2760 if (!LegalOperations)
2761 return SDValue();
2762
// Only scalar integer types with a legal BSWAP are handled.
2763 EVT VT = N->getValueType(0);
2764 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
2765 return SDValue();
2766 if (!TLI.isOperationLegal(ISD::BSWAP, VT))
2767 return SDValue();
2768
2769 // Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00)
// Canonicalize so that the SHL-rooted term ends up in N0 and the SRL-rooted
// term in N1; the LookPassAnd flags remember that a masking AND was peeled.
2770 bool LookPassAnd0 = false;
2771 bool LookPassAnd1 = false;
2772 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
2773 std::swap(N0, N1);
2774 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
2775 std::swap(N0, N1);
2776 if (N0.getOpcode() == ISD::AND) {
2777 if (!N0.getNode()->hasOneUse())
2778 return SDValue();
2779 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2780 if (!N01C || N01C->getZExtValue() != 0xFF00)
2781 return SDValue();
2782 N0 = N0.getOperand(0);
2783 LookPassAnd0 = true;
2784 }
2785
2786 if (N1.getOpcode() == ISD::AND) {
2787 if (!N1.getNode()->hasOneUse())
2788 return SDValue();
2789 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
2790 if (!N11C || N11C->getZExtValue() != 0xFF)
2791 return SDValue();
2792 N1 = N1.getOperand(0);
2793 LookPassAnd1 = true;
2794 }
2795
// After peeling the masks, we must have exactly one SHL and one SRL,
// each shifting by 8 and each with a single use.
2796 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
2797 std::swap(N0, N1);
2798 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
2799 return SDValue();
2800 if (!N0.getNode()->hasOneUse() ||
2801 !N1.getNode()->hasOneUse())
2802 return SDValue();
2803
2804 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2805 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
2806 if (!N01C || !N11C)
2807 return SDValue();
2808 if (N01C->getZExtValue() != 8 ||
N11C->getZExtValue() != 8)
2809 return SDValue();
2810
2811 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
// NOTE(review): getOperand(0) is safe here — N0/N1 were just verified to be
// SHL/SRL nodes, which always have two operands.
2812 SDValue N00 = N0->getOperand(0);
2813 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
2814 if (!N00.getNode()->hasOneUse())
2815 return SDValue();
2816 ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
2817 if (!N001C || N001C->getZExtValue() != 0xFF)
2818 return SDValue();
2819 N00 = N00.getOperand(0);
2820 LookPassAnd0 = true;
2821 }
2822
2823 SDValue N10 = N1->getOperand(0);
2824 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
2825 if (!N10.getNode()->hasOneUse())
2826 return SDValue();
2827 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
2828 if (!N101C || N101C->getZExtValue() != 0xFF00)
2829 return SDValue();
2830 N10 = N10.getOperand(0);
2831 LookPassAnd1 = true;
2832 }
2833
// Both terms must originate from the same value.
2834 if (N00 != N10)
2835 return SDValue();
2836
2837 // Make sure everything beyond the low halfword is zero since the SRL 16
2838 // will clear the top bits.
2839 unsigned OpSizeInBits = VT.getSizeInBits();
2840 if (DemandHighBits && OpSizeInBits > 16 &&
2841 (!LookPassAnd0 || !LookPassAnd1) &&
2842 !DAG.MaskedValueIsZero(N10, APInt::getHighBitsSet(OpSizeInBits, 16)))
2843 return SDValue();
2844
// Emit (bswap a) and, for types wider than i16, shift the swapped halfword
// back down into the low bits.
2845 SDValue Res = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT, N00);
2846 if (OpSizeInBits > 16)
2847 Res = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Res,
2848 DAG.getConstant(OpSizeInBits-16, getShiftAmountTy(VT)));
2849 return Res;
2850}
2851
2852/// isBSwapHWordElement - Return true if the specified node is an element
2853/// that makes up a 32-bit packed halfword byteswap. i.e.
2854 /// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
// Parts[0..3] collect the source node for the byte masked by 0xFF, 0xFF00,
// 0xFF0000 and 0xFF000000 respectively; each slot may be filled only once.
2855 static bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) {
2856 if (!N.getNode()->hasOneUse())
2857 return false;
2858
2859 unsigned Opc = N.getOpcode();
2860 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
2861 return false;
2862
2863 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2864 if (!N1C)
2865 return false;
2866
// Map the constant operand (mask for AND, byte mask implied for SHL/SRL)
// to its byte-lane index.
2867 unsigned Num;
2868 switch (N1C->getZExtValue()) {
2869 default:
2870 return false;
2871 case 0xFF: Num = 0; break;
2872 case 0xFF00: Num = 1; break;
2873 case 0xFF0000: Num = 2; break;
2874 case 0xFF000000: Num = 3; break;
2875 }
2876
2877 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
2878 SDValue N0 = N.getOperand(0);
2879 if (Opc == ISD::AND) {
2880 if (Num == 0 || Num == 2) {
2881 // (x >> 8) & 0xff
2882 // (x >> 8) & 0xff0000
2883 if (N0.getOpcode() != ISD::SRL)
2884 return false;
2885 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2886 if (!C || C->getZExtValue() != 8)
2887 return false;
2888 } else {
2889 // (x << 8) & 0xff00
2890 // (x << 8) & 0xff000000
2891 if (N0.getOpcode() != ISD::SHL)
2892 return false;
2893 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
2894 if (!C || C->getZExtValue() != 8)
2895 return false;
2896 }
2897 } else if (Opc == ISD::SHL) {
2898 // (x & 0xff) << 8
2899 // (x & 0xff0000) << 8
2900 if (Num != 0 && Num != 2)
2901 return false;
2902 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2903 if (!C || C->getZExtValue() != 8)
2904 return false;
2905 } else { // Opc == ISD::SRL
2906 // (x & 0xff00) >> 8
2907 // (x & 0xff000000) >> 8
2908 if (Num != 1 && Num != 3)
2909 return false;
2910 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
2911 if (!C || C->getZExtValue() != 8)
2912 return false;
2913 }
2914
// Reject duplicates: each byte lane may be contributed exactly once.
2915 if (Parts[Num])
2916 return false;
2917
2918 Parts[Num] = N0.getOperand(0).getNode();
2919 return true;
2920}
2921
2922/// MatchBSwapHWord - Match a 32-bit packed halfword bswap. That is
2923/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
2924/// => (rotl (bswap x), 16)
2925SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
2926 if (!LegalOperations)
2927 return SDValue();
2928
2929 EVT VT = N->getValueType(0);
2930 if (VT != MVT::i32)
2931 return SDValue();
2932 if (!TLI.isOperationLegal(ISD::BSWAP, VT))
2933 return SDValue();
2934
2935 SmallVector<SDNode*,4> Parts(4, (SDNode*)0);
2936 // Look for either
2937 // (or (or (and), (and)), (or (and), (and)))
2938 // (or (or (or (and), (and)), (and)), (and))
2939 if (N0.getOpcode() != ISD::OR)
2940 return SDValue();
2941 SDValue N00 = N0.getOperand(0);
2942 SDValue N01 = N0.getOperand(1);
2943
2944 if (N1.getOpcode() == ISD::OR) {
2945 // (or (or (and), (and)), (or (and), (and)))
// NOTE(review): this branch calls getOperand(0)/(1) on N00 and N01 without
// first checking they have operands; if either is a leaf node this would
// assert. Presumably unreachable in practice — verify upstream invariants.
2946 SDValue N000 = N00.getOperand(0);
2947 if (!isBSwapHWordElement(N000, Parts))
2948 return SDValue();
2949
2950 SDValue N001 = N00.getOperand(1);
2951 if (!isBSwapHWordElement(N001, Parts))
2952 return SDValue();
2953 SDValue N010 = N01.getOperand(0);
2954 if (!isBSwapHWordElement(N010, Parts))
2955 return SDValue();
2956 SDValue N011 = N01.getOperand(1);
2957 if (!isBSwapHWordElement(N011, Parts))
2958 return SDValue();
2959 } else {
2960 // (or (or (or (and), (and)), (and)), (and))
2961 if (!isBSwapHWordElement(N1, Parts))
2962 return SDValue();
2963 if (!isBSwapHWordElement(N01, Parts))
2964 return SDValue();
2965 if (N00.getOpcode() != ISD::OR)
2966 return SDValue();
2967 SDValue N000 = N00.getOperand(0);
2968 if (!isBSwapHWordElement(N000, Parts))
2969 return SDValue();
2970 SDValue N001 = N00.getOperand(1);
2971 if (!isBSwapHWordElement(N001, Parts))
2972 return SDValue();
2973 }
2974
2975 // Make sure the parts are all coming from the same node.
2976 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
2977 return SDValue();
2978
2979 SDValue BSwap = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT,
2980 SDValue(Parts[0],0));
2981
2982 // Result of the bswap should be rotated by 16. If it's not legal, than
2983 // do (x << 16) | (x >> 16).
2984 SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT));
2985 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
2986 return DAG.getNode(ISD::ROTL, N->getDebugLoc(), VT, BSwap, ShAmt);
2987 else if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
2988 return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, BSwap, ShAmt);
2989 return DAG.getNode(ISD::OR, N->getDebugLoc(), VT,
2990 DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, BSwap, ShAmt),
2991 DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, BSwap, ShAmt));
2992}
2993
// Combine an ISD::OR node; returns the replacement value or an empty SDValue
// if no fold applies.
2994SDValue DAGCombiner::visitOR(SDNode *N) {
2995 SDValue N0 = N->getOperand(0);
2996 SDValue N1 = N->getOperand(1);
2997 SDValue LL, LR, RL, RR, CC0, CC1;
2998 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
2999 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
3000 EVT VT = N1.getValueType();
3001
3002 // fold vector ops
3003 if (VT.isVector()) {
3004 SDValue FoldedVOp = SimplifyVBinOp(N);
3005 if (FoldedVOp.getNode()) return FoldedVOp;
3006 }
3007
3008 // fold (or x, undef) -> -1
3009 if (!LegalOperations &&
3010 (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
3011 EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
3012 return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
3013 }
3014 // fold (or c1, c2) -> c1|c2
3015 if (N0C && N1C)
3016 return DAG.FoldConstantArithmetic(ISD::OR, VT, N0C, N1C);
3017 // canonicalize constant to RHS
3018 if (N0C && !N1C)
3019 return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N1, N0);
3020 // fold (or x, 0) -> x
3021 if (N1C && N1C->isNullValue())
3022 return N0;
3023 // fold (or x, -1) -> -1
3024 if (N1C && N1C->isAllOnesValue())
3025 return N1;
3026 // fold (or x, c) -> c iff (x & ~c) == 0
3027 if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
3028 return N1;
3029
3030 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
3031 SDValue BSwap = MatchBSwapHWord(N, N0, N1);
3032 if (BSwap.getNode() != 0)
3033 return BSwap;
3034 BSwap = MatchBSwapHWordLow(N, N0, N1);
3035 if (BSwap.getNode() != 0)
3036 return BSwap;
3037
3038 // reassociate or
3039 SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1);
3040 if (ROR.getNode() != 0)
3041 return ROR;
3042 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
3043 // iff (c1 & c2) == 0.
3044 if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
3045 isa<ConstantSDNode>(N0.getOperand(1))) {
3046 ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1));
3047 if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0)
3048 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
3049 DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
3050 N0.getOperand(0), N1),
3051 DAG.FoldConstantArithmetic(ISD::OR, VT, N1C, C1));
3052 }
3053 // fold (or (setcc x), (setcc y)) -> (setcc (or x, y))
3054 if (isSetCCEquivalent(N0, LL, LR, CC0) && isSetCCEquivalent(N1, RL, RR, CC1)){
3055 ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
3056 ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
3057
3058 if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
3059 LL.getValueType().isInteger()) {
3060 // fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
3061 // fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
3062 if (cast<ConstantSDNode>(LR)->isNullValue() &&
3063 (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
3064 SDValue ORNode = DAG.getNode(ISD::OR, LR.getDebugLoc(),
3065 LR.getValueType(), LL, RL);
3066 AddToWorkList(ORNode.getNode());
3067 return DAG.getSetCC(N->getDebugLoc(), VT, ORNode, LR, Op1);
3068 }
3069 // fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
3070 // fold (or (setgt X, -1), (setgt Y -1)) -> (setgt (and X, Y), -1)
3071 if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
3072 (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
3073 SDValue ANDNode = DAG.getNode(ISD::AND, LR.getDebugLoc(),
3074 LR.getValueType(), LL, RL);
3075 AddToWorkList(ANDNode.getNode());
3076 return DAG.getSetCC(N->getDebugLoc(), VT, ANDNode, LR, Op1);
3077 }
3078 }
3079 // canonicalize equivalent to ll == rl
3080 if (LL == RR && LR == RL) {
3081 Op1 = ISD::getSetCCSwappedOperands(Op1);
3082 std::swap(RL, RR);
3083 }
3084 if (LL == RL && LR == RR) {
3085 bool isInteger = LL.getValueType().isInteger();
3086 ISD::CondCode Result = ISD::getSetCCOrOperation(Op0, Op1, isInteger);
3087 if (Result != ISD::SETCC_INVALID &&
3088 (!LegalOperations || TLI.isCondCodeLegal(Result, LL.getValueType())))
3089 return DAG.getSetCC(N->getDebugLoc(), N0.getValueType(),
3090 LL, LR, Result);
3091 }
3092 }
3093
3094 // Simplify: (or (op x...), (op y...)) -> (op (or x, y))
3095 if (N0.getOpcode() == N1.getOpcode()) {
3096 SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
3097 if (Tmp.getNode()) return Tmp;
3098 }
3099
3100 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
3101 if (N0.getOpcode() == ISD::AND &&
3102 N1.getOpcode() == ISD::AND &&
3103 N0.getOperand(1).getOpcode() == ISD::Constant &&
3104 N1.getOperand(1).getOpcode() == ISD::Constant &&
3105 // Don't increase # computations.
3106 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
3107 // We can only do this xform if we know that bits from X that are set in C2
3108 // but not in C1 are already zero. Likewise for Y.
3109 const APInt &LHSMask =
3110 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
3111 const APInt &RHSMask =
3112 cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
3113
3114 if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
3115 DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
3116 SDValue X = DAG.getNode(ISD::OR, N0.getDebugLoc(), VT,
3117 N0.getOperand(0), N1.getOperand(0));
3118 return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, X,
3119 DAG.getConstant(LHSMask | RHSMask, VT));
3120 }
3121 }
3122
3123 // See if this is some rotate idiom.
3124 if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
3125 return SDValue(Rot, 0);
3126
3127 // Simplify the operands using demanded-bits information.
3128 if (!VT.isVector() &&
3129 SimplifyDemandedBits(SDValue(N, 0)))
3130 return SDValue(N, 0);
3131
3132 return SDValue();
3133}
3134
3135/// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present.
3136static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) { 3137 if (Op.getOpcode() == ISD::AND) { 3138 if (isa<ConstantSDNode>(Op.getOperand(1))) { 3139 Mask = Op.getOperand(1); 3140 Op = Op.getOperand(0); 3141 } else { 3142 return false; 3143 } 3144 } 3145 3146 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) { 3147 Shift = Op; 3148 return true; 3149 } 3150 3151 return false; 3152} 3153 3154// MatchRotate - Handle an 'or' of two operands. If this is one of the many 3155// idioms for rotate, and if the target supports rotation instructions, generate 3156// a rot[lr]. 3157SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) { 3158 // Must be a legal type. Expanded 'n promoted things won't work with rotates. 3159 EVT VT = LHS.getValueType(); 3160 if (!TLI.isTypeLegal(VT)) return 0; 3161 3162 // The target must have at least one rotate flavor. 3163 bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT); 3164 bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT); 3165 if (!HasROTL && !HasROTR) return 0; 3166 3167 // Match "(X shl/srl V1) & V2" where V2 may not be present. 3168 SDValue LHSShift; // The shift. 3169 SDValue LHSMask; // AND value if any. 3170 if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) 3171 return 0; // Not part of a rotate. 3172 3173 SDValue RHSShift; // The shift. 3174 SDValue RHSMask; // AND value if any. 3175 if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) 3176 return 0; // Not part of a rotate. 3177 3178 if (LHSShift.getOperand(0) != RHSShift.getOperand(0)) 3179 return 0; // Not shifting the same value. 3180 3181 if (LHSShift.getOpcode() == RHSShift.getOpcode()) 3182 return 0; // Shifts must disagree. 3183 3184 // Canonicalize shl to left side in a shl/srl pair. 
3185 if (RHSShift.getOpcode() == ISD::SHL) { 3186 std::swap(LHS, RHS); 3187 std::swap(LHSShift, RHSShift); 3188 std::swap(LHSMask , RHSMask ); 3189 } 3190 3191 unsigned OpSizeInBits = VT.getSizeInBits(); 3192 SDValue LHSShiftArg = LHSShift.getOperand(0); 3193 SDValue LHSShiftAmt = LHSShift.getOperand(1); 3194 SDValue RHSShiftAmt = RHSShift.getOperand(1); 3195 3196 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) 3197 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) 3198 if (LHSShiftAmt.getOpcode() == ISD::Constant && 3199 RHSShiftAmt.getOpcode() == ISD::Constant) { 3200 uint64_t LShVal = cast<ConstantSDNode>(LHSShiftAmt)->getZExtValue(); 3201 uint64_t RShVal = cast<ConstantSDNode>(RHSShiftAmt)->getZExtValue(); 3202 if ((LShVal + RShVal) != OpSizeInBits) 3203 return 0; 3204 3205 SDValue Rot; 3206 if (HasROTL) 3207 Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt); 3208 else 3209 Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt); 3210 3211 // If there is an AND of either shifted operand, apply it to the result. 3212 if (LHSMask.getNode() || RHSMask.getNode()) { 3213 APInt Mask = APInt::getAllOnesValue(OpSizeInBits); 3214 3215 if (LHSMask.getNode()) { 3216 APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal); 3217 Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits; 3218 } 3219 if (RHSMask.getNode()) { 3220 APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal); 3221 Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits; 3222 } 3223 3224 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, DAG.getConstant(Mask, VT)); 3225 } 3226 3227 return Rot.getNode(); 3228 } 3229 3230 // If there is a mask here, and we have a variable shift, we can't be sure 3231 // that we're masking out the right stuff. 
3232 if (LHSMask.getNode() || RHSMask.getNode()) 3233 return 0; 3234 3235 // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotl x, y) 3236 // fold (or (shl x, y), (srl x, (sub 32, y))) -> (rotr x, (sub 32, y)) 3237 if (RHSShiftAmt.getOpcode() == ISD::SUB && 3238 LHSShiftAmt == RHSShiftAmt.getOperand(1)) { 3239 if (ConstantSDNode *SUBC = 3240 dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) { 3241 if (SUBC->getAPIntValue() == OpSizeInBits) { 3242 if (HasROTL) 3243 return DAG.getNode(ISD::ROTL, DL, VT, 3244 LHSShiftArg, LHSShiftAmt).getNode(); 3245 else 3246 return DAG.getNode(ISD::ROTR, DL, VT, 3247 LHSShiftArg, RHSShiftAmt).getNode(); 3248 } 3249 } 3250 } 3251 3252 // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotr x, y) 3253 // fold (or (shl x, (sub 32, y)), (srl x, r)) -> (rotl x, (sub 32, y)) 3254 if (LHSShiftAmt.getOpcode() == ISD::SUB && 3255 RHSShiftAmt == LHSShiftAmt.getOperand(1)) { 3256 if (ConstantSDNode *SUBC = 3257 dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) { 3258 if (SUBC->getAPIntValue() == OpSizeInBits) { 3259 if (HasROTR) 3260 return DAG.getNode(ISD::ROTR, DL, VT, 3261 LHSShiftArg, RHSShiftAmt).getNode(); 3262 else 3263 return DAG.getNode(ISD::ROTL, DL, VT, 3264 LHSShiftArg, LHSShiftAmt).getNode(); 3265 } 3266 } 3267 } 3268 3269 // Look for sign/zext/any-extended or truncate cases: 3270 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND 3271 || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND 3272 || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND 3273 || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && 3274 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND 3275 || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND 3276 || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND 3277 || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { 3278 SDValue LExtOp0 = LHSShiftAmt.getOperand(0); 3279 SDValue RExtOp0 = RHSShiftAmt.getOperand(0); 3280 if (RExtOp0.getOpcode() == ISD::SUB && 3281 RExtOp0.getOperand(1) == LExtOp0) { 3282 // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, 
y)))) -> 3283 // (rotl x, y) 3284 // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) -> 3285 // (rotr x, (sub 32, y)) 3286 if (ConstantSDNode *SUBC = 3287 dyn_cast<ConstantSDNode>(RExtOp0.getOperand(0))) { 3288 if (SUBC->getAPIntValue() == OpSizeInBits) { 3289 return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, 3290 LHSShiftArg, 3291 HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode(); 3292 } 3293 } 3294 } else if (LExtOp0.getOpcode() == ISD::SUB && 3295 RExtOp0 == LExtOp0.getOperand(1)) { 3296 // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) -> 3297 // (rotr x, y) 3298 // fold (or (shl x, (*ext (sub 32, y))), (srl x, (*ext y))) -> 3299 // (rotl x, (sub 32, y)) 3300 if (ConstantSDNode *SUBC = 3301 dyn_cast<ConstantSDNode>(LExtOp0.getOperand(0))) { 3302 if (SUBC->getAPIntValue() == OpSizeInBits) { 3303 return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, 3304 LHSShiftArg, 3305 HasROTR ? RHSShiftAmt : LHSShiftAmt).getNode(); 3306 } 3307 } 3308 } 3309 } 3310 3311 return 0; 3312} 3313 3314SDValue DAGCombiner::visitXOR(SDNode *N) { 3315 SDValue N0 = N->getOperand(0); 3316 SDValue N1 = N->getOperand(1); 3317 SDValue LHS, RHS, CC; 3318 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 3319 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 3320 EVT VT = N0.getValueType(); 3321 3322 // fold vector ops 3323 if (VT.isVector()) { 3324 SDValue FoldedVOp = SimplifyVBinOp(N); 3325 if (FoldedVOp.getNode()) return FoldedVOp; 3326 } 3327 3328 // fold (xor undef, undef) -> 0. This is a common idiom (misuse). 
3329 if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF) 3330 return DAG.getConstant(0, VT); 3331 // fold (xor x, undef) -> undef 3332 if (N0.getOpcode() == ISD::UNDEF) 3333 return N0; 3334 if (N1.getOpcode() == ISD::UNDEF) 3335 return N1; 3336 // fold (xor c1, c2) -> c1^c2 3337 if (N0C && N1C) 3338 return DAG.FoldConstantArithmetic(ISD::XOR, VT, N0C, N1C); 3339 // canonicalize constant to RHS 3340 if (N0C && !N1C) 3341 return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0); 3342 // fold (xor x, 0) -> x 3343 if (N1C && N1C->isNullValue()) 3344 return N0; 3345 // reassociate xor 3346 SDValue RXOR = ReassociateOps(ISD::XOR, N->getDebugLoc(), N0, N1); 3347 if (RXOR.getNode() != 0) 3348 return RXOR; 3349 3350 // fold !(x cc y) -> (x !cc y) 3351 if (N1C && N1C->getAPIntValue() == 1 && isSetCCEquivalent(N0, LHS, RHS, CC)) { 3352 bool isInt = LHS.getValueType().isInteger(); 3353 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 3354 isInt); 3355 3356 if (!LegalOperations || TLI.isCondCodeLegal(NotCC, LHS.getValueType())) { 3357 switch (N0.getOpcode()) { 3358 default: 3359 llvm_unreachable("Unhandled SetCC Equivalent!"); 3360 case ISD::SETCC: 3361 return DAG.getSetCC(N->getDebugLoc(), VT, LHS, RHS, NotCC); 3362 case ISD::SELECT_CC: 3363 return DAG.getSelectCC(N->getDebugLoc(), LHS, RHS, N0.getOperand(2), 3364 N0.getOperand(3), NotCC); 3365 } 3366 } 3367 } 3368 3369 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y))) 3370 if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND && 3371 N0.getNode()->hasOneUse() && 3372 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){ 3373 SDValue V = N0.getOperand(0); 3374 V = DAG.getNode(ISD::XOR, N0.getDebugLoc(), V.getValueType(), V, 3375 DAG.getConstant(1, V.getValueType())); 3376 AddToWorkList(V.getNode()); 3377 return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, V); 3378 } 3379 3380 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc 
3381 if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 && 3382 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 3383 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 3384 if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) { 3385 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; 3386 LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS 3387 RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS 3388 AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode()); 3389 return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS); 3390 } 3391 } 3392 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants 3393 if (N1C && N1C->isAllOnesValue() && 3394 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 3395 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 3396 if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) { 3397 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; 3398 LHS = DAG.getNode(ISD::XOR, LHS.getDebugLoc(), VT, LHS, N1); // LHS = ~LHS 3399 RHS = DAG.getNode(ISD::XOR, RHS.getDebugLoc(), VT, RHS, N1); // RHS = ~RHS 3400 AddToWorkList(LHS.getNode()); AddToWorkList(RHS.getNode()); 3401 return DAG.getNode(NewOpcode, N->getDebugLoc(), VT, LHS, RHS); 3402 } 3403 } 3404 // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2)) 3405 if (N1C && N0.getOpcode() == ISD::XOR) { 3406 ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0)); 3407 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3408 if (N00C) 3409 return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(1), 3410 DAG.getConstant(N1C->getAPIntValue() ^ 3411 N00C->getAPIntValue(), VT)); 3412 if (N01C) 3413 return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N0.getOperand(0), 3414 DAG.getConstant(N1C->getAPIntValue() ^ 3415 N01C->getAPIntValue(), VT)); 3416 } 3417 // fold (xor x, x) -> 0 3418 if (N0 == N1) 3419 return 
tryFoldToZero(N->getDebugLoc(), TLI, VT, DAG, LegalOperations);

  // Simplify: xor (op x...), (op y...)  -> (op (xor x, y))
  if (N0.getOpcode() == N1.getOpcode()) {
    SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
    if (Tmp.getNode()) return Tmp;
  }

  // Simplify the expression using non-local knowledge.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

/// visitShiftByConstant - Handle transforms common to the three shifts, when
/// the shift amount is a constant.
/// NOTE(review): the Amt parameter is not referenced in this body; the shift
/// amount is re-read from N's operands instead — confirm whether Amt is
/// intentionally unused.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) {
  SDNode *LHS = N->getOperand(0).getNode();
  // Only pull a binop through the shift if the shift is its sole user;
  // otherwise the binop must be kept anyway and we would add nodes.
  if (!LHS->hasOneUse()) return SDValue();

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc.  This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
  bool HighBitSet = false;  // Can we transform this if the high bit is set?

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant as well.
  ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
  if (!BinOpCst) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant.
  // If it is not a shift, it pessimizes some common cases like:
  //
  //    void foo(int *X, int i) { X[i & 1235] = 1; }
  //    int bar(int *X, int i) { return X[i & 255]; }
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  if ((BinOpLHSVal->getOpcode() != ISD::SHL &&
       BinOpLHSVal->getOpcode() != ISD::SRA &&
       BinOpLHSVal->getOpcode() != ISD::SRL) ||
      !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1)))
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The highBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), LHS->getOperand(1).getDebugLoc(),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 LHS->getOperand(0).getDebugLoc(),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
  return DAG.getNode(LHS->getOpcode(), N->getDebugLoc(), VT, NewShift, NewRHS);
}

SDValue DAGCombiner::visitSHL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  // Scalar element width; for vectors this is the per-element width.
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (shl c1, c2) -> c1<<c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SHL, VT, N0C, N1C);
  // fold (shl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (shl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (shl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (shl undef, x) -> 0
  if (N0.getOpcode() == ISD::UNDEF)
    return DAG.getConstant(0, VT);
  // if (shl x, c) is known to be zero, return 0
  if (DAG.MaskedValueIsZero(SDValue(N, 0),
                            APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);
  // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      // Truncate the AND mask to the narrower shift-amount type.
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
      return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(), TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SHL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    // Combined shift of the full width or more leaves no bits -> 0.
    if (c1 + c2 >= OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }

  // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
  // For this to be valid, the second form must not preserve any of the bits
  // that are shifted out by the inner shift in the first form.  This means
  // the outer shift size must be >= the number of bits added by the ext.
  // As a corollary, we don't care what kind of ext it is.
  if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND ||
              N0.getOpcode() == ISD::ANY_EXTEND ||
              N0.getOpcode() == ISD::SIGN_EXTEND) &&
      N0.getOperand(0).getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
    // OpSizeInBits - InnerShiftSize is the number of bits the ext adds;
    // the outer shift amount must discard at least that many bits.
    if (c2 >= OpSizeInBits - InnerShiftSize) {
      if (c1 + c2 >= OpSizeInBits)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::SHL, N0->getDebugLoc(), VT,
                         DAG.getNode(N0.getOpcode(), N0->getDebugLoc(), VT,
                                     N0.getOperand(0)->getOperand(0)),
                         DAG.getConstant(c1 + c2, N1.getValueType()));
    }
  }

  // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or
  //                               (and (srl x, (sub c1, c2), MASK)
  // Only fold this if the inner shift has no other uses -- if it does, folding
  // this will increase the total number of instructions.
  if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    if (c1 < VT.getSizeInBits()) {
      uint64_t c2 = N1C->getZExtValue();
      // Mask of the bits that survive the srl/shl pair.
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - c1);
      SDValue Shift;
      if (c2 > c1) {
        // Net effect is a left shift by the difference.
        Mask = Mask.shl(c2-c1);
        Shift = DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, N0.getOperand(0),
                            DAG.getConstant(c2-c1, N1.getValueType()));
      } else {
        // Net effect is a right shift by the difference (or no shift if equal).
        Mask = Mask.lshr(c1-c2);
        Shift = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
                            DAG.getConstant(c1-c2, N1.getValueType()));
      }
      return DAG.getNode(ISD::AND, N0.getDebugLoc(), VT, Shift,
                         DAG.getConstant(Mask, VT));
    }
  }
  // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
  if (N1C && N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1)) {
    SDValue HiBitsMask =
      DAG.getConstant(APInt::getHighBitsSet(VT.getSizeInBits(),
                                            VT.getSizeInBits() -
                                              N1C->getZExtValue()),
                      VT);
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
                       HiBitsMask);
  }

  // Transforms shared with SRA/SRL when the shift amount is constant.
  if (N1C) {
    SDValue NewSHL = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSHL.getNode())
      return NewSHL;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSRA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  // Scalar element width; for vectors this is the per-element width.
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (sra c1, c2) -> c1 >>s c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRA, VT, N0C, N1C);
  // fold (sra 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (sra -1, x) -> -1
  if (N0C &&
N0C->isAllOnesValue())
    return N0;
  // fold (sra x, (setge c, size(x))) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (sra x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
  // sext_inreg.
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    // LowBits is the number of meaningful low bits left after the shl/sra pair.
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
      unsigned Sum = N1C->getZExtValue() + C1->getZExtValue();
      // Clamp so the combined amount stays a valid in-range shift.
      if (Sum >= OpSizeInBits) Sum = OpSizeInBits-1;
      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0.getOperand(0),
                         DAG.getConstant(Sum, N1C->getValueType(0)));
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  //      -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
  // result_size - n != m.
  // If truncate is free for the target sext(shl) is likely to result in better
  // code.
  if (N0.getOpcode() == ISD::SHL) {
    // Get the two constants of the shifts, CN0 = m, CN = n.
    const ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (N01C && N1C) {
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT =
        EVT::getIntegerVT(*DAG.getContext(),
                          OpSizeInBits - N1C->getZExtValue());
      // Determine the residual right-shift amount.
      signed ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the truncated to type is legal, sign_extend is legal
      // on that type, and the truncate to that type is both legal and free,
      // perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {

        SDValue Amt = DAG.getConstant(ShiftAmt,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(),
                           N->getValueType(0), Trunc);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      // Truncate the AND mask to the narrower shift-amount type.
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getScalarType().getSizeInBits());
      return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(),
                                     TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  // fold (sra (trunc (sr x, c1)), c2) -> (trunc (sra x, c1+c2))
  //      if c1 is equal to the number of bits the trunc removes
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (N0.getOperand(0).getOpcode() == ISD::SRL ||
       N0.getOperand(0).getOpcode() == ISD::SRA) &&
      N0.getOperand(0).hasOneUse() &&
      N0.getOperand(0).getOperand(1).hasOneUse() &&
      N1C && isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
    EVT LargeVT = N0.getOperand(0).getValueType();
    ConstantSDNode *LargeShiftAmt =
      cast<ConstantSDNode>(N0.getOperand(0).getOperand(1));

    // The inner shift must discard exactly the bits the truncate drops.
    if (LargeVT.getScalarType().getSizeInBits() - OpSizeInBits ==
        LargeShiftAmt->getZExtValue()) {
      SDValue Amt =
        DAG.getConstant(LargeShiftAmt->getZExtValue() + N1C->getZExtValue(),
              getShiftAmountTy(N0.getOperand(0).getOperand(0).getValueType()));
      SDValue SRA = DAG.getNode(ISD::SRA, N->getDebugLoc(), LargeVT,
                                N0.getOperand(0).getOperand(0), Amt);
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, SRA);
    }
  }

  // Simplify, based on bits shifted out of the LHS.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);


  // If the sign bit is known to be zero, switch this to a SRL.
if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);

  // Transforms shared with SHL/SRL when the shift amount is constant.
  if (N1C) {
    SDValue NewSRA = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSRA.getNode())
      return NewSRA;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSRL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();
  // Scalar element width; for vectors this is the per-element width.
  unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();

  // fold (srl c1, c2) -> c1 >>u c2
  if (N0C && N1C)
    return DAG.FoldConstantArithmetic(ISD::SRL, VT, N0C, N1C);
  // fold (srl 0, x) -> 0
  if (N0C && N0C->isNullValue())
    return N0;
  // fold (srl x, c >= size(x)) -> undef
  if (N1C && N1C->getZExtValue() >= OpSizeInBits)
    return DAG.getUNDEF(VT);
  // fold (srl x, 0) -> x
  if (N1C && N1C->isNullValue())
    return N0;
  // if (srl x, c) is known to be zero, return 0
  if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
                                   APInt::getAllOnesValue(OpSizeInBits)))
    return DAG.getConstant(0, VT);

  // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRL &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    // Combined shift of the full width or more leaves no bits -> 0.
    if (c1 + c2 >= OpSizeInBits)
      return DAG.getConstant(0, VT);
    return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(c1 + c2, N1.getValueType()));
  }

  // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
  if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(0).getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) {
    uint64_t c1 =
      cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue();
    uint64_t c2 = N1C->getZExtValue();
    EVT InnerShiftVT = N0.getOperand(0).getValueType();
    EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
    uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
    // This is only valid if the OpSizeInBits + c1 = size of inner shift.
    if (c1 + OpSizeInBits == InnerShiftSize) {
      if (c1 + c2 >= InnerShiftSize)
        return DAG.getConstant(0, VT);
      return DAG.getNode(ISD::TRUNCATE, N0->getDebugLoc(), VT,
                         DAG.getNode(ISD::SRL, N0->getDebugLoc(), InnerShiftVT,
                                     N0.getOperand(0)->getOperand(0),
                                     DAG.getConstant(c1 + c2, ShiftCountVT)));
    }
  }

  // fold (srl (shl x, c), c) -> (and x, cst2)
  if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
      N0.getValueSizeInBits() <= 64) {
    // Shift the all-ones 64-bit pattern so exactly the low
    // (width - c) bits of the value remain set in the mask.
    uint64_t ShAmt = N1C->getZExtValue()+64-N0.getValueSizeInBits();
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getConstant(~0ULL >> ShAmt, VT));
  }


  // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
  if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
    // Shifting in all undef bits?
    EVT SmallVT = N0.getOperand(0).getValueType();
    if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
      return DAG.getUNDEF(VT);

    if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
      uint64_t ShiftAmt = N1C->getZExtValue();
      SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
                                       N0.getOperand(0),
                          DAG.getConstant(ShiftAmt, getShiftAmountTy(SmallVT)));
      AddToWorkList(SmallShift.getNode());
      return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
    }
  }

  // fold (srl (sra X, Y), 31) -> (srl X, 31).  This srl only looks at the sign
  // bit, which is unmodified by sra.
  if (N1C && N1C->getZExtValue() + 1 == VT.getSizeInBits()) {
    if (N0.getOpcode() == ISD::SRA)
      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), N1);
  }

  // fold (srl (ctlz x), "5") -> x  iff x has one bit set (the low bit).
  if (N1C && N0.getOpcode() == ISD::CTLZ &&
      N1C->getAPIntValue() == Log2_32(VT.getSizeInBits())) {
    APInt KnownZero, KnownOne;
    DAG.ComputeMaskedBits(N0.getOperand(0), KnownZero, KnownOne);

    // If any of the input bits are KnownOne, then the input couldn't be all
    // zeros, thus the result of the srl will always be zero.
    if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);

    // If all of the bits input to the ctlz node are known to be zero, then
    // the result of the ctlz is "32" and the result of the shift is one.
    APInt UnknownBits = ~KnownZero;
    if (UnknownBits == 0) return DAG.getConstant(1, VT);

    // Otherwise, check to see if there is exactly one bit input to the ctlz.
    if ((UnknownBits & (UnknownBits - 1)) == 0) {
      // Okay, we know that only that the single bit specified by UnknownBits
      // could be set on input to the CTLZ node. If this bit is set, the SRL
      // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
      // to an SRL/XOR pair, which is likely to simplify more.
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        Op = DAG.getNode(ISD::SRL, N0.getDebugLoc(), VT, Op,
                  DAG.getConstant(ShAmt, getShiftAmountTy(Op.getValueType())));
        AddToWorkList(Op.getNode());
      }

      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
                         Op, DAG.getConstant(1, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND &&
      N1.hasOneUse() && N1.getOperand(0).hasOneUse()) {
    SDValue N101 = N1.getOperand(0).getOperand(1);
    if (ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N101)) {
      EVT TruncVT = N1.getValueType();
      SDValue N100 = N1.getOperand(0).getOperand(0);
      // Truncate the AND mask to the narrower shift-amount type.
      APInt TruncC = N101C->getAPIntValue();
      TruncC = TruncC.trunc(TruncVT.getSizeInBits());
      return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0,
                         DAG.getNode(ISD::AND, N->getDebugLoc(),
                                     TruncVT,
                                     DAG.getNode(ISD::TRUNCATE,
                                                 N->getDebugLoc(),
                                                 TruncVT, N100),
                                     DAG.getConstant(TruncC, TruncVT)));
    }
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // Transforms shared with SHL/SRA when the shift amount is constant.
  if (N1C) {
    SDValue NewSRL = visitShiftByConstant(N, N1C->getZExtValue());
    if (NewSRL.getNode())
      return NewSRL;
  }

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  SDValue NarrowLoad = ReduceLoadWidth(N);
  if (NarrowLoad.getNode())
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However when after the source operand of SRL is optimized into AND, the SRL
  // itself may not be optimized further. Look for it and add the BRCOND into
  // the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorkList(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look past the truncate.
Use = *Use->use_begin();
      if (Use->getOpcode() == ISD::BRCOND)
        AddToWorkList(Use);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitCTLZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz c1) -> c2
  // Re-emitting the node relies on getNode to do the constant folding.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ, N->getDebugLoc(), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctlz_zero_undef c1) -> c2
  // Re-emitting the node relies on getNode to do the constant folding.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTTZ(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz c1) -> c2
  // Re-emitting the node relies on getNode to do the constant folding.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ, N->getDebugLoc(), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (cttz_zero_undef c1) -> c2
  // Re-emitting the node relies on getNode to do the constant folding.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, N->getDebugLoc(), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitCTPOP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (ctpop c1) -> c2
  // Re-emitting the node relies on getNode to do the constant folding.
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::CTPOP, N->getDebugLoc(), VT, N0);
  return SDValue();
}

SDValue DAGCombiner::visitSELECT(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
  EVT VT = N->getValueType(0);
EVT VT0 = N0.getValueType();

  // fold (select C, X, X) -> X
  if (N1 == N2)
    return N1;
  // fold (select true, X, Y) -> X
  if (N0C && !N0C->isNullValue())
    return N1;
  // fold (select false, X, Y) -> Y
  if (N0C && N0C->isNullValue())
    return N2;
  // fold (select C, 1, X) -> (or C, X)
  if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
  // fold (select C, 0, 1) -> (xor C, 1)
  // Requires the condition to use 0/1 booleans (i1, or a target whose
  // boolean contents are ZeroOrOne) so xor-with-1 flips it correctly.
  if (VT.isInteger() &&
      (VT0 == MVT::i1 ||
       (VT0.isInteger() &&
        TLI.getBooleanContents(false) == TargetLowering::ZeroOrOneBooleanContent)) &&
      N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
    SDValue XORNode;
    if (VT == VT0)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT0,
                         N0, DAG.getConstant(1, VT0));
    // Result type differs from the condition type: xor in the condition
    // type, then extend or truncate to the select's result type.
    XORNode = DAG.getNode(ISD::XOR, N0.getDebugLoc(), VT0,
                          N0, DAG.getConstant(1, VT0));
    AddToWorkList(XORNode.getNode());
    if (VT.bitsGT(VT0))
      return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, XORNode);
    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode);
  }
  // fold (select C, 0, X) -> (and (not C), X)
  if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
    AddToWorkList(NOTNode.getNode());
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2);
  }
  // fold (select C, X, 1) -> (or (not C), X)
  if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
    SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
    AddToWorkList(NOTNode.getNode());
    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1);
  }
  // fold (select C, X, 0) -> (and C, X)
  if (VT == MVT::i1 && N2C && N2C->isNullValue())
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
  // fold (select X, X, Y) -> (or X, Y)
  // fold (select X, 1, Y) -> (or X, Y)
  if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
    return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
  // fold (select X, Y, X) -> (and X, Y)
  // fold (select X, Y, 0) -> (and X, Y)
  if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N1, N2))
    return SDValue(N, 0);  // Don't revisit N.

  // fold selects based on a setcc into other things, such as min/max/abs
  if (N0.getOpcode() == ISD::SETCC) {
    // FIXME:
    // Check against MVT::Other for SELECT_CC, which is a workaround for targets
    // having to say they don't support SELECT_CC on every type the DAG knows
    // about, since there is no way to mark an opcode illegal at all value types
    if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
        TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
                         N0.getOperand(0), N0.getOperand(1),
                         N1, N2, N0.getOperand(2));
    return SimplifySelect(N->getDebugLoc(), N0, N1, N2);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDValue N3 = N->getOperand(3);
  SDValue N4 = N->getOperand(4);
  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();

  // fold select_cc lhs, rhs, x, x, cc -> x
  if (N2 == N3)
    return N2;

  // Determine if the condition we're dealing with is constant
  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, N->getDebugLoc(), false);
  if (SCC.getNode()) AddToWorkList(SCC.getNode());

  if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
if (!SCCC->isNullValue())
      return N2;    // cond always true -> true val
    else
      return N3;    // cond always false -> false val
  }

  // Fold to a simpler select_cc
  if (SCC.getNode() && SCC.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), N2.getValueType(),
                       SCC.getOperand(0), SCC.getOperand(1), N2, N3,
                       SCC.getOperand(2));

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N2, N3))
    return SDValue(N, 0);  // Don't revisit N.

  // fold select_cc into other things, such as min/max/abs
  return SimplifySelectCC(N->getDebugLoc(), N0, N1, N2, N3, CC);
}

SDValue DAGCombiner::visitSETCC(SDNode *N) {
  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
                       cast<CondCodeSDNode>(N->getOperand(2))->get(),
                       N->getDebugLoc());
}

// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// transformation. Returns true if extensions are possible and the above
// mentioned transformation is profitable.
static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
                                    unsigned ExtOpc,
                                    SmallVector<SDNode*, 4> &ExtendNodes,
                                    const TargetLowering &TLI) {
  bool HasCopyToRegUses = false;
  bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
  for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
                            UE = N0.getNode()->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User == N)
      continue;
    // Only consider uses of the same result value of N0.
    if (UI.getUse().getResNo() != N0.getResNo())
      continue;
    // FIXME: Only extend SETCC N, N and SETCC N, c for now.
    if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) {
      ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get();
      if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC))
        // Sign bits will be lost after a zext.
        return false;
      bool Add = false;
      for (unsigned i = 0; i != 2; ++i) {
        SDValue UseOp = User->getOperand(i);
        if (UseOp == N0)
          continue;
        // Non-constant other operand of the setcc: give up.
        if (!isa<ConstantSDNode>(UseOp))
          return false;
        Add = true;
      }
      if (Add)
        ExtendNodes.push_back(User);
      continue;
    }
    // If truncates aren't free and there are users we can't
    // extend, it isn't worthwhile.
    if (!isTruncFree)
      return false;
    // Remember if this value is live-out.
    if (User->getOpcode() == ISD::CopyToReg)
      HasCopyToRegUses = true;
  }

  if (HasCopyToRegUses) {
    bool BothLiveOut = false;
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) {
        BothLiveOut = true;
        break;
      }
    }
    if (BothLiveOut)
      // Both unextended and extended values are live out. There had better be
      // a good reason for the transformation.
      // (Converts to bool: true iff at least one setcc user was recorded.)
      return ExtendNodes.size();
  }
  return true;
}

// NOTE(review): SetCCs is passed by value; a const reference would avoid
// copying the SmallVector — confirm callers before changing.
void DAGCombiner::ExtendSetCCUses(SmallVector<SDNode*, 4> SetCCs,
                                  SDValue Trunc, SDValue ExtLoad, DebugLoc DL,
                                  ISD::NodeType ExtType) {
  // Extend SetCC uses if necessary.
  for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) {
    SDNode *SetCC = SetCCs[i];
    SmallVector<SDValue, 4> Ops;

    for (unsigned j = 0; j != 2; ++j) {
      SDValue SOp = SetCC->getOperand(j);
      // The operand that was the truncated load becomes the extended load;
      // any other operand is extended to match.
      if (SOp == Trunc)
        Ops.push_back(ExtLoad);
      else
        Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp));
    }

    Ops.push_back(SetCC->getOperand(2));
    CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0),
                                 &Ops[0], Ops.size()));
  }
}

SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (sext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0);

  // fold (sext (sext x)) -> (sext x)
  // fold (sext (aext x)) -> (sext x)
  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
                       N0.getOperand(0));

  if (N0.getOpcode() == ISD::TRUNCATE) {
    // fold (sext (truncate (load x))) -> (sext (smaller load x))
    // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
    if (NarrowLoad.getNode()) {
      SDNode* oye = N0.getNode()->getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorkList(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }

    // See if the value being truncated is already sign extended.  If so, just
    // eliminate the trunc/sext pair.
4267 SDValue Op = N0.getOperand(0); 4268 unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits(); 4269 unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits(); 4270 unsigned DestBits = VT.getScalarType().getSizeInBits(); 4271 unsigned NumSignBits = DAG.ComputeNumSignBits(Op); 4272 4273 if (OpBits == DestBits) { 4274 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign 4275 // bits, it is already ready. 4276 if (NumSignBits > DestBits-MidBits) 4277 return Op; 4278 } else if (OpBits < DestBits) { 4279 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign 4280 // bits, just sext from i32. 4281 if (NumSignBits > OpBits-MidBits) 4282 return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, Op); 4283 } else { 4284 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign 4285 // bits, just truncate to i32. 4286 if (NumSignBits > OpBits-MidBits) 4287 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op); 4288 } 4289 4290 // fold (sext (truncate x)) -> (sextinreg x). 4291 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, 4292 N0.getValueType())) { 4293 if (OpBits < DestBits) 4294 Op = DAG.getNode(ISD::ANY_EXTEND, N0.getDebugLoc(), VT, Op); 4295 else if (OpBits > DestBits) 4296 Op = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), VT, Op); 4297 return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, Op, 4298 DAG.getValueType(N0.getValueType())); 4299 } 4300 } 4301 4302 // fold (sext (load x)) -> (sext (truncate (sextload x))) 4303 // None of the supported targets knows how to perform load and sign extend 4304 // on vectors in one instruction. We only perform this transformation on 4305 // scalars. 
4306 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 4307 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 4308 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()))) { 4309 bool DoXform = true; 4310 SmallVector<SDNode*, 4> SetCCs; 4311 if (!N0.hasOneUse()) 4312 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); 4313 if (DoXform) { 4314 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4315 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT, 4316 LN0->getChain(), 4317 LN0->getBasePtr(), LN0->getPointerInfo(), 4318 N0.getValueType(), 4319 LN0->isVolatile(), LN0->isNonTemporal(), 4320 LN0->getAlignment()); 4321 CombineTo(N, ExtLoad); 4322 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4323 N0.getValueType(), ExtLoad); 4324 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 4325 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4326 ISD::SIGN_EXTEND); 4327 return SDValue(N, 0); // Return N so it doesn't get rechecked! 4328 } 4329 } 4330 4331 // fold (sext (sextload x)) -> (sext (truncate (sextload x))) 4332 // fold (sext ( extload x)) -> (sext (truncate (sextload x))) 4333 if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 4334 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 4335 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 4336 EVT MemVT = LN0->getMemoryVT(); 4337 if ((!LegalOperations && !LN0->isVolatile()) || 4338 TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) { 4339 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT, 4340 LN0->getChain(), 4341 LN0->getBasePtr(), LN0->getPointerInfo(), 4342 MemVT, 4343 LN0->isVolatile(), LN0->isNonTemporal(), 4344 LN0->getAlignment()); 4345 CombineTo(N, ExtLoad); 4346 CombineTo(N0.getNode(), 4347 DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), 4348 N0.getValueType(), ExtLoad), 4349 ExtLoad.getValue(1)); 4350 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
4351 } 4352 } 4353 4354 // fold (sext (and/or/xor (load x), cst)) -> 4355 // (and/or/xor (sextload x), (sext cst)) 4356 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 4357 N0.getOpcode() == ISD::XOR) && 4358 isa<LoadSDNode>(N0.getOperand(0)) && 4359 N0.getOperand(1).getOpcode() == ISD::Constant && 4360 TLI.isLoadExtLegal(ISD::SEXTLOAD, N0.getValueType()) && 4361 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 4362 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 4363 if (LN0->getExtensionType() != ISD::ZEXTLOAD) { 4364 bool DoXform = true; 4365 SmallVector<SDNode*, 4> SetCCs; 4366 if (!N0.hasOneUse()) 4367 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 4368 SetCCs, TLI); 4369 if (DoXform) { 4370 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, LN0->getDebugLoc(), VT, 4371 LN0->getChain(), LN0->getBasePtr(), 4372 LN0->getPointerInfo(), 4373 LN0->getMemoryVT(), 4374 LN0->isVolatile(), 4375 LN0->isNonTemporal(), 4376 LN0->getAlignment()); 4377 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 4378 Mask = Mask.sext(VT.getSizeInBits()); 4379 SDValue And = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, 4380 ExtLoad, DAG.getConstant(Mask, VT)); 4381 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 4382 N0.getOperand(0).getDebugLoc(), 4383 N0.getOperand(0).getValueType(), ExtLoad); 4384 CombineTo(N, And); 4385 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 4386 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(), 4387 ISD::SIGN_EXTEND); 4388 return SDValue(N, 0); // Return N so it doesn't get rechecked! 4389 } 4390 } 4391 } 4392 4393 if (N0.getOpcode() == ISD::SETCC) { 4394 // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 4395 // Only do this before legalize for now. 
4396 if (VT.isVector() && !LegalOperations) { 4397 EVT N0VT = N0.getOperand(0).getValueType(); 4398 // On some architectures (such as SSE/NEON/etc) the SETCC result type is 4399 // of the same size as the compared operands. Only optimize sext(setcc()) 4400 // if this is the case. 4401 EVT SVT = TLI.getSetCCResultType(N0VT); 4402 4403 // We know that the # elements of the results is the same as the 4404 // # elements of the compare (and the # elements of the compare result 4405 // for that matter). Check to see that they are the same size. If so, 4406 // we know that the element size of the sext'd result matches the 4407 // element size of the compare operands. 4408 if (VT.getSizeInBits() == SVT.getSizeInBits()) 4409 return DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0), 4410 N0.getOperand(1), 4411 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4412 // If the desired elements are smaller or larger than the source 4413 // elements we can use a matching integer vector type and then 4414 // truncate/sign extend 4415 else { 4416 EVT MatchingElementType = 4417 EVT::getIntegerVT(*DAG.getContext(), 4418 N0VT.getScalarType().getSizeInBits()); 4419 EVT MatchingVectorType = 4420 EVT::getVectorVT(*DAG.getContext(), MatchingElementType, 4421 N0VT.getVectorNumElements()); 4422 4423 if (SVT == MatchingVectorType) { 4424 SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, 4425 N0.getOperand(0), N0.getOperand(1), 4426 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 4427 return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT); 4428 } 4429 } 4430 } 4431 4432 // sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc) 4433 unsigned ElementWidth = VT.getScalarType().getSizeInBits(); 4434 SDValue NegOne = 4435 DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT); 4436 SDValue SCC = 4437 SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1), 4438 NegOne, DAG.getConstant(0, VT), 4439 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); 4440 if 
(SCC.getNode()) return SCC; 4441 if (!LegalOperations || 4442 TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(VT))) 4443 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 4444 DAG.getSetCC(N->getDebugLoc(), 4445 TLI.getSetCCResultType(VT), 4446 N0.getOperand(0), N0.getOperand(1), 4447 cast<CondCodeSDNode>(N0.getOperand(2))->get()), 4448 NegOne, DAG.getConstant(0, VT)); 4449 } 4450 4451 // fold (sext x) -> (zext x) if the sign bit is known zero. 4452 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 4453 DAG.SignBitIsZero(N0)) 4454 return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0); 4455 4456 return SDValue(); 4457} 4458 4459// isTruncateOf - If N is a truncate of some other value, return true, record 4460// the value being truncated in Op and which of Op's bits are zero in KnownZero. 4461// This function computes KnownZero to avoid a duplicated call to 4462// ComputeMaskedBits in the caller. 4463static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 4464 APInt &KnownZero) { 4465 APInt KnownOne; 4466 if (N->getOpcode() == ISD::TRUNCATE) { 4467 Op = N->getOperand(0); 4468 DAG.ComputeMaskedBits(Op, KnownZero, KnownOne); 4469 return true; 4470 } 4471 4472 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 4473 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 4474 return false; 4475 4476 SDValue Op0 = N->getOperand(0); 4477 SDValue Op1 = N->getOperand(1); 4478 assert(Op0.getValueType() == Op1.getValueType()); 4479 4480 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0); 4481 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1); 4482 if (COp0 && COp0->isNullValue()) 4483 Op = Op1; 4484 else if (COp1 && COp1->isNullValue()) 4485 Op = Op0; 4486 else 4487 return false; 4488 4489 DAG.ComputeMaskedBits(Op, KnownZero, KnownOne); 4490 4491 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 4492 return false; 4493 4494 return true; 4495} 4496 4497SDValue 
DAGCombiner::visitZERO_EXTEND(SDNode *N) {
  // Try to simplify a ZERO_EXTEND node; returns the replacement value or a
  // null SDValue when no fold applies.
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (zext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, N0);
  // fold (zext (zext x)) -> (zext x)
  // fold (zext (aext x)) -> (zext x)
  if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
    return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT,
                       N0.getOperand(0));

  // fold (zext (truncate x)) -> (zext x) or
  //      (zext (truncate x)) -> (truncate x)
  // This is valid when the truncated bits of x are already zero.
  // FIXME: We should extend this to work for vectors too.
  SDValue Op;
  APInt KnownZero;
  if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) {
    // TruncatedBits = the bits of Op that the truncate discards and that the
    // zext would re-materialize as zero: bits [size(N0), min(size(Op), size(VT))).
    // When Op and N0 have the same size nothing is discarded.
    APInt TruncatedBits =
      (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
      APInt(Op.getValueSizeInBits(), 0) :
      APInt::getBitsSet(Op.getValueSizeInBits(),
                        N0.getValueSizeInBits(),
                        std::min(Op.getValueSizeInBits(),
                                 VT.getSizeInBits()));
    if (TruncatedBits == (KnownZero & TruncatedBits)) {
      // The discarded bits are already zero in Op, so extend/truncate Op
      // directly to VT (or return it unchanged when the sizes match).
      if (VT.bitsGT(Op.getValueType()))
        return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, Op);
      if (VT.bitsLT(Op.getValueType()))
        return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);

      return Op;
    }
  }

  // fold (zext (truncate (load x))) -> (zext (smaller load x))
  // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
    if (NarrowLoad.getNode()) {
      // The node under the truncate; it may be dead after the combine.
      SDNode* oye = N0.getNode()->getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorkList(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (zext (truncate x)) -> (and x, mask)
  if (N0.getOpcode() == ISD::TRUNCATE &&
      (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {

    // fold (zext (truncate (load x))) -> (zext (smaller load x))
    // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
    if (NarrowLoad.getNode()) {
      SDNode* oye = N0.getNode()->getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorkList(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }

    // Bring the pre-truncate value to VT, then mask off everything above the
    // truncated width.
    SDValue Op = N0.getOperand(0);
    if (Op.getValueType().bitsLT(VT)) {
      Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
      AddToWorkList(Op.getNode());
    } else if (Op.getValueType().bitsGT(VT)) {
      Op = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Op);
      AddToWorkList(Op.getNode());
    }
    return DAG.getZeroExtendInReg(Op, N->getDebugLoc(),
                                  N0.getValueType().getScalarType());
  }

  // Fold (zext (and (trunc x), cst)) -> (and x, cst),
  // if either of the casts is not free.
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
                           N0.getValueType()) ||
       !TLI.isZExtFree(N0.getValueType(), VT))) {
    SDValue X = N0.getOperand(0).getOperand(0);
    if (X.getValueType().bitsLT(VT)) {
      X = DAG.getNode(ISD::ANY_EXTEND, X.getDebugLoc(), VT, X);
    } else if (X.getValueType().bitsGT(VT)) {
      X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
    }
    // Zero-extend the constant so the AND also clears the high bits the zext
    // would have produced.
    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
    Mask = Mask.zext(VT.getSizeInBits());
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                       X, DAG.getConstant(Mask, VT));
  }

  // fold (zext (load x)) -> (zext (truncate (zextload x)))
  // None of the supported targets knows how to perform load and vector_zext
  // on vectors in one instruction.  We only perform this transformation on
  // scalars.
  if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()))) {
    bool DoXform = true;
    SmallVector<SDNode*, 4> SetCCs;
    // With multiple uses, only profitable if every use can consume the
    // extended value (SETCC uses are collected and patched below).
    if (!N0.hasOneUse())
      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
    if (DoXform) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
                                       LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                       N0.getValueType(),
                                       LN0->isVolatile(), LN0->isNonTemporal(),
                                       LN0->getAlignment());
      CombineTo(N, ExtLoad);
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
                                  N0.getValueType(), ExtLoad);
      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));

      ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(),
                      ISD::ZERO_EXTEND);
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (zext (and/or/xor (load x), cst)) ->
  //      (and/or/xor (zextload x), (zext cst))
  if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR ||
       N0.getOpcode() == ISD::XOR) &&
      isa<LoadSDNode>(N0.getOperand(0)) &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      TLI.isLoadExtLegal(ISD::ZEXTLOAD, N0.getValueType()) &&
      // NOTE(review): like the matching sext fold, this uses '&&' where other
      // load folds use (!LegalOperations || legal) -- confirm intended.
      (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0));
    // A sextload's high bits are sign bits, not zeros, so it cannot be widened
    // into a zextload here.
    if (LN0->getExtensionType() != ISD::SEXTLOAD) {
      bool DoXform = true;
      SmallVector<SDNode*, 4> SetCCs;
      if (!N0.hasOneUse())
        DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::ZERO_EXTEND,
                                          SetCCs, TLI);
      if (DoXform) {
        SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), VT,
                                         LN0->getChain(), LN0->getBasePtr(),
                                         LN0->getPointerInfo(),
                                         LN0->getMemoryVT(),
                                         LN0->isVolatile(),
                                         LN0->isNonTemporal(),
                                         LN0->getAlignment());
        APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
        Mask = Mask.zext(VT.getSizeInBits());
        SDValue And = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
                                  ExtLoad, DAG.getConstant(Mask, VT));
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE,
                                    N0.getOperand(0).getDebugLoc(),
                                    N0.getOperand(0).getValueType(), ExtLoad);
        CombineTo(N, And);
        CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
        ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(),
                        ISD::ZERO_EXTEND);
        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    }
  }

  // fold (zext (zextload x)) -> (zext (truncate (zextload x)))
  // fold (zext ( extload x)) -> (zext (truncate (zextload x)))
  if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    EVT MemVT = LN0->getMemoryVT();
    if ((!LegalOperations && !LN0->isVolatile()) ||
        TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
      SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
                                       LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                       MemVT,
                                       LN0->isVolatile(), LN0->isNonTemporal(),
                                       LN0->getAlignment());
      CombineTo(N, ExtLoad);
      CombineTo(N0.getNode(),
                DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(), N0.getValueType(),
                            ExtLoad),
                ExtLoad.getValue(1));
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  if (N0.getOpcode() == ISD::SETCC) {
    if (!LegalOperations && VT.isVector()) {
      // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors.
      // Only do this before legalize for now.
      EVT N0VT = N0.getOperand(0).getValueType();
      EVT EltVT = VT.getVectorElementType();
      // Per-element constant 1, used to mask the all-ones vsetcc result down
      // to 0/1 as zext requires.
      SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
                                    DAG.getConstant(1, EltVT));
      if (VT.getSizeInBits() == N0VT.getSizeInBits())
        // We know that the # elements of the results is the same as the
        // # elements of the compare (and the # elements of the compare result
        // for that matter).  Check to see that they are the same size.  If so,
        // we know that the element size of the sext'd result matches the
        // element size of the compare operands.
        return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                           DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
                                        N0.getOperand(1),
                                        cast<CondCodeSDNode>(N0.getOperand(2))->get()),
                           DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
                                       &OneOps[0], OneOps.size()));

      // If the desired elements are smaller or larger than the source
      // elements we can use a matching integer vector type and then
      // truncate/sign extend
      EVT MatchingElementType =
        EVT::getIntegerVT(*DAG.getContext(),
                          N0VT.getScalarType().getSizeInBits());
      EVT MatchingVectorType =
        EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
                         N0VT.getVectorNumElements());
      SDValue VsetCC =
        DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
                     N0.getOperand(1),
                     cast<CondCodeSDNode>(N0.getOperand(2))->get());
      return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                         DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT),
                         DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
                                     &OneOps[0], OneOps.size()));
    }

    // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
    SDValue SCC =
      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
    if (SCC.getNode()) return SCC;
  }

  // (zext (shl (zext x), cst)) -> (shl (zext x), cst)
  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N0.getOperand(1)) &&
      N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND &&
      N0.hasOneUse()) {
    SDValue ShAmt = N0.getOperand(1);
    unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    if (N0.getOpcode() == ISD::SHL) {
      SDValue InnerZExt = N0.getOperand(0);
      // If the original shl may be shifting out bits, do not perform this
      // transformation.
      // KnownZeroBits = number of leading bits the inner zext guarantees zero;
      // the shl only preserves the value if it shifts by at most that much.
      unsigned KnownZeroBits = InnerZExt.getValueType().getSizeInBits() -
        InnerZExt.getOperand(0).getValueType().getSizeInBits();
      if (ShAmtVal > KnownZeroBits)
        return SDValue();
    }

    DebugLoc DL = N->getDebugLoc();

    // Ensure that the shift amount is wide enough for the shifted value.
    if (VT.getSizeInBits() >= 256)
      ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt);

    return DAG.getNode(N0.getOpcode(), DL, VT,
                       DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)),
                       ShAmt);
  }

  return SDValue();
}

/// visitANY_EXTEND - Try to simplify an ANY_EXTEND node.  Returns the
/// replacement value, or a null SDValue when no fold applies.
SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // fold (aext c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, N0);
  // fold (aext (aext x)) -> (aext x)
  // fold (aext (zext x)) -> (zext x)
  // fold (aext (sext x)) -> (sext x)
  if (N0.getOpcode() == ISD::ANY_EXTEND  ||
      N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND)
    return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0));

  // fold (aext (truncate (load x))) -> (aext (smaller load x))
  // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
    if (NarrowLoad.getNode()) {
      // The node under the truncate; it may be dead after the combine.
      SDNode* oye = N0.getNode()->getOperand(0).getNode();
      if (NarrowLoad.getNode() != N0.getNode()) {
        CombineTo(N0.getNode(), NarrowLoad);
        // CombineTo deleted the truncate, if needed, but not what's under it.
        AddToWorkList(oye);
      }
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (aext (truncate x))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    // An any-extend of a truncate leaves the high bits undefined, so we may
    // simply retarget x to VT with a truncate/any-extend (or return x itself).
    SDValue TruncOp = N0.getOperand(0);
    if (TruncOp.getValueType() == VT)
      return TruncOp; // x iff x size == zext size.
    if (TruncOp.getValueType().bitsGT(VT))
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, TruncOp);
    return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, TruncOp);
  }

  // Fold (aext (and (trunc x), cst)) -> (and x, cst)
  // if the trunc is not free.
  if (N0.getOpcode() == ISD::AND &&
      N0.getOperand(0).getOpcode() == ISD::TRUNCATE &&
      N0.getOperand(1).getOpcode() == ISD::Constant &&
      !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(),
                          N0.getValueType())) {
    SDValue X = N0.getOperand(0).getOperand(0);
    if (X.getValueType().bitsLT(VT)) {
      X = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, X);
    } else if (X.getValueType().bitsGT(VT)) {
      X = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, X);
    }
    // Zero-extend the constant; the AND then fixes the bits the truncate
    // would have masked, and the rest are don't-care for an any-extend.
    APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
    Mask = Mask.zext(VT.getSizeInBits());
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                       X, DAG.getConstant(Mask, VT));
  }

  // fold (aext (load x)) -> (aext (truncate (extload x)))
  // None of the supported targets knows how to perform load and any_ext
  // on vectors in one instruction.  We only perform this transformation on
  // scalars.
  if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
    bool DoXform = true;
    SmallVector<SDNode*, 4> SetCCs;
    if (!N0.hasOneUse())
      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
    if (DoXform) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
                                       LN0->getChain(),
                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                       N0.getValueType(),
                                       LN0->isVolatile(), LN0->isNonTemporal(),
                                       LN0->getAlignment());
      CombineTo(N, ExtLoad);
      // Remaining users of the old load see a truncate of the extended load.
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
                                  N0.getValueType(), ExtLoad);
      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
      ExtendSetCCUses(SetCCs, Trunc, ExtLoad, N->getDebugLoc(),
                      ISD::ANY_EXTEND);
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (aext (zextload x)) -> (aext (truncate (zextload x)))
  // fold (aext (sextload x)) -> (aext (truncate (sextload x)))
  // fold (aext ( extload x)) -> (aext (truncate (extload  x)))
  if (N0.getOpcode() == ISD::LOAD &&
      !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
      N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    EVT MemVT = LN0->getMemoryVT();
    // Re-issue the existing extending load at the wider result type,
    // preserving its original extension kind.
    SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
                                     VT, LN0->getChain(), LN0->getBasePtr(),
                                     LN0->getPointerInfo(), MemVT,
                                     LN0->isVolatile(), LN0->isNonTemporal(),
                                     LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(),
              DAG.getNode(ISD::TRUNCATE, N0.getDebugLoc(),
                          N0.getValueType(), ExtLoad),
              ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  if (N0.getOpcode() == ISD::SETCC) {
    // aext(setcc) -> sext_in_reg(vsetcc) for vectors.
    // Only do this before legalize for now.
    if (VT.isVector() && !LegalOperations) {
      EVT N0VT = N0.getOperand(0).getValueType();
      // We know that the # elements of the results is the same as the
      // # elements of the compare (and the # elements of the compare result
      // for that matter).  Check to see that they are the same size.  If so,
      // we know that the element size of the sext'd result matches the
      // element size of the compare operands.
      if (VT.getSizeInBits() == N0VT.getSizeInBits())
        return DAG.getSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
                            N0.getOperand(1),
                            cast<CondCodeSDNode>(N0.getOperand(2))->get());
      // If the desired elements are smaller or larger than the source
      // elements we can use a matching integer vector type and then
      // truncate/sign extend
      else {
        EVT MatchingElementType =
          EVT::getIntegerVT(*DAG.getContext(),
                            N0VT.getScalarType().getSizeInBits());
        EVT MatchingVectorType =
          EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
                           N0VT.getVectorNumElements());
        SDValue VsetCC =
          DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
                       N0.getOperand(1),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get());
        return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
      }
    }

    // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
    SDValue SCC =
      SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
    if (SCC.getNode())
      return SCC;
  }

  return SDValue();
}

/// GetDemandedBits - See if the specified operand can be simplified with the
/// knowledge that only the bits specified by Mask are used.
If so, return the 4924/// simpler operand, otherwise return a null SDValue. 4925SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) { 4926 switch (V.getOpcode()) { 4927 default: break; 4928 case ISD::Constant: { 4929 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 4930 assert(CV != 0 && "Const value should be ConstSDNode."); 4931 const APInt &CVal = CV->getAPIntValue(); 4932 APInt NewVal = CVal & Mask; 4933 if (NewVal != CVal) { 4934 return DAG.getConstant(NewVal, V.getValueType()); 4935 } 4936 break; 4937 } 4938 case ISD::OR: 4939 case ISD::XOR: 4940 // If the LHS or RHS don't contribute bits to the or, drop them. 4941 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask)) 4942 return V.getOperand(1); 4943 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask)) 4944 return V.getOperand(0); 4945 break; 4946 case ISD::SRL: 4947 // Only look at single-use SRLs. 4948 if (!V.getNode()->hasOneUse()) 4949 break; 4950 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) { 4951 // See if we can recursively simplify the LHS. 4952 unsigned Amt = RHSC->getZExtValue(); 4953 4954 // Watch out for shift count overflow though. 4955 if (Amt >= Mask.getBitWidth()) break; 4956 APInt NewMask = Mask << Amt; 4957 SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask); 4958 if (SimplifyLHS.getNode()) 4959 return DAG.getNode(ISD::SRL, V.getDebugLoc(), V.getValueType(), 4960 SimplifyLHS, V.getOperand(1)); 4961 } 4962 } 4963 return SDValue(); 4964} 4965 4966/// ReduceLoadWidth - If the result of a wider load is shifted to right of N 4967/// bits and then truncated to a narrower type and where N is a multiple 4968/// of number of bits of the narrower type, transform it to a narrower load 4969/// from address + N / num of bits of new type. If the result is to be 4970/// extended, also fold the extension to form a extending load. 
4971SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) { 4972 unsigned Opc = N->getOpcode(); 4973 4974 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 4975 SDValue N0 = N->getOperand(0); 4976 EVT VT = N->getValueType(0); 4977 EVT ExtVT = VT; 4978 4979 // This transformation isn't valid for vector loads. 4980 if (VT.isVector()) 4981 return SDValue(); 4982 4983 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then 4984 // extended to VT. 4985 if (Opc == ISD::SIGN_EXTEND_INREG) { 4986 ExtType = ISD::SEXTLOAD; 4987 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 4988 } else if (Opc == ISD::SRL) { 4989 // Another special-case: SRL is basically zero-extending a narrower value. 4990 ExtType = ISD::ZEXTLOAD; 4991 N0 = SDValue(N, 0); 4992 ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 4993 if (!N01) return SDValue(); 4994 ExtVT = EVT::getIntegerVT(*DAG.getContext(), 4995 VT.getSizeInBits() - N01->getZExtValue()); 4996 } 4997 if (LegalOperations && !TLI.isLoadExtLegal(ExtType, ExtVT)) 4998 return SDValue(); 4999 5000 unsigned EVTBits = ExtVT.getSizeInBits(); 5001 5002 // Do not generate loads of non-round integer types since these can 5003 // be expensive (and would be wrong if the type is not byte sized). 5004 if (!ExtVT.isRound()) 5005 return SDValue(); 5006 5007 unsigned ShAmt = 0; 5008 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 5009 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5010 ShAmt = N01->getZExtValue(); 5011 // Is the shift amount a multiple of size of VT? 5012 if ((ShAmt & (EVTBits-1)) == 0) { 5013 N0 = N0.getOperand(0); 5014 // Is the load width a multiple of size of VT? 5015 if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0) 5016 return SDValue(); 5017 } 5018 5019 // At this point, we must have a load or else we can't do the transform. 
5020 if (!isa<LoadSDNode>(N0)) return SDValue(); 5021 5022 // If the shift amount is larger than the input type then we're not 5023 // accessing any of the loaded bytes. If the load was a zextload/extload 5024 // then the result of the shift+trunc is zero/undef (handled elsewhere). 5025 // If the load was a sextload then the result is a splat of the sign bit 5026 // of the extended byte. This is not worth optimizing for. 5027 if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits()) 5028 return SDValue(); 5029 } 5030 } 5031 5032 // If the load is shifted left (and the result isn't shifted back right), 5033 // we can fold the truncate through the shift. 5034 unsigned ShLeftAmt = 0; 5035 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 5036 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { 5037 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5038 ShLeftAmt = N01->getZExtValue(); 5039 N0 = N0.getOperand(0); 5040 } 5041 } 5042 5043 // If we haven't found a load, we can't narrow it. Don't transform one with 5044 // multiple uses, this would require adding a new load. 5045 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse() || 5046 // Don't change the width of a volatile load. 5047 cast<LoadSDNode>(N0)->isVolatile()) 5048 return SDValue(); 5049 5050 // Verify that we are actually reducing a load width here. 5051 if (cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() < EVTBits) 5052 return SDValue(); 5053 5054 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 5055 EVT PtrType = N0.getOperand(1).getValueType(); 5056 5057 if (PtrType == MVT::Untyped || PtrType.isExtended()) 5058 // It's not possible to generate a constant of extended or untyped type. 5059 return SDValue(); 5060 5061 // For big endian targets, we need to adjust the offset to the pointer to 5062 // load the correct bytes. 
5063 if (TLI.isBigEndian()) { 5064 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 5065 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 5066 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 5067 } 5068 5069 uint64_t PtrOff = ShAmt / 8; 5070 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 5071 SDValue NewPtr = DAG.getNode(ISD::ADD, LN0->getDebugLoc(), 5072 PtrType, LN0->getBasePtr(), 5073 DAG.getConstant(PtrOff, PtrType)); 5074 AddToWorkList(NewPtr.getNode()); 5075 5076 SDValue Load; 5077 if (ExtType == ISD::NON_EXTLOAD) 5078 Load = DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr, 5079 LN0->getPointerInfo().getWithOffset(PtrOff), 5080 LN0->isVolatile(), LN0->isNonTemporal(), 5081 LN0->isInvariant(), NewAlign); 5082 else 5083 Load = DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(),NewPtr, 5084 LN0->getPointerInfo().getWithOffset(PtrOff), 5085 ExtVT, LN0->isVolatile(), LN0->isNonTemporal(), 5086 NewAlign); 5087 5088 // Replace the old load's chain with the new load's chain. 5089 WorkListRemover DeadNodes(*this); 5090 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 5091 5092 // Shift the result left, if we've swallowed a left shift. 5093 SDValue Result = Load; 5094 if (ShLeftAmt != 0) { 5095 EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 5096 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 5097 ShImmTy = VT; 5098 Result = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, 5099 Result, DAG.getConstant(ShLeftAmt, ShImmTy)); 5100 } 5101 5102 // Return the new loaded value. 
  return Result;
}

SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);
  // Note: this local named 'EVT' (the type being extended *from*, taken from
  // the VTSDNode operand) deliberately shadows the 'EVT' type name below.
  EVT EVT = cast<VTSDNode>(N1)->getVT();
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned EVTBits = EVT.getScalarType().getSizeInBits();

  // fold (sext_in_reg c1) -> c1
  // getNode constant-folds the sext_in_reg of a constant (or undef) for us.
  if (isa<ConstantSDNode>(N0) || N0.getOpcode() == ISD::UNDEF)
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT, N0, N1);

  // If the input is already sign extended, just drop the extension.
  if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1)
    return N0;

  // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2
  // The inner extension is redundant when it extends from a wider type.
  if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) {
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
                       N0.getOperand(0), N1);
  }

  // fold (sext_in_reg (sext x)) -> (sext x)
  // fold (sext_in_reg (aext x)) -> (sext x)
  // if x is small enough.
  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
        (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1);
  }

  // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
  if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
    return DAG.getZeroExtendInReg(N0, N->getDebugLoc(), EVT);

  // fold operands of sext_in_reg based on knowledge that the top bits are not
  // demanded.
  if (SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  // fold (sext_in_reg (load x)) -> (smaller sextload x)
  // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
  SDValue NarrowLoad = ReduceLoadWidth(N);
  if (NarrowLoad.getNode())
    return NarrowLoad;

  // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
  // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible.
  // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above.
  if (N0.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)))
      if (ShAmt->getZExtValue()+EVTBits <= VTBits) {
        // We can turn this into an SRA iff the input to the SRL is already sign
        // extended enough.
        unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0));
        if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits)
          return DAG.getNode(ISD::SRA, N->getDebugLoc(), VT,
                             N0.getOperand(0), N0.getOperand(1));
      }
  }

  // fold (sext_inreg (extload x)) -> (sextload x)
  // Legal either before legalization (for non-volatile loads) or when the
  // target supports SEXTLOAD of the memory type directly.
  if (ISD::isEXTLoad(N0.getNode()) &&
      ISD::isUNINDEXEDLoad(N0.getNode()) &&
      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), LN0->getPointerInfo(),
                                     EVT,
                                     LN0->isVolatile(), LN0->isNonTemporal(),
                                     LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }
  // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use
  if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
      N0.hasOneUse() &&
      EVT == cast<LoadSDNode>(N0)->getMemoryVT() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), LN0->getPointerInfo(),
                                     EVT,
                                     LN0->isVolatile(), LN0->isNonTemporal(),
                                     LN0->getAlignment());
    CombineTo(N, ExtLoad);
    CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
  if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
    SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
                                       N0.getOperand(1), false);
    if (BSwap.getNode() != 0)
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
                         BSwap, N1);
  }

  return SDValue();
}

SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool isLE = TLI.isLittleEndian();

  // noop truncate
  if (N0.getValueType() == N->getValueType(0))
    return N0;
  // fold (truncate c1) -> c1
  if (isa<ConstantSDNode>(N0))
    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0);
  // fold (truncate (truncate x)) -> (truncate x)
  if (N0.getOpcode() == ISD::TRUNCATE)
    return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
  // fold (truncate (ext x)) -> (ext x) or (truncate x) or x
  if (N0.getOpcode() == ISD::ZERO_EXTEND ||
      N0.getOpcode() == ISD::SIGN_EXTEND ||
      N0.getOpcode() == ISD::ANY_EXTEND) {
    if (N0.getOperand(0).getValueType().bitsLT(VT))
      // if the source is smaller than the dest, we still need an extend
      return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT,
                         N0.getOperand(0));
    else if (N0.getOperand(0).getValueType().bitsGT(VT))
      // if the source is larger than the dest, than we just need the truncate
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
    else
      // if the source and dest are the same type, we can drop both the extend
      // and the truncate.
      return N0.getOperand(0);
  }

  // Fold extract-and-trunc into a narrow extract. For example:
  //   i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1)
  //   i32 y = TRUNCATE(i64 x)
  //        -- becomes --
  //   v16i8 b = BITCAST (v2i64 val)
  //   i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8)
  //
  // Note: We only run this optimization after type legalization (which often
  // creates this pattern) and before operation legalization after which
  // we need to be more careful about the vector instructions that we generate.
  if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      LegalTypes && !LegalOperations && N0->hasOneUse()) {

    EVT VecTy = N0.getOperand(0).getValueType();
    EVT ExTy = N0.getValueType();
    EVT TrTy = N->getValueType(0);

    unsigned NumElem = VecTy.getVectorNumElements();
    unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();

    // Re-view the source vector with narrower (truncated-size) elements.
    EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem);
    assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");

    SDValue EltNo = N0->getOperand(1);
    if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
      int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
      EVT IndexTy = N0->getOperand(1).getValueType();
      // On big-endian targets the low (kept) piece is the last sub-element.
      int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));

      SDValue V = DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                              NVT, N0.getOperand(0));

      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                         N->getDebugLoc(), TrTy, V,
                         DAG.getConstant(Index, IndexTy));
    }
  }

  // See if we can simplify the input to this truncate through knowledge that
  // only the low bits are being used.
  // For example "trunc (or (shl x, 8), y)" // -> trunc y
  // Currently we only perform this optimization on scalars because vectors
  // may have different active low bits.
  if (!VT.isVector()) {
    SDValue Shorter =
      GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
                                               VT.getSizeInBits()));
    if (Shorter.getNode())
      return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, Shorter);
  }
  // fold (truncate (load x)) -> (smaller load x)
  // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
  if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
    SDValue Reduced = ReduceLoadWidth(N);
    if (Reduced.getNode())
      return Reduced;
  }

  // Simplify the operands using demanded-bits information.
  if (!VT.isVector() &&
      SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  return SDValue();
}

// getBuildPairElt - Return operand i of a BUILD_PAIR, looking through any
// MERGE_VALUES wrapper to the underlying node.
static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
  SDValue Elt = N->getOperand(i);
  if (Elt.getOpcode() != ISD::MERGE_VALUES)
    return Elt.getNode();
  return Elt.getOperand(Elt.getResNo()).getNode();
}

/// CombineConsecutiveLoads - build_pair (load, load) -> load
/// if load locations are consecutive.
SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
  assert(N->getOpcode() == ISD::BUILD_PAIR);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
  LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
  // Both halves must be loads from the same address space, and the low half
  // must be a plain (non-extending) load with this pair as its only user.
  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
      LD1->getPointerInfo().getAddrSpace() !=
         LD2->getPointerInfo().getAddrSpace())
    return SDValue();
  EVT LD1VT = LD1->getValueType(0);

  if (ISD::isNON_EXTLoad(LD2) &&
      LD2->hasOneUse() &&
      // If both are volatile this would reduce the number of volatile loads.
      // If one is volatile it might be ok, but play conservative and bail out.
      !LD1->isVolatile() &&
      !LD2->isVolatile() &&
      DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
    unsigned Align = LD1->getAlignment();
    // The wide load is only formed if LD1's alignment already satisfies the
    // ABI alignment of the wider type.
    unsigned NewAlign = TLI.getTargetData()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));

    if (NewAlign <= Align &&
        (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
      return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
                         LD1->getBasePtr(), LD1->getPointerInfo(),
                         false, false, false, Align);
  }

  return SDValue();
}

SDValue DAGCombiner::visitBITCAST(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // If the input is a BUILD_VECTOR with all constant elements, fold this now.
  // Only do this before legalize, since afterward the target may be depending
  // on the bitconvert.
  // First check to see if this is all constant.
  if (!LegalTypes &&
      N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
      VT.isVector()) {
    bool isSimple = true;
    for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i)
      if (N0.getOperand(i).getOpcode() != ISD::UNDEF &&
          N0.getOperand(i).getOpcode() != ISD::Constant &&
          N0.getOperand(i).getOpcode() != ISD::ConstantFP) {
        isSimple = false;
        break;
      }

    EVT DestEltVT = N->getValueType(0).getVectorElementType();
    assert(!DestEltVT.isVector() &&
           "Element type of vector ValueType must not be vector!");
    if (isSimple)
      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
  }

  // If the input is a constant, let getNode fold it.
  if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
    SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
    if (Res.getNode() != N) {
      if (!LegalOperations ||
          TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
        return Res;

      // Folding it resulted in an illegal node, and it's too late to
      // do that. Clean up the old node and forego the transformation.
      // Ideally this won't happen very often, because instcombine
      // and the earlier dagcombine runs (where illegal nodes are
      // permitted) should have folded most of them already.
      DAG.DeleteNode(Res.getNode());
    }
  }

  // (conv (conv x, t1), t2) -> (conv x, t2)
  if (N0.getOpcode() == ISD::BITCAST)
    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
                       N0.getOperand(0));

  // fold (conv (load x)) -> (load (conv*)x)
  // If the resultant load doesn't need a higher alignment than the original!
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      // Do not change the width of a volatile load.
      !cast<LoadSDNode>(N0)->isVolatile() &&
      (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    unsigned Align = TLI.getTargetData()->
      getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
    unsigned OrigAlign = LN0->getAlignment();

    if (Align <= OrigAlign) {
      SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
                                 LN0->getBasePtr(), LN0->getPointerInfo(),
                                 LN0->isVolatile(), LN0->isNonTemporal(),
                                 LN0->isInvariant(), OrigAlign);
      AddToWorkList(N);
      // Other users of the old load see a bitcast of the new load; the chain
      // result is rewired to the new load's chain.
      CombineTo(N0.getNode(),
                DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                            N0.getValueType(), Load),
                Load.getValue(1));
      return Load;
    }
  }

  // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
  // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
  // This often reduces constant pool loads.
  if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) ||
       (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) &&
      N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
    SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
                                  N0.getOperand(0));
    AddToWorkList(NewConv.getNode());

    APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
    if (N0.getOpcode() == ISD::FNEG)
      return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT,
                         NewConv, DAG.getConstant(SignBit, VT));
    assert(N0.getOpcode() == ISD::FABS);
    return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
                       NewConv, DAG.getConstant(~SignBit, VT));
  }

  // fold (bitconvert (fcopysign cst, x)) ->
  //         (or (and (bitconvert x), sign), (and cst, (not sign)))
  // Note that we don't handle (copysign x, cst) because this can always be
  // folded to an fneg or fabs.
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() &&
      isa<ConstantFPSDNode>(N0.getOperand(0)) &&
      VT.isInteger() && !VT.isVector()) {
    unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
    EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
    if (isTypeLegal(IntXVT)) {
      SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                              IntXVT, N0.getOperand(1));
      AddToWorkList(X.getNode());

      // If X has a different width than the result/lhs, sext it or truncate it.
      unsigned VTWidth = VT.getSizeInBits();
      if (OrigXWidth < VTWidth) {
        X = DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, X);
        AddToWorkList(X.getNode());
      } else if (OrigXWidth > VTWidth) {
        // To get the sign bit in the right place, we have to shift it right
        // before truncating.
        X = DAG.getNode(ISD::SRL, X.getDebugLoc(),
                        X.getValueType(), X,
                        DAG.getConstant(OrigXWidth-VTWidth, X.getValueType()));
        AddToWorkList(X.getNode());
        X = DAG.getNode(ISD::TRUNCATE, X.getDebugLoc(), VT, X);
        AddToWorkList(X.getNode());
      }

      // Keep only the sign bit of x, and everything but the sign bit of cst.
      APInt SignBit = APInt::getSignBit(VT.getSizeInBits());
      X = DAG.getNode(ISD::AND, X.getDebugLoc(), VT,
                      X, DAG.getConstant(SignBit, VT));
      AddToWorkList(X.getNode());

      SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
                                VT, N0.getOperand(0));
      Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
                        Cst, DAG.getConstant(~SignBit, VT));
      AddToWorkList(Cst.getNode());

      return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, X, Cst);
    }
  }

  // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
  if (N0.getOpcode() == ISD::BUILD_PAIR) {
    SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
    if (CombineLD.getNode())
      return CombineLD;
  }

  return SDValue();
}

SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
  EVT VT = N->getValueType(0);
  return CombineConsecutiveLoads(N, VT);
}

/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
/// node with Constant, ConstantFP or Undef operands.  DstEltVT indicates the
/// destination element value type.
SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
  EVT SrcEltVT = BV->getValueType(0).getVectorElementType();

  // If this is already the right type, we're done.
  if (SrcEltVT == DstEltVT) return SDValue(BV, 0);

  unsigned SrcBitSize = SrcEltVT.getSizeInBits();
  unsigned DstBitSize = DstEltVT.getSizeInBits();

  // If this is a conversion of N elements of one type to N elements of another
  // type, convert each element.  This handles FP<->INT cases.
  if (SrcBitSize == DstBitSize) {
    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                              BV->getValueType(0).getVectorNumElements());

    // Due to the FP element handling below calling this routine recursively,
    // we can end up with a scalar-to-vector node here.
    if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
                         DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
                                     DstEltVT, BV->getOperand(0)));

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
      SDValue Op = BV->getOperand(i);
      // If the vector element type is not legal, the BUILD_VECTOR operands
      // are promoted and implicitly truncated.  Make that explicit here.
      if (Op.getValueType() != SrcEltVT)
        Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
      Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
                                DstEltVT, Op));
      AddToWorkList(Ops.back().getNode());
    }
    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
                       &Ops[0], Ops.size());
  }

  // Otherwise, we're growing or shrinking the elements.  To avoid having to
  // handle annoying details of growing/shrinking FP values, we convert them to
  // int first.
  if (SrcEltVT.isFloatingPoint()) {
    // Convert the input float vector to a int vector where the elements are the
    // same sizes.
    assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
    BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
    SrcEltVT = IntVT;
  }

  // Now we know the input is an integer vector.  If the output is a FP type,
  // convert to integer first, then to FP of the right size.
  if (DstEltVT.isFloatingPoint()) {
    assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
    EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
    SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();

    // Next, convert to FP elements of the same size.
    return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
  }

  // Okay, we know the src/dst types are both integers of differing types.
  // Handling growing first.
  assert(SrcEltVT.isInteger() && DstEltVT.isInteger());
  if (SrcBitSize < DstBitSize) {
    unsigned NumInputsPerOutput = DstBitSize/SrcBitSize;

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = BV->getNumOperands(); i != e;
         i += NumInputsPerOutput) {
      bool isLE = TLI.isLittleEndian();
      APInt NewBits = APInt(DstBitSize, 0);
      bool EltIsUndef = true;
      for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
        // Shift the previously computed bits over.
        NewBits <<= SrcBitSize;
        // On little-endian targets the first input element is the lowest
        // chunk of the output, so accumulate inputs in reverse order.
        SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j));
        if (Op.getOpcode() == ISD::UNDEF) continue;
        EltIsUndef = false;

        NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue().
                   zextOrTrunc(SrcBitSize).zext(DstBitSize);
      }

      // An output element is undef only if every contributing input was undef.
      if (EltIsUndef)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      else
        Ops.push_back(DAG.getConstant(NewBits, DstEltVT));
    }

    EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size());
    return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
                       &Ops[0], Ops.size());
  }

  // Finally, this must be the case where we are shrinking elements: each input
  // turns into multiple outputs.
  bool isS2V = ISD::isScalarToVector(BV);
  unsigned NumOutputsPerInput = SrcBitSize/DstBitSize;
  EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
                            NumOutputsPerInput*BV->getNumOperands());
  SmallVector<SDValue, 8> Ops;

  for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
    if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
      for (unsigned j = 0; j != NumOutputsPerInput; ++j)
        Ops.push_back(DAG.getUNDEF(DstEltVT));
      continue;
    }

    APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
                  getAPIntValue().zextOrTrunc(SrcBitSize);

    for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
      APInt ThisVal = OpVal.trunc(DstBitSize);
      Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
      if (isS2V && i == 0 && j == 0 && ThisVal.zext(SrcBitSize) == OpVal)
        // Simply turn this into a SCALAR_TO_VECTOR of the new type.
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
                           Ops[0]);
      OpVal = OpVal.lshr(DstBitSize);
    }

    // For big endian targets, swap the order of the pieces of each element.
5634 if (TLI.isBigEndian()) 5635 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end()); 5636 } 5637 5638 return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT, 5639 &Ops[0], Ops.size()); 5640} 5641 5642SDValue DAGCombiner::visitFADD(SDNode *N) { 5643 SDValue N0 = N->getOperand(0); 5644 SDValue N1 = N->getOperand(1); 5645 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 5646 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5647 EVT VT = N->getValueType(0); 5648 5649 // fold vector ops 5650 if (VT.isVector()) { 5651 SDValue FoldedVOp = SimplifyVBinOp(N); 5652 if (FoldedVOp.getNode()) return FoldedVOp; 5653 } 5654 5655 // fold (fadd c1, c2) -> c1 + c2 5656 if (N0CFP && N1CFP && VT != MVT::ppcf128) 5657 return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1); 5658 // canonicalize constant to RHS 5659 if (N0CFP && !N1CFP) 5660 return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N0); 5661 // fold (fadd A, 0) -> A 5662 if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && 5663 N1CFP->getValueAPF().isZero()) 5664 return N0; 5665 // fold (fadd A, (fneg B)) -> (fsub A, B) 5666 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 5667 isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2) 5668 return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, 5669 GetNegatedExpression(N1, DAG, LegalOperations)); 5670 // fold (fadd (fneg A), B) -> (fsub B, A) 5671 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 5672 isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2) 5673 return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1, 5674 GetNegatedExpression(N0, DAG, LegalOperations)); 5675 5676 // If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2)) 5677 if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && 5678 N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() && 5679 isa<ConstantFPSDNode>(N0.getOperand(1))) 5680 return DAG.getNode(ISD::FADD, 
N->getDebugLoc(), VT, N0.getOperand(0), 5681 DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5682 N0.getOperand(1), N1)); 5683 5684 // In unsafe math mode, we can fold chains of FADD's of the same value 5685 // into multiplications. This transform is not safe in general because 5686 // we are reducing the number of rounding steps. 5687 if (DAG.getTarget().Options.UnsafeFPMath && 5688 TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && 5689 !N0CFP && !N1CFP) { 5690 if (N0.getOpcode() == ISD::FMUL) { 5691 ConstantFPSDNode *CFP00 = dyn_cast<ConstantFPSDNode>(N0.getOperand(0)); 5692 ConstantFPSDNode *CFP01 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 5693 5694 // (fadd (fmul c, x), x) -> (fmul c+1, x) 5695 if (CFP00 && !CFP01 && N0.getOperand(1) == N1) { 5696 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5697 SDValue(CFP00, 0), 5698 DAG.getConstantFP(1.0, VT)); 5699 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5700 N1, NewCFP); 5701 } 5702 5703 // (fadd (fmul x, c), x) -> (fmul c+1, x) 5704 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) { 5705 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5706 SDValue(CFP01, 0), 5707 DAG.getConstantFP(1.0, VT)); 5708 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5709 N1, NewCFP); 5710 } 5711 5712 // (fadd (fadd x, x), x) -> (fmul 3.0, x) 5713 if (!CFP00 && !CFP01 && N0.getOperand(0) == N0.getOperand(1) && 5714 N0.getOperand(0) == N1) { 5715 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5716 N1, DAG.getConstantFP(3.0, VT)); 5717 } 5718 5719 // (fadd (fmul c, x), (fadd x, x)) -> (fmul c+2, x) 5720 if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD && 5721 N1.getOperand(0) == N1.getOperand(1) && 5722 N0.getOperand(1) == N1.getOperand(0)) { 5723 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5724 SDValue(CFP00, 0), 5725 DAG.getConstantFP(2.0, VT)); 5726 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5727 N0.getOperand(1), NewCFP); 5728 } 5729 5730 // (fadd (fmul x, c), (fadd 
x, x)) -> (fmul c+2, x) 5731 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD && 5732 N1.getOperand(0) == N1.getOperand(1) && 5733 N0.getOperand(0) == N1.getOperand(0)) { 5734 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5735 SDValue(CFP01, 0), 5736 DAG.getConstantFP(2.0, VT)); 5737 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5738 N0.getOperand(0), NewCFP); 5739 } 5740 } 5741 5742 if (N1.getOpcode() == ISD::FMUL) { 5743 ConstantFPSDNode *CFP10 = dyn_cast<ConstantFPSDNode>(N1.getOperand(0)); 5744 ConstantFPSDNode *CFP11 = dyn_cast<ConstantFPSDNode>(N1.getOperand(1)); 5745 5746 // (fadd x, (fmul c, x)) -> (fmul c+1, x) 5747 if (CFP10 && !CFP11 && N1.getOperand(1) == N0) { 5748 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5749 SDValue(CFP10, 0), 5750 DAG.getConstantFP(1.0, VT)); 5751 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5752 N0, NewCFP); 5753 } 5754 5755 // (fadd x, (fmul x, c)) -> (fmul c+1, x) 5756 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) { 5757 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5758 SDValue(CFP11, 0), 5759 DAG.getConstantFP(1.0, VT)); 5760 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5761 N0, NewCFP); 5762 } 5763 5764 // (fadd x, (fadd x, x)) -> (fmul 3.0, x) 5765 if (!CFP10 && !CFP11 && N1.getOperand(0) == N1.getOperand(1) && 5766 N1.getOperand(0) == N0) { 5767 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5768 N0, DAG.getConstantFP(3.0, VT)); 5769 } 5770 5771 // (fadd (fadd x, x), (fmul c, x)) -> (fmul c+2, x) 5772 if (CFP10 && !CFP11 && N1.getOpcode() == ISD::FADD && 5773 N1.getOperand(0) == N1.getOperand(1) && 5774 N0.getOperand(1) == N1.getOperand(0)) { 5775 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5776 SDValue(CFP10, 0), 5777 DAG.getConstantFP(2.0, VT)); 5778 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5779 N0.getOperand(1), NewCFP); 5780 } 5781 5782 // (fadd (fadd x, x), (fmul x, c)) -> (fmul c+2, x) 5783 if (CFP11 && !CFP10 
&& N1.getOpcode() == ISD::FADD && 5784 N1.getOperand(0) == N1.getOperand(1) && 5785 N0.getOperand(0) == N1.getOperand(0)) { 5786 SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, 5787 SDValue(CFP11, 0), 5788 DAG.getConstantFP(2.0, VT)); 5789 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5790 N0.getOperand(0), NewCFP); 5791 } 5792 } 5793 5794 // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x) 5795 if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && 5796 N0.getOperand(0) == N0.getOperand(1) && 5797 N1.getOperand(0) == N1.getOperand(1) && 5798 N0.getOperand(0) == N1.getOperand(0)) { 5799 return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, 5800 N0.getOperand(0), 5801 DAG.getConstantFP(4.0, VT)); 5802 } 5803 } 5804 5805 // FADD -> FMA combines: 5806 if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast || 5807 DAG.getTarget().Options.UnsafeFPMath) && 5808 DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) && 5809 TLI.isOperationLegalOrCustom(ISD::FMA, VT)) { 5810 5811 // fold (fadd (fmul x, y), z) -> (fma x, y, z) 5812 if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) { 5813 return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, 5814 N0.getOperand(0), N0.getOperand(1), N1); 5815 } 5816 5817 // fold (fadd x, (fmul y, z)) -> (fma x, y, z) 5818 // Note: Commutes FADD operands. 
5819 if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) { 5820 return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, 5821 N1.getOperand(0), N1.getOperand(1), N0); 5822 } 5823 } 5824 5825 return SDValue(); 5826} 5827 5828SDValue DAGCombiner::visitFSUB(SDNode *N) { 5829 SDValue N0 = N->getOperand(0); 5830 SDValue N1 = N->getOperand(1); 5831 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 5832 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5833 EVT VT = N->getValueType(0); 5834 DebugLoc dl = N->getDebugLoc(); 5835 5836 // fold vector ops 5837 if (VT.isVector()) { 5838 SDValue FoldedVOp = SimplifyVBinOp(N); 5839 if (FoldedVOp.getNode()) return FoldedVOp; 5840 } 5841 5842 // fold (fsub c1, c2) -> c1-c2 5843 if (N0CFP && N1CFP && VT != MVT::ppcf128) 5844 return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1); 5845 // fold (fsub A, 0) -> A 5846 if (DAG.getTarget().Options.UnsafeFPMath && 5847 N1CFP && N1CFP->getValueAPF().isZero()) 5848 return N0; 5849 // fold (fsub 0, B) -> -B 5850 if (DAG.getTarget().Options.UnsafeFPMath && 5851 N0CFP && N0CFP->getValueAPF().isZero()) { 5852 if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options)) 5853 return GetNegatedExpression(N1, DAG, LegalOperations); 5854 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 5855 return DAG.getNode(ISD::FNEG, dl, VT, N1); 5856 } 5857 // fold (fsub A, (fneg B)) -> (fadd A, B) 5858 if (isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options)) 5859 return DAG.getNode(ISD::FADD, dl, VT, N0, 5860 GetNegatedExpression(N1, DAG, LegalOperations)); 5861 5862 // If 'unsafe math' is enabled, fold 5863 // (fsub x, x) -> 0.0 & 5864 // (fsub x, (fadd x, y)) -> (fneg y) & 5865 // (fsub x, (fadd y, x)) -> (fneg y) 5866 if (DAG.getTarget().Options.UnsafeFPMath) { 5867 if (N0 == N1) 5868 return DAG.getConstantFP(0.0f, VT); 5869 5870 if (N1.getOpcode() == ISD::FADD) { 5871 SDValue N10 = N1->getOperand(0); 5872 SDValue N11 = N1->getOperand(1); 5873 5874 
if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, 5875 &DAG.getTarget().Options)) 5876 return GetNegatedExpression(N11, DAG, LegalOperations); 5877 else if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, 5878 &DAG.getTarget().Options)) 5879 return GetNegatedExpression(N10, DAG, LegalOperations); 5880 } 5881 } 5882 5883 // FSUB -> FMA combines: 5884 if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast || 5885 DAG.getTarget().Options.UnsafeFPMath) && 5886 DAG.getTarget().getTargetLowering()->isFMAFasterThanMulAndAdd(VT) && 5887 TLI.isOperationLegalOrCustom(ISD::FMA, VT)) { 5888 5889 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z)) 5890 if (N0.getOpcode() == ISD::FMUL && N0->hasOneUse()) { 5891 return DAG.getNode(ISD::FMA, dl, VT, 5892 N0.getOperand(0), N0.getOperand(1), 5893 DAG.getNode(ISD::FNEG, dl, VT, N1)); 5894 } 5895 5896 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x) 5897 // Note: Commutes FSUB operands. 5898 if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) { 5899 return DAG.getNode(ISD::FMA, dl, VT, 5900 DAG.getNode(ISD::FNEG, dl, VT, 5901 N1.getOperand(0)), 5902 N1.getOperand(1), N0); 5903 } 5904 5905 // fold (fsub (-(fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) 5906 if (N0.getOpcode() == ISD::FNEG && 5907 N0.getOperand(0).getOpcode() == ISD::FMUL && 5908 N0->hasOneUse() && N0.getOperand(0).hasOneUse()) { 5909 SDValue N00 = N0.getOperand(0).getOperand(0); 5910 SDValue N01 = N0.getOperand(0).getOperand(1); 5911 return DAG.getNode(ISD::FMA, dl, VT, 5912 DAG.getNode(ISD::FNEG, dl, VT, N00), N01, 5913 DAG.getNode(ISD::FNEG, dl, VT, N1)); 5914 } 5915 } 5916 5917 return SDValue(); 5918} 5919 5920SDValue DAGCombiner::visitFMUL(SDNode *N) { 5921 SDValue N0 = N->getOperand(0); 5922 SDValue N1 = N->getOperand(1); 5923 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 5924 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 5925 EVT VT = N->getValueType(0); 5926 const TargetLowering &TLI = 
    DAG.getTargetLoweringInfo();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fmul c1, c2) -> c1*c2
  if (N0CFP && N1CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
  // canonicalize constant to RHS
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N1, N0);
  // fold (fmul A, 0) -> 0
  // (unsafe-math only: wrong for A == NaN/Inf, and for the sign of zero.)
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N1CFP->getValueAPF().isZero())
    return N1;
  // fold (fmul A, 0) -> 0, vector edition.
  if (DAG.getTarget().Options.UnsafeFPMath &&
      ISD::isBuildVectorAllZeros(N1.getNode()))
    return N1;
  // fold (fmul A, 1.0) -> A
  if (N1CFP && N1CFP->isExactlyValue(1.0))
    return N0;
  // fold (fmul X, 2.0) -> (fadd X, X)
  if (N1CFP && N1CFP->isExactlyValue(+2.0))
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N0);
  // fold (fmul X, -1.0) -> (fneg X)
  if (N1CFP && N1CFP->isExactlyValue(-1.0))
    if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
      return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, N0);

  // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y)
  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
                                       &DAG.getTarget().Options)) {
    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
                                         &DAG.getTarget().Options)) {
      // Both can be negated for free, check to see if at least one is cheaper
      // negated.
      // isNegatibleForFree returns 2 when the negation is strictly cheaper;
      // only transform in that case so we never pessimize.
      if (LHSNeg == 2 || RHSNeg == 2)
        return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                           GetNegatedExpression(N0, DAG, LegalOperations),
                           GetNegatedExpression(N1, DAG, LegalOperations));
    }
  }

  // If allowed, fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
  if (DAG.getTarget().Options.UnsafeFPMath &&
      N1CFP && N0.getOpcode() == ISD::FMUL &&
      N0.getNode()->hasOneUse() && isa<ConstantFPSDNode>(N0.getOperand(1)))
    return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT,
                                   N0.getOperand(1), N1));

  return SDValue();
}

/// visitFMA - Combine an FMA node (N0*N1 + N2); returns the replacement
/// value, or an empty SDValue if no fold applies.
SDValue DAGCombiner::visitFMA(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (fma 1.0, x, y) -> (fadd x, y)
  if (N0CFP && N0CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N2);
  // fold (fma x, 1.0, y) -> (fadd x, y)
  if (N1CFP && N1CFP->isExactlyValue(1.0))
    return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N2);

  // Canonicalize (fma c, x, y) -> (fma x, c, y)
  if (N0CFP && !N1CFP)
    return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N1, N0, N2);

  return SDValue();
}

/// visitFDIV - Combine an FDIV node; returns the replacement value, or an
/// empty SDValue if no fold applies.
SDValue DAGCombiner::visitFDIV(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // fold vector ops
  if (VT.isVector()) {
    SDValue FoldedVOp = SimplifyVBinOp(N);
    if (FoldedVOp.getNode()) return FoldedVOp;
  }

  // fold (fdiv c1, c2) -> c1/c2
  if (N0CFP && N1CFP && VT != MVT::ppcf128)
    return
      DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);

  // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
  if (N1CFP && VT != MVT::ppcf128 && DAG.getTarget().Options.UnsafeFPMath) {
    // Compute the reciprocal 1.0 / c2.
    APFloat N1APF = N1CFP->getValueAPF();
    APFloat Recip(N1APF.getSemantics(), 1);  // 1.0
    APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven);
    // Only do the transform if the reciprocal is a legal fp immediate that
    // isn't too nasty (eg NaN, denormal, ...).
    if ((st == APFloat::opOK || st == APFloat::opInexact) &&  // Not too nasty
        (!LegalOperations ||
         // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM
         // backend)... we should handle this gracefully after Legalize.
         // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) ||
         TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) ||
         TLI.isFPImmLegal(Recip, VT)))
      return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0,
                         DAG.getConstantFP(Recip, VT));
  }

  // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y)
  if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI,
                                       &DAG.getTarget().Options)) {
    if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI,
                                         &DAG.getTarget().Options)) {
      // Both can be negated for free, check to see if at least one is cheaper
      // negated.
      // isNegatibleForFree returns 2 when the negation is strictly cheaper.
      if (LHSNeg == 2 || RHSNeg == 2)
        return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT,
                           GetNegatedExpression(N0, DAG, LegalOperations),
                           GetNegatedExpression(N1, DAG, LegalOperations));
    }
  }

  return SDValue();
}

/// visitFREM - Combine an FREM node; only constant folding is attempted.
SDValue DAGCombiner::visitFREM(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  // fold (frem c1, c2) -> fmod(c1,c2)
  if (N0CFP && N1CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);

  return SDValue();
}

/// visitFCOPYSIGN - Combine an FCOPYSIGN node (magnitude of N0, sign of N1).
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
  EVT VT = N->getValueType(0);

  if (N0CFP && N1CFP && VT != MVT::ppcf128)  // Constant fold
    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);

  if (N1CFP) {
    const APFloat& V = N1CFP->getValueAPF();
    // copysign(x, c1) -> fabs(x)       iff ispos(c1)
    // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1)
    if (!V.isNegative()) {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT))
        return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
    } else {
      if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
        return DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT,
                           DAG.getNode(ISD::FABS, N0.getDebugLoc(), VT, N0));
    }
  }

  // The sign of N0 is irrelevant, so strip sign-changing ops from it:
  // copysign(fabs(x), y) -> copysign(x, y)
  // copysign(fneg(x), y) -> copysign(x, y)
  // copysign(copysign(x,z), y) -> copysign(x, y)
  if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG ||
      N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN,
                       N->getDebugLoc(), VT,
                       N0.getOperand(0), N1);

  // copysign(x, abs(y)) -> abs(x)
  if (N1.getOpcode() == ISD::FABS)
    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);

  // copysign(x, copysign(y,z)) -> copysign(x, z)
  if (N1.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
                       N0, N1.getOperand(1));

  // Extending or rounding never changes the sign, so look through them:
  // copysign(x, fp_extend(y)) -> copysign(x, y)
  // copysign(x, fp_round(y)) -> copysign(x, y)
  if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND)
    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
                       N0, N1.getOperand(0));

  return SDValue();
}

/// visitSINT_TO_FP - Combine a SINT_TO_FP node.
SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // fold (sint_to_fp c1) -> c1fp
  if (N0C && OpVT != MVT::ppcf128 &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);

  // If the input is a legal type, and SINT_TO_FP is not legal on this target,
  // but UINT_TO_FP is legal on this target, try to convert.
  if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) &&
      TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) {
    // If the sign bit is known to be zero, we can change this to UINT_TO_FP.
    if (DAG.SignBitIsZero(N0))
      return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
  }

  // The next optimizations are desirable only if SELECT_CC can be lowered.
  // Check against MVT::Other for SELECT_CC, which is a workaround for targets
  // having to say they don't support SELECT_CC on every type the DAG knows
  // about, since there is no way to mark an opcode illegal at all value types
  // (See also visitSELECT)
  if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) {
    // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0, cc)
    // A signed-extended i1 true is all-ones, i.e. -1.0 after conversion.
    if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 &&
        !VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
      SDValue Ops[] =
        { N0.getOperand(0), N0.getOperand(1),
          DAG.getConstantFP(-1.0, VT) , DAG.getConstantFP(0.0, VT),
          N0.getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5);
    }

    // fold (sint_to_fp (zext (setcc x, y, cc))) ->
    //      (select_cc x, y, 1.0, 0.0, cc)
    // A zero-extended setcc is 0 or 1, so here true converts to 1.0.
    if (N0.getOpcode() == ISD::ZERO_EXTEND &&
        N0.getOperand(0).getOpcode() == ISD::SETCC &&!VT.isVector() &&
        (!LegalOperations ||
         TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
      SDValue Ops[] =
        { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1),
          DAG.getConstantFP(1.0, VT) , DAG.getConstantFP(0.0, VT),
          N0.getOperand(0).getOperand(2) };
      return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5);
    }
  }

  return SDValue();
}

/// visitUINT_TO_FP - Combine a UINT_TO_FP node.
SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
  EVT VT = N->getValueType(0);
  EVT OpVT = N0.getValueType();

  // fold (uint_to_fp c1) -> c1fp
  if (N0C && OpVT != MVT::ppcf128 &&
      // ...but only if the target supports immediate floating-point values
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
    return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);

// If the input is a legal type, and UINT_TO_FP is not legal on this target, 6193 // but SINT_TO_FP is legal on this target, try to convert. 6194 if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) && 6195 TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) { 6196 // If the sign bit is known to be zero, we can change this to SINT_TO_FP. 6197 if (DAG.SignBitIsZero(N0)) 6198 return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0); 6199 } 6200 6201 // The next optimizations are desireable only if SELECT_CC can be lowered. 6202 // Check against MVT::Other for SELECT_CC, which is a workaround for targets 6203 // having to say they don't support SELECT_CC on every type the DAG knows 6204 // about, since there is no way to mark an opcode illegal at all value types 6205 // (See also visitSELECT) 6206 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other)) { 6207 // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc) 6208 6209 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() && 6210 (!LegalOperations || 6211 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 6212 SDValue Ops[] = 6213 { N0.getOperand(0), N0.getOperand(1), 6214 DAG.getConstantFP(1.0, VT), DAG.getConstantFP(0.0, VT), 6215 N0.getOperand(2) }; 6216 return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT, Ops, 5); 6217 } 6218 } 6219 6220 return SDValue(); 6221} 6222 6223SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { 6224 SDValue N0 = N->getOperand(0); 6225 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6226 EVT VT = N->getValueType(0); 6227 6228 // fold (fp_to_sint c1fp) -> c1 6229 if (N0CFP) 6230 return DAG.getNode(ISD::FP_TO_SINT, N->getDebugLoc(), VT, N0); 6231 6232 return SDValue(); 6233} 6234 6235SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { 6236 SDValue N0 = N->getOperand(0); 6237 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 6238 EVT VT = N->getValueType(0); 6239 6240 // fold (fp_to_uint c1fp) -> c1 6241 if (N0CFP && VT != 
      MVT::ppcf128)
    return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFP_ROUND - Combine an FP_ROUND node.  Operand 1 is the "trunc" flag:
/// 1 means the round is known not to change the value.
SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fp_round c1fp) -> c1fp
  if (N0CFP && N0.getValueType() != MVT::ppcf128)
    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);

  // fold (fp_round (fp_extend x)) -> x
  if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType())
    return N0.getOperand(0);

  // fold (fp_round (fp_round x)) -> (fp_round x)
  if (N0.getOpcode() == ISD::FP_ROUND) {
    // This is a value preserving truncation if both round's are.
    bool IsTrunc = N->getConstantOperandVal(1) == 1 &&
                   N0.getNode()->getConstantOperandVal(1) == 1;
    return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0.getOperand(0),
                       DAG.getIntPtrConstant(IsTrunc));
  }

  // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y)
  if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) {
    SDValue Tmp = DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(), VT,
                              N0.getOperand(0), N1);
    AddToWorkList(Tmp.getNode());
    return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT,
                       Tmp, N0.getOperand(1));
  }

  return SDValue();
}

/// visitFP_ROUND_INREG - Combine an FP_ROUND_INREG node; only constant
/// folding, re-materializing the constant at the narrow type.
SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  // Note: this local deliberately shadows the type name 'EVT'.
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);

  // fold (fp_round_inreg c1fp) -> c1fp
  if (N0CFP && isTypeLegal(EVT)) {
    SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
  }

  return SDValue();
}

/// visitFP_EXTEND - Combine an FP_EXTEND node.
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded.
  if (N->hasOneUse() &&
      N->use_begin()->getOpcode() == ISD::FP_ROUND)
    return SDValue();

  // fold (fp_extend c1fp) -> c1fp
  if (N0CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);

  // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
  // value of X.
  if (N0.getOpcode() == ISD::FP_ROUND
      && N0.getNode()->getConstantOperandVal(1) == 1) {
    SDValue In = N0.getOperand(0);
    if (In.getValueType() == VT) return In;
    if (VT.bitsLT(In.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT,
                         In, N0.getOperand(1));
    return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, In);
  }

  // fold (fpext (load x)) -> (fpext (fptrunc (extload x)))
  if (ISD::isNON_EXTLoad(N0.getNode()) && N0.hasOneUse() &&
      ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
                                     LN0->getChain(),
                                     LN0->getBasePtr(), LN0->getPointerInfo(),
                                     N0.getValueType(),
                                     LN0->isVolatile(), LN0->isNonTemporal(),
                                     LN0->getAlignment());
    CombineTo(N, ExtLoad);
    // Other users of the old load see an fp_round of the extload; chain users
    // are redirected to the extload's chain result.
    CombineTo(N0.getNode(),
              DAG.getNode(ISD::FP_ROUND, N0.getDebugLoc(),
                          N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)),
              ExtLoad.getValue(1));
    return SDValue(N, 0);   // Return N so it doesn't get rechecked!
  }

  return SDValue();
}

/// visitFNEG - Combine an FNEG node.
SDValue DAGCombiner::visitFNEG(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(),
                         &DAG.getTarget().Options))
    return GetNegatedExpression(N0, DAG, LegalOperations);

  // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
  // constant pool values.
  if (!TLI.isFNegFree(VT) && N0.getOpcode() == ISD::BITCAST &&
      !VT.isVector() &&
      N0.getNode()->hasOneUse() &&
      N0.getOperand(0).getValueType().isInteger()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      // Flip only the sign bit in the integer domain.
      Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
              DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
      AddToWorkList(Int.getNode());
      return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                         VT, Int);
    }
  }

  return SDValue();
}

/// visitFCEIL - Combine an FCEIL node; only constant folding.
SDValue DAGCombiner::visitFCEIL(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fceil c1) -> fceil(c1)
  if (N0CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FCEIL, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFTRUNC - Combine an FTRUNC node; only constant folding.
SDValue DAGCombiner::visitFTRUNC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ftrunc c1) -> ftrunc(c1)
  if (N0CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FTRUNC, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFFLOOR - Combine an FFLOOR node; only constant folding.
SDValue DAGCombiner::visitFFLOOR(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (ffloor c1) ->
  // ffloor(c1)
  if (N0CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FFLOOR, N->getDebugLoc(), VT, N0);

  return SDValue();
}

/// visitFABS - Combine an FABS node.
SDValue DAGCombiner::visitFABS(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
  EVT VT = N->getValueType(0);

  // fold (fabs c1) -> fabs(c1)
  if (N0CFP && VT != MVT::ppcf128)
    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
  // fold (fabs (fabs x)) -> (fabs x)
  if (N0.getOpcode() == ISD::FABS)
    return N->getOperand(0);
  // fold (fabs (fneg x)) -> (fabs x)
  // fold (fabs (fcopysign x, y)) -> (fabs x)
  if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN)
    return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0.getOperand(0));

  // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
  // constant pool values.
  if (!TLI.isFAbsFree(VT) &&
      N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
      N0.getOperand(0).getValueType().isInteger() &&
      !N0.getOperand(0).getValueType().isVector()) {
    SDValue Int = N0.getOperand(0);
    EVT IntVT = Int.getValueType();
    if (IntVT.isInteger() && !IntVT.isVector()) {
      // Clear only the sign bit in the integer domain.
      Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
             DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
      AddToWorkList(Int.getNode());
      return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
                         N->getValueType(0), Int);
    }
  }

  return SDValue();
}

/// visitBRCOND - Combine a BRCOND node (operands: chain, condition, dest BB).
SDValue DAGCombiner::visitBRCOND(SDNode *N) {
  SDValue Chain = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
  // on the target.
  if (N1.getOpcode() == ISD::SETCC &&
      TLI.isOperationLegalOrCustom(ISD::BR_CC, MVT::Other)) {
    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
                       Chain, N1.getOperand(2),
                       N1.getOperand(0), N1.getOperand(1), N2);
  }

  if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
      ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
       (N1.getOperand(0).hasOneUse() &&
        N1.getOperand(0).getOpcode() == ISD::SRL))) {
    SDNode *Trunc = 0;
    if (N1.getOpcode() == ISD::TRUNCATE) {
      // Look past the truncate.
      Trunc = N1.getNode();
      N1 = N1.getOperand(0);
    }

    // Match this pattern so that we can generate simpler code:
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = srl i32 %b, 1
    //   brcond i32 %c ...
    //
    // into
    //
    //   %a = ...
    //   %b = and i32 %a, 2
    //   %c = setcc eq %b, 0
    //   brcond %c ...
    //
    // This applies only when the AND constant value has one bit set and the
    // SRL constant is equal to the log2 of the AND constant. The back-end is
    // smart enough to convert the result into a TEST/JMP sequence.
    SDValue Op0 = N1.getOperand(0);
    SDValue Op1 = N1.getOperand(1);

    if (Op0.getOpcode() == ISD::AND &&
        Op1.getOpcode() == ISD::Constant) {
      SDValue AndOp1 = Op0.getOperand(1);

      if (AndOp1.getOpcode() == ISD::Constant) {
        const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue();

        // Require a single-bit AND mask whose bit index equals the SRL shift
        // amount, so the SRL extracts exactly that bit.
        if (AndConst.isPowerOf2() &&
            cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) {
          SDValue SetCC =
            DAG.getSetCC(N->getDebugLoc(),
                         TLI.getSetCCResultType(Op0.getValueType()),
                         Op0, DAG.getConstant(0, Op0.getValueType()),
                         ISD::SETNE);

          SDValue NewBRCond = DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                                          MVT::Other, Chain, SetCC, N2);
          // Don't add the new BRCond into the worklist or else SimplifySelectCC
          // will convert it back to (X & C1) >> C2.
          CombineTo(N, NewBRCond, false);
          // Truncate is dead.
          if (Trunc) {
            removeFromWorkList(Trunc);
            DAG.DeleteNode(Trunc);
          }
          // Replace the uses of SRL with SETCC
          WorkListRemover DeadNodes(*this);
          DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
          removeFromWorkList(N1.getNode());
          DAG.DeleteNode(N1.getNode());
          return SDValue(N, 0);   // Return N so it doesn't get rechecked!
        }
      }
    }

    if (Trunc)
      // Restore N1 if the above transformation doesn't match.
      N1 = N->getOperand(1);
  }

  // Transform br(xor(x, y)) -> br(x != y)
  // Transform br(xor(xor(x,y), 1)) -> br (x == y)
  if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
    SDNode *TheXor = N1.getNode();
    SDValue Op0 = TheXor->getOperand(0);
    SDValue Op1 = TheXor->getOperand(1);
    if (Op0.getOpcode() == Op1.getOpcode()) {
      // Avoid missing important xor optimizations.
      // Run the generic XOR combines first; if they produced a simpler value,
      // splice it in as the branch condition.
      SDValue Tmp = visitXOR(TheXor);
      if (Tmp.getNode() && Tmp.getNode() != TheXor) {
        DEBUG(dbgs() << "\nReplacing.8 ";
              TheXor->dump(&DAG);
              dbgs() << "\nWith: ";
              Tmp.getNode()->dump(&DAG);
              dbgs() << '\n');
        WorkListRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(N1, Tmp);
        removeFromWorkList(TheXor);
        DAG.DeleteNode(TheXor);
        return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                           MVT::Other, Chain, Tmp, N2);
      }
    }

    if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
      bool Equal = false;
      // NOTE(review): Op0 must be a ConstantSDNode for this dyn_cast to
      // succeed, so Op0.getOpcode() is ISD::Constant here and can never be
      // ISD::XOR -- this br(xor(xor(x,y),1)) path looks unreachable as
      // written.  Presumably the constant 1 was expected on Op1 (constants
      // are canonicalized to the RHS); confirm intent before changing, since
      // the SETCC below would also need to use the inner xor's operands.
      if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
        if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
            Op0.getOpcode() == ISD::XOR) {
          TheXor = Op0.getNode();
          Equal = true;
        }

      EVT SetCCVT = N1.getValueType();
      if (LegalTypes)
        SetCCVT = TLI.getSetCCResultType(SetCCVT);
      SDValue SetCC = DAG.getSetCC(TheXor->getDebugLoc(),
                                   SetCCVT,
                                   Op0, Op1,
                                   Equal ? ISD::SETEQ : ISD::SETNE);
      // Replace the uses of XOR with SETCC
      WorkListRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N1, SetCC);
      removeFromWorkList(N1.getNode());
      DAG.DeleteNode(N1.getNode());
      return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                         MVT::Other, Chain, SetCC, N2);
    }
  }

  return SDValue();
}

// Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
//
SDValue DAGCombiner::visitBR_CC(SDNode *N) {
  CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1));
  SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3);

  // If N is a constant we could fold this into a fallthrough or unconditional
  // branch. However that doesn't happen very often in normal code, because
  // Instcombine/SimplifyCFG should have handled the available opportunities.
  // If we did this folding here, it would be necessary to update the
  // MachineBasicBlock CFG, which is awkward.

  // Use SimplifySetCC to simplify SETCC's.
  SDValue Simp = SimplifySetCC(TLI.getSetCCResultType(CondLHS.getValueType()),
                               CondLHS, CondRHS, CC->get(), N->getDebugLoc(),
                               false);
  if (Simp.getNode()) AddToWorkList(Simp.getNode());

  // fold to a simpler setcc
  if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
    return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
                       N->getOperand(0), Simp.getOperand(2),
                       Simp.getOperand(0), Simp.getOperand(1),
                       N->getOperand(4));

  return SDValue();
}

/// canFoldInAddressingMode - Return true if 'Use' is a load or a store that
/// uses N as its base pointer and that N may be folded in the load / store
/// addressing mode.  N must be an ADD or SUB of the base pointer; anything
/// else (and any already-indexed memory op) returns false.
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use,
                                    SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
    if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
      return false;
    VT = Use->getValueType(0);
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
    if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
      return false;
    VT = ST->getValue().getValueType();
  } else
    return false;

  // Describe the addressing expression to the target as an AddrMode query.
  TargetLowering::AddrMode AM;
  if (N->getOpcode() == ISD::ADD) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else if (N->getOpcode() == ISD::SUB) {
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (Offset)
      // [reg +/- imm]
      AM.BaseOffs = -Offset->getSExtValue();
    else
      // [reg +/- reg]
      AM.Scale = 1;
  } else
    return false;

  return
    TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
}

/// CombineToPreIndexedLoadStore - Try turning a load / store into a
/// pre-indexed load / store when the base pointer is an add or subtract
/// and it has other uses besides the load / store. After the
/// transformation, the new indexed load / store has effectively folded
/// the add / subtract in and all of its other uses are redirected to the
/// new load / store.  Returns true if N was replaced.
bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {
  if (Level < AfterLegalizeDAG)
    return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail
  // out.  There is no reason to make this a preinc/predec.
  if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) ||
      Ptr.getNode()->hasOneUse())
    return false;

  // Ask the target to do addressing mode selection.
  SDValue BasePtr;
  SDValue Offset;
  ISD::MemIndexedMode AM = ISD::UNINDEXED;
  if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG))
    return false;
  // Don't create an indexed load / store with zero offset.
  if (isa<ConstantSDNode>(Offset) &&
      cast<ConstantSDNode>(Offset)->isNullValue())
    return false;

  // Try turning it into a pre-indexed load / store except when:
  // 1) The new base ptr is a frame index.
  // 2) If N is a store and the new base ptr is either the same as or is a
  //    predecessor of the value being stored.
  // 3) Another use of old base ptr is a predecessor of N. If ptr is folded
  //    that would create a cycle.
  // 4) All uses are load / store ops that use it as old base ptr.

  // Check #1.  Preinc'ing a frame index would require copying the stack pointer
  // (plus the implicit offset) to a register to preinc anyway.
  if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
    return false;

  // Check #2.
  if (!isLoad) {
    SDValue Val = cast<StoreSDNode>(N)->getValue();
    if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode()))
      return false;
  }

  // Now check for #3 and #4.
  bool RealUse = false;

  // Caches for hasPredecessorHelper
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;

  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
         E = Ptr.getNode()->use_end(); I != E; ++I) {
    SDNode *Use = *I;
    if (Use == N)
      continue;
    // Check #3: folding Ptr into a node that feeds N would create a cycle.
    if (N->hasPredecessorHelper(Use, Visited, Worklist))
      return false;

    // If Ptr may be folded in addressing mode of other use, then it's
    // not profitable to do this transformation.
    if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI))
      RealUse = true;
  }

  if (!RealUse)
    return false;

  SDValue Result;
  if (isLoad)
    Result = DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
                                BasePtr, Offset, AM);
  else
    Result = DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
                                 BasePtr, Offset, AM);
  ++PreIndexedNodes;
  ++NodesCombined;
  DEBUG(dbgs() << "\nReplacing.4 ";
        N->dump(&DAG);
        dbgs() << "\nWith: ";
        Result.getNode()->dump(&DAG);
        dbgs() << '\n');
  WorkListRemover DeadNodes(*this);
  // The indexed node's results are: (value,) new base, chain.
  if (isLoad) {
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
  } else {
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
  }

  // Finally, since the node is now dead, remove it from the graph.
  DAG.DeleteNode(N);

  // Replace the uses of Ptr with uses of the updated base value.
  DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0));
  removeFromWorkList(Ptr.getNode());
  DAG.DeleteNode(Ptr.getNode());

  return true;
}

/// CombineToPostIndexedLoadStore - Try to combine a load / store with a
/// add / sub of the base pointer node into a post-indexed load / store.
/// The transformation folded the add / subtract into the new indexed
/// load / store effectively and all of its uses are redirected to the
/// new load / store.
bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
  // Post-indexed forms are only formed once the DAG has been legalized.
  if (Level < AfterLegalizeDAG)
    return false;

  // Accept only plain (unindexed) loads / stores whose memory VT the target
  // can post-increment or post-decrement.
  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    if (LD->isIndexed())
      return false;
    VT = LD->getMemoryVT();
    if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = LD->getBasePtr();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    if (ST->isIndexed())
      return false;
    VT = ST->getMemoryVT();
    if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) &&
        !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT))
      return false;
    Ptr = ST->getBasePtr();
    isLoad = false;
  } else {
    return false;
  }

  // Ptr needs a use besides N itself: the add / sub that will become the
  // post-increment / post-decrement.
  if (Ptr.getNode()->hasOneUse())
    return false;

  // Scan the other uses of the pointer for an ADD / SUB the target is willing
  // to fold as a post-indexed address update.
  for (SDNode::use_iterator I = Ptr.getNode()->use_begin(),
         E = Ptr.getNode()->use_end(); I != E; ++I) {
    SDNode *Op = *I;
    if (Op == N ||
        (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB))
      continue;

    SDValue BasePtr;
    SDValue Offset;
    ISD::MemIndexedMode AM = ISD::UNINDEXED;
    if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
      // Don't create an indexed load / store with zero offset.
      if (isa<ConstantSDNode>(Offset) &&
          cast<ConstantSDNode>(Offset)->isNullValue())
        continue;

      // Try turning it into a post-indexed load / store except when
      // 1) All uses are load / store ops that use it as base ptr (and
      //    it may be folded as addressing mode).
      // 2) Op must be independent of N, i.e. Op is neither a predecessor
      //    nor a successor of N. Otherwise, if Op is folded that would
      //    create a cycle.

      if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr))
        continue;

      // Check for #1.
      bool TryNext = false;
      for (SDNode::use_iterator II = BasePtr.getNode()->use_begin(),
             EE = BasePtr.getNode()->use_end(); II != EE; ++II) {
        SDNode *Use = *II;
        if (Use == Ptr.getNode())
          continue;

        // If all the uses are load / store addresses, then don't do the
        // transformation.
        if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB) {
          bool RealUse = false;
          for (SDNode::use_iterator III = Use->use_begin(),
                 EEE = Use->use_end(); III != EEE; ++III) {
            SDNode *UseUse = *III;
            if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI))
              RealUse = true;
          }

          if (!RealUse) {
            TryNext = true;
            break;
          }
        }
      }

      if (TryNext)
        continue;

      // Check for #2
      if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
        SDValue Result = isLoad
          ? DAG.getIndexedLoad(SDValue(N,0), N->getDebugLoc(),
                               BasePtr, Offset, AM)
          : DAG.getIndexedStore(SDValue(N,0), N->getDebugLoc(),
                                BasePtr, Offset, AM);
        ++PostIndexedNodes;
        ++NodesCombined;
        DEBUG(dbgs() << "\nReplacing.5 ";
              N->dump(&DAG);
              dbgs() << "\nWith: ";
              Result.getNode()->dump(&DAG);
              dbgs() << '\n');
        WorkListRemover DeadNodes(*this);
        if (isLoad) {
          // An indexed load produces (value, new base, chain): the old load's
          // chain (value #1) maps to result #2 of the new node.
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0));
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2));
        } else {
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1));
        }

        // Finally, since the node is now dead, remove it from the graph.
        DAG.DeleteNode(N);

        // Replace the uses of Op (the add / sub) with uses of the updated
        // base value produced by the new indexed node.
        DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0),
                                      Result.getValue(isLoad ? 1 : 0));
        removeFromWorkList(Op);
        DAG.DeleteNode(Op);
        return true;
      }
    }
  }

  return false;
}

SDValue DAGCombiner::visitLOAD(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();

  // If load is not volatile and there are no uses of the loaded value (and
  // the updated indexed value in case of indexed loads), change uses of the
  // chain value into uses of the chain input (i.e. delete the dead load).
  if (!LD->isVolatile()) {
    if (N->getValueType(1) == MVT::Other) {
      // Unindexed loads.
      if (!N->hasAnyUseOfValue(0)) {
        // It's not safe to use the two value CombineTo variant here. e.g.
        // v1, chain2 = load chain1, loc
        // v2, chain3 = load chain2, loc
        // v3 = add v2, c
        // Now we replace use of chain2 with chain1. This makes the second load
        // isomorphic to the one we are deleting, and thus makes this load live.
        DEBUG(dbgs() << "\nReplacing.6 ";
              N->dump(&DAG);
              dbgs() << "\nWith chain: ";
              Chain.getNode()->dump(&DAG);
              dbgs() << "\n");
        WorkListRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain);

        if (N->use_empty()) {
          removeFromWorkList(N);
          DAG.DeleteNode(N);
        }

        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    } else {
      // Indexed loads.
      assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
      if (!N->hasAnyUseOfValue(0) && !N->hasAnyUseOfValue(1)) {
        // Dead indexed load: neither the loaded value (#0) nor the updated
        // base (#1) is used, so replace both with UNDEF and route the chain
        // (#2) around the node.
        SDValue Undef = DAG.getUNDEF(N->getValueType(0));
        DEBUG(dbgs() << "\nReplacing.7 ";
              N->dump(&DAG);
              dbgs() << "\nWith: ";
              Undef.getNode()->dump(&DAG);
              dbgs() << " and 2 other values\n");
        WorkListRemover DeadNodes(*this);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef);
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1),
                                      DAG.getUNDEF(N->getValueType(1)));
        DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain);
        removeFromWorkList(N);
        DAG.DeleteNode(N);
        return SDValue(N, 0);   // Return N so it doesn't get rechecked!
      }
    }
  }

  // If this load is directly stored, replace the load value with the stored
  // value.
  // TODO: Handle store large -> read small portion.
  // TODO: Handle TRUNCSTORE/LOADEXT
  if (ISD::isNormalLoad(N) && !LD->isVolatile()) {
    if (ISD::isNON_TRUNCStore(Chain.getNode())) {
      StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
      if (PrevST->getBasePtr() == Ptr &&
          PrevST->getValue().getValueType() == N->getValueType(0))
        return CombineTo(N, Chain.getOperand(1), Chain);
    }
  }

  // Try to infer better alignment information than the load already has.
  if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > LD->getAlignment())
        return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
                              LD->getValueType(0),
                              Chain, Ptr, LD->getPointerInfo(),
                              LD->getMemoryVT(),
                              LD->isVolatile(), LD->isNonTemporal(), Align);
    }
  }

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplLoad;

      // Replace the chain to avoid dependency.
      if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
        ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
                               BetterChain, Ptr, LD->getPointerInfo(),
                               LD->isVolatile(), LD->isNonTemporal(),
                               LD->isInvariant(), LD->getAlignment());
      } else {
        ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
                                  LD->getValueType(0),
                                  BetterChain, Ptr, LD->getPointerInfo(),
                                  LD->getMemoryVT(),
                                  LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  LD->getAlignment());
      }

      // Create token factor to keep old chain connected.
      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                                  MVT::Other, Chain, ReplLoad.getValue(1));

      // Make sure the new and old chains are cleaned up.
      AddToWorkList(Token.getNode());

      // Replace uses with load result and token factor. Don't add users
      // to work list.
      return CombineTo(N, ReplLoad.getValue(0), Token, false);
    }
  }

  // Try transforming N to an indexed load.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  return SDValue();
}

/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
/// load is having specific bytes cleared out.  If so, return the byte size
/// being masked out and the shift amount.
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
  // (0, 0) is the failure value returned by every early exit below.
  std::pair<unsigned, unsigned> Result(0, 0);

  // Check for the structure we're looking for.
  if (V->getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(V->getOperand(1)) ||
      !ISD::isNormalLoad(V->getOperand(0).getNode()))
    return Result;

  // Check the chain and pointer.
  LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
  if (LD->getBasePtr() != Ptr) return Result;  // Not from same pointer.

  // The store should be chained directly to the load or be an operand of a
  // tokenfactor.
  if (LD == Chain.getNode())
    ; // ok.
  else if (Chain->getOpcode() != ISD::TokenFactor)
    return Result; // Fail.
  else {
    bool isOk = false;
    for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
      if (Chain->getOperand(i).getNode() == LD) {
        isOk = true;
        break;
      }
    if (!isOk) return Result;
  }

  // This only handles simple types.
  if (V.getValueType() != MVT::i16 &&
      V.getValueType() != MVT::i32 &&
      V.getValueType() != MVT::i64)
    return Result;

  // Check the constant mask.  Invert it so that the bits being masked out are
  // 0 and the bits being kept are 1.  Use getSExtValue so that leading bits
  // follow the sign bit for uniformity.
  uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
  unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
  if (NotMaskLZ & 7) return Result;  // Must be multiple of a byte.
  unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
  if (NotMaskTZ & 7) return Result;  // Must be multiple of a byte.
  if (NotMaskLZ == 64) return Result;  // All zero mask.

  // See if we have a continuous run of bits.  If so, we have 0*1+0*
  if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
    return Result;

  // Adjust NotMaskLZ down to be from the actual size of the int instead of i64
  // (the sign extension above padded narrower types with leading copies of the
  // sign bit).
  if (V.getValueType() != MVT::i64 && NotMaskLZ)
    NotMaskLZ -= 64-V.getValueSizeInBits();

  unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
  switch (MaskedBytes) {
  case 1:
  case 2:
  case 4: break;
  default: return Result; // All one mask, or 5-byte mask.
  }

  // Verify that the first bit starts at a multiple of mask so that the access
  // is aligned the same as the access width.
  if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;

  // Success: (number of masked bytes, byte offset of the masked run).
  Result.first = MaskedBytes;
  Result.second = NotMaskTZ/8;
  return Result;
}


/// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that
/// provides a value as specified by MaskInfo.  If so, replace the specified
/// store with a narrower store of truncated IVal.
/// Returns the new store node, or null if no replacement is possible.
static SDNode *
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
                                SDValue IVal, StoreSDNode *St,
                                DAGCombiner *DC) {
  unsigned NumBytes = MaskInfo.first;
  unsigned ByteShift = MaskInfo.second;
  SelectionDAG &DAG = DC->getDAG();

  // Check to see if IVal is all zeros in the part being masked in by the 'or'
  // that uses this.  If not, this is not a replacement.
  APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
                                  ByteShift*8, (ByteShift+NumBytes)*8);
  if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;

  // Check that it is legal on the target to do this.  It is legal if the new
  // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
  // legalization.
  MVT VT = MVT::getIntegerVT(NumBytes*8);
  if (!DC->isTypeLegal(VT))
    return 0;

  // Okay, we can do this!  Replace the 'St' store with a store of IVal that is
  // shifted by ByteShift and truncated down to NumBytes.
  if (ByteShift)
    IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
                       DAG.getConstant(ByteShift*8,
                                    DC->getShiftAmountTy(IVal.getValueType())));

  // Figure out the offset for the store and the alignment of the access.
  unsigned StOffset;
  unsigned NewAlign = St->getAlignment();

  // On big-endian targets the masked byte range counts from the most
  // significant end of the value, so flip the offset.
  if (DAG.getTargetLoweringInfo().isLittleEndian())
    StOffset = ByteShift;
  else
    StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;

  SDValue Ptr = St->getBasePtr();
  if (StOffset) {
    Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
                      Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
    NewAlign = MinAlign(NewAlign, StOffset);
  }

  // Truncate down to the new size.
  IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);

  ++OpsNarrowed;
  return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
                      St->getPointerInfo().getWithOffset(StOffset),
                      false, false, NewAlign).getNode();
}


/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
/// one of 'or', 'xor', and 'and' of immediates.  If 'op' is only touching some
/// of the loaded bits, try narrowing the load and store if it would end up
/// being a win for performance or code size.
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  if (ST->isVolatile())
    return SDValue();

  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr = ST->getBasePtr();
  EVT VT = Value.getValueType();

  if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse())
    return SDValue();

  unsigned Opc = Value.getOpcode();

  // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
  // is a byte mask indicating a consecutive number of bytes, check to see if
  // Y is known to provide just those bytes.  If so, we try to replace the
  // load + replace + store sequence with a single (narrower) store, which makes
  // the load dead.
  if (Opc == ISD::OR) {
    std::pair<unsigned, unsigned> MaskedLoad;
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                  Value.getOperand(1), ST,this))
        return SDValue(NewST, 0);

    // Or is commutative, so try swapping X and Y.
    MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
    if (MaskedLoad.first)
      if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
                                                  Value.getOperand(0), ST,this))
        return SDValue(NewST, 0);
  }

  if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
      Value.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N0 = Value.getOperand(0);
  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
      Chain == SDValue(N0.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(N0);
    if (LD->getBasePtr() != Ptr ||
        LD->getPointerInfo().getAddrSpace() !=
        ST->getPointerInfo().getAddrSpace())
      return SDValue();

    // Find the type to which to narrow the load / op / store.
    SDValue N1 = Value.getOperand(1);
    unsigned BitWidth = N1.getValueSizeInBits();
    APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue();
    // For AND, invert so that Imm marks the bits actually being changed.
    if (Opc == ISD::AND)
      Imm ^= APInt::getAllOnesValue(BitWidth);
    if (Imm == 0 || Imm.isAllOnesValue())
      return SDValue();
    unsigned ShAmt = Imm.countTrailingZeros();
    unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1;
    unsigned NewBW = NextPowerOf2(MSB - ShAmt);
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    // Widen until the target considers the op legal and the narrowing
    // profitable.
    while (NewBW < BitWidth &&
           !(TLI.isOperationLegalOrCustom(Opc, NewVT) &&
             TLI.isNarrowingProfitable(VT, NewVT))) {
      NewBW = NextPowerOf2(NewBW);
      NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW);
    }
    if (NewBW >= BitWidth)
      return SDValue();

    // If the changed bits do not start at a type-bitwidth boundary, back up
    // to the previous boundary.
    if (ShAmt % NewBW)
      ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW;
    APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, ShAmt + NewBW);
    if ((Imm & Mask) == Imm) {
      APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW);
      if (Opc == ISD::AND)
        NewImm ^= APInt::getAllOnesValue(NewBW);
      uint64_t PtrOff = ShAmt / 8;
      // For big endian targets, we need to adjust the offset to the pointer to
      // load the correct bytes.
      if (TLI.isBigEndian())
        PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;

      unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
      Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
      if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
        return SDValue();

      SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
                                   Ptr.getValueType(), Ptr,
                                   DAG.getConstant(PtrOff, Ptr.getValueType()));
      SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
                                  LD->getChain(), NewPtr,
                                  LD->getPointerInfo().getWithOffset(PtrOff),
                                  LD->isVolatile(), LD->isNonTemporal(),
                                  LD->isInvariant(), NewAlign);
      SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
                                   DAG.getConstant(NewImm, NewVT));
      SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
                                   NewVal, NewPtr,
                                   ST->getPointerInfo().getWithOffset(PtrOff),
                                   false, false, NewAlign);

      AddToWorkList(NewPtr.getNode());
      AddToWorkList(NewLD.getNode());
      AddToWorkList(NewVal.getNode());
      WorkListRemover DeadNodes(*this);
      DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1));
      ++OpsNarrowed;
      return NewST;
    }
  }

  return SDValue();
}

/// TransformFPLoadStorePair - For a given floating point load / store pair,
/// if the load value isn't used by any other operations, then consider
/// transforming the pair to integer load / store operations if the target
/// deems the transformation profitable.
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  // The load must feed only this store and be directly on its chain.
  if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) &&
      Value.hasOneUse() &&
      Chain == SDValue(Value.getNode(), 1)) {
    LoadSDNode *LD = cast<LoadSDNode>(Value);
    EVT VT = LD->getMemoryVT();
    if (!VT.isFloatingPoint() ||
        VT != ST->getMemoryVT() ||
        LD->isNonTemporal() ||
        ST->isNonTemporal() ||
        LD->getPointerInfo().getAddrSpace() != 0 ||
        ST->getPointerInfo().getAddrSpace() != 0)
      return SDValue();

    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
        !TLI.isOperationLegal(ISD::STORE, IntVT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
        !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT))
      return SDValue();

    unsigned LDAlign = LD->getAlignment();
    unsigned STAlign = ST->getAlignment();
    Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
    if (LDAlign < ABIAlign || STAlign < ABIAlign)
      return SDValue();

    SDValue NewLD = DAG.getLoad(IntVT, Value.getDebugLoc(),
                                LD->getChain(), LD->getBasePtr(),
                                LD->getPointerInfo(),
                                false, false, false, LDAlign);

    SDValue NewST = DAG.getStore(NewLD.getValue(1), N->getDebugLoc(),
                                 NewLD, ST->getBasePtr(),
                                 ST->getPointerInfo(),
                                 false, false, STAlign);

    AddToWorkList(NewLD.getNode());
    AddToWorkList(NewST.getNode());
    WorkListRemover DeadNodes(*this);
    DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1));
    ++LdStFP2Int;
    return NewST;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSTORE(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue Chain = ST->getChain();
  SDValue Value = ST->getValue();
  SDValue Ptr = ST->getBasePtr();

  // If this is a store of a bit convert, store the input value if the
  // resultant store does not need a higher alignment than the original.
  if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
      ST->isUnindexed()) {
    unsigned OrigAlign = ST->getAlignment();
    EVT SVT = Value.getOperand(0).getValueType();
    unsigned Align = TLI.getTargetData()->
      getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
    if (Align <= OrigAlign &&
        ((!LegalOperations && !ST->isVolatile()) ||
         TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
      return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
                          Ptr, ST->getPointerInfo(), ST->isVolatile(),
                          ST->isNonTemporal(), OrigAlign);
  }

  // Turn 'store undef, Ptr' -> nothing.
  if (Value.getOpcode() == ISD::UNDEF && ST->isUnindexed())
    return Chain;

  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Value)) {
    // NOTE: If the original store is volatile, this transform must not increase
    // the number of stores.  For example, on x86-32 an f64 can be stored in one
    // processor operation but an i64 (which is not legal) requires two.  So the
    // transform should not be done in this case.
    if (Value.getOpcode() != ISD::TargetConstantFP) {
      SDValue Tmp;
      switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unknown FP type");
      case MVT::f16:    // We don't do this for these yet.
      case MVT::f80:
      case MVT::f128:
      case MVT::ppcf128:
        break;
      case MVT::f32:
        if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
          Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                              bitcastToAPInt().getZExtValue(), MVT::i32);
          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                              ST->isNonTemporal(), ST->getAlignment());
        }
        break;
      case MVT::f64:
        if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
             !ST->isVolatile()) ||
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
          Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                getZExtValue(), MVT::i64);
          return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                              ST->isNonTemporal(), ST->getAlignment());
        }

        if (!ST->isVolatile() &&
            TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
          // Many FP stores are not made apparent until after legalize, e.g. for
          // argument passing.  Since this is so common, custom legalize the
          // 64-bit integer store into two 32-bit stores.
          uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
          SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
          SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
          if (TLI.isBigEndian()) std::swap(Lo, Hi);

          unsigned Alignment = ST->getAlignment();
          bool isVolatile = ST->isVolatile();
          bool isNonTemporal = ST->isNonTemporal();

          // Both halves hang off the original chain; the TokenFactor below
          // ties them back together.
          SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
                                     Ptr, ST->getPointerInfo(),
                                     isVolatile, isNonTemporal,
                                     ST->getAlignment());
          Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
                            DAG.getConstant(4, Ptr.getValueType()));
          Alignment = MinAlign(Alignment, 4U);
          SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
                                     Ptr, ST->getPointerInfo().getWithOffset(4),
                                     isVolatile, isNonTemporal,
                                     Alignment);
          return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
                             St0, St1);
        }

        break;
      }
    }
  }

  // Try to infer better alignment information than the store already has.
  if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
    if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
      if (Align > ST->getAlignment())
        return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
                                 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                                 ST->isVolatile(), ST->isNonTemporal(), Align);
    }
  }

  // Try transforming a pair of floating point load / store ops to integer
  // load / store ops.
  SDValue NewST = TransformFPLoadStorePair(N);
  if (NewST.getNode())
    return NewST;

  if (CombinerAA) {
    // Walk up chain skipping non-aliasing memory nodes.
    SDValue BetterChain = FindBetterChain(N, Chain);

    // If there is a better chain.
    if (Chain != BetterChain) {
      SDValue ReplStore;

      // Replace the chain to avoid dependency.
      if (ST->isTruncatingStore()) {
        ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
                                      ST->getPointerInfo(),
                                      ST->getMemoryVT(), ST->isVolatile(),
                                      ST->isNonTemporal(), ST->getAlignment());
      } else {
        ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
                                 ST->getPointerInfo(),
                                 ST->isVolatile(), ST->isNonTemporal(),
                                 ST->getAlignment());
      }

      // Create token to keep both nodes around.
      SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
                                  MVT::Other, Chain, ReplStore);

      // Make sure the new and old chains are cleaned up.
      AddToWorkList(Token.getNode());

      // Don't add users to work list.
      return CombineTo(N, Token, false);
    }
  }

  // Try transforming N to an indexed store.
  if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
    return SDValue(N, 0);

  // FIXME: is there such a thing as a truncating indexed store?
  if (ST->isTruncatingStore() && ST->isUnindexed() &&
      Value.getValueType().isInteger()) {
    // See if we can simplify the input to this truncstore with knowledge that
    // only the low bits are being used.  For example:
    // "truncstore (or (shl x, 8), y), i8"  -> "truncstore y, i8"
    SDValue Shorter =
      GetDemandedBits(Value,
                      APInt::getLowBitsSet(
                        Value.getValueType().getScalarType().getSizeInBits(),
                        ST->getMemoryVT().getScalarType().getSizeInBits()));
    AddToWorkList(Value.getNode());
    if (Shorter.getNode())
      return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
                               Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                               ST->isVolatile(), ST->isNonTemporal(),
                               ST->getAlignment());

    // Otherwise, see if we can simplify the operation with
    // SimplifyDemandedBits, which only works if the value has a single use.
    if (SimplifyDemandedBits(Value,
                        APInt::getLowBitsSet(
                          Value.getValueType().getScalarType().getSizeInBits(),
                          ST->getMemoryVT().getScalarType().getSizeInBits())))
      // Something changed; report N as combined so it gets revisited.
      return SDValue(N, 0);
  }

  // If this is a load followed by a store to the same location, then the store
  // is dead/noop.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) {
    if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() &&
        ST->isUnindexed() && !ST->isVolatile() &&
        // There can't be any side effects between the load and store, such as
        // a call or store.
        Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) {
      // The store is dead, remove it.
      return Chain;
    }
  }

  // If this is an FP_ROUND or TRUNC followed by a store, fold this into a
  // truncating store.  We can do this even if this is already a truncstore.
  if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE)
      && Value.getNode()->hasOneUse() && ST->isUnindexed() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            ST->getMemoryVT())) {
    return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
                             Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                             ST->isVolatile(), ST->isNonTemporal(),
                             ST->getAlignment());
  }

  return ReduceLoadOpStoreWidth(N);
}

SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
  SDValue InVec = N->getOperand(0);
  SDValue InVal = N->getOperand(1);
  SDValue EltNo = N->getOperand(2);
  DebugLoc dl = N->getDebugLoc();

  // If the inserted element is an UNDEF, just use the input vector.
  if (InVal.getOpcode() == ISD::UNDEF)
    return InVec;

  EVT VT = InVec.getValueType();

  // If we can't generate a legal BUILD_VECTOR, exit
  if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT))
    return SDValue();

  // Check that we know which element is being inserted
  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();
  unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();

  // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
  // be converted to a BUILD_VECTOR).  Fill in the Ops vector with the
  // vector elements.
  SmallVector<SDValue, 8> Ops;
  if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
    Ops.append(InVec.getNode()->op_begin(),
               InVec.getNode()->op_end());
  } else if (InVec.getOpcode() == ISD::UNDEF) {
    unsigned NElts = VT.getVectorNumElements();
    Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
  } else {
    return SDValue();
  }

  // Insert the element.  An out-of-range index leaves Ops untouched and the
  // BUILD_VECTOR below is rebuilt from the original elements.
  if (Elt < Ops.size()) {
    // All the operands of BUILD_VECTOR must have the same type;
    // we enforce that here.
    EVT OpVT = Ops[0].getValueType();
    if (InVal.getValueType() != OpVT)
      InVal = OpVT.bitsGT(InVal.getValueType()) ?
                DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
                DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
    Ops[Elt] = InVal;
  }

  // Return the new vector
  return DAG.getNode(ISD::BUILD_VECTOR, dl,
                     VT, &Ops[0], Ops.size());
}

SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
  // (vextract (scalar_to_vector val, 0) -> val
  SDValue InVec = N->getOperand(0);
  EVT VT = InVec.getValueType();
  EVT NVT = N->getValueType(0);

  if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
    SDValue InOp = InVec.getOperand(0);
    if (InOp.getValueType() != NVT) {
      assert(InOp.getValueType().isInteger() && NVT.isInteger());
      return DAG.getSExtOrTrunc(InOp, InVec.getDebugLoc(), NVT);
    }
    return InOp;
  }

  SDValue EltNo = N->getOperand(1);
  bool ConstEltNo = isa<ConstantSDNode>(EltNo);

  // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
  // We only perform this optimization before the op legalization phase because
  // we may introduce new vector instructions which are not backed by TD patterns.
  // For example on AVX, extracting elements from a wide vector without using
  // extract_subvector.
  if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
      && ConstEltNo && !LegalOperations) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
    int NumElem = VT.getVectorNumElements();
    ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
    // Find the new index to extract from.
    int OrigElt = SVOp->getMaskElt(Elt);

    // Extracting an undef index is undef.
    if (OrigElt == -1)
      return DAG.getUNDEF(NVT);

    // Select the right vector half to extract from.
    if (OrigElt < NumElem) {
      InVec = InVec->getOperand(0);
    } else {
      InVec = InVec->getOperand(1);
      OrigElt -= NumElem;
    }

    EVT IndexTy = N->getOperand(1).getValueType();
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, N->getDebugLoc(), NVT,
                       InVec, DAG.getConstant(OrigElt, IndexTy));
  }

  // Perform only after legalization to ensure build_vector / vector_shuffle
  // optimizations have already been done.
  if (!LegalOperations) return SDValue();

  // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size)
  // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr)

  if (ConstEltNo) {
    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
    bool NewLoad = false;
    bool BCNumEltsChanged = false;
    EVT ExtVT = VT.getVectorElementType();
    EVT LVT = ExtVT;

    // If the result of load has to be truncated, then it's not necessarily
    // profitable.
    if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT))
      return SDValue();

    // Look through a bitcast of the source vector; remember whether it
    // changed the element count, since that invalidates shuffle-mask
    // examination below.
    if (InVec.getOpcode() == ISD::BITCAST) {
      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      EVT BCVT = InVec.getOperand(0).getValueType();
      if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
        return SDValue();
      if (VT.getVectorNumElements() != BCVT.getVectorNumElements())
        BCNumEltsChanged = true;
      InVec = InVec.getOperand(0);
      ExtVT = BCVT.getVectorElementType();
      NewLoad = true;
    }

    // Find the load feeding the extract: either directly, through a
    // scalar_to_vector, or through a vector_shuffle of a load.
    LoadSDNode *LN0 = NULL;
    const ShuffleVectorSDNode *SVN = NULL;
    if (ISD::isNormalLoad(InVec.getNode())) {
      LN0 = cast<LoadSDNode>(InVec);
    } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR &&
               InVec.getOperand(0).getValueType() == ExtVT &&
               ISD::isNormalLoad(InVec.getOperand(0).getNode())) {
      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      LN0 = cast<LoadSDNode>(InVec.getOperand(0));
    } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) {
      // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1)
      // =>
      // (load $addr+1*size)

      // Don't duplicate a load with other uses.
      if (!InVec.hasOneUse())
        return SDValue();

      // If the bit convert changed the number of elements, it is unsafe
      // to examine the mask.
      if (BCNumEltsChanged)
        return SDValue();

      // Select the input vector, guarding against out of range extract vector.
      // NOTE(review): Elt == NumElems slips past this '>' guard and is passed
      // to getMaskElt, which indexes the mask out of range -- this looks like
      // it should be '>='; confirm against upstream history before changing.
      unsigned NumElems = VT.getVectorNumElements();
      int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
      InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);

      if (InVec.getOpcode() == ISD::BITCAST) {
        // Don't duplicate a load with other uses.
        if (!InVec.hasOneUse())
          return SDValue();

        InVec = InVec.getOperand(0);
      }
      if (ISD::isNormalLoad(InVec.getNode())) {
        LN0 = cast<LoadSDNode>(InVec);
        Elt = (Idx < (int)NumElems) ?
          Idx : Idx - (int)NumElems;
      }
    }

    // Make sure we found a non-volatile load and the extractelement is
    // the only use.
    if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile())
      return SDValue();

    // If Idx was -1 above, Elt is going to be -1, so just return undef.
    if (Elt == -1)
      return DAG.getUNDEF(LVT);

    unsigned Align = LN0->getAlignment();
    if (NewLoad) {
      // Check the resultant load doesn't need a higher alignment than the
      // original load.
      unsigned NewAlign =
        TLI.getTargetData()
            ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));

      if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
        return SDValue();

      Align = NewAlign;
    }

    // Compute the address of the extracted element: base + Elt * sizeof(elt),
    // with the offset mirrored for big-endian targets.
    SDValue NewPtr = LN0->getBasePtr();
    unsigned PtrOff = 0;

    if (Elt) {
      PtrOff = LVT.getSizeInBits() * Elt / 8;
      EVT PtrType = NewPtr.getValueType();
      if (TLI.isBigEndian())
        PtrOff = VT.getSizeInBits() / 8 - PtrOff;
      NewPtr = DAG.getNode(ISD::ADD, N->getDebugLoc(), PtrType, NewPtr,
                           DAG.getConstant(PtrOff, PtrType));
    }

    // The replacement we need to do here is a little tricky: we need to
    // replace an extractelement of a load with a load.
    // Use ReplaceAllUsesOfValuesWith to do the replacement.
    // Note that this replacement assumes that the extractvalue is the only
    // use of the load; that's okay because we don't want to perform this
    // transformation in other cases anyway.
    SDValue Load;
    SDValue Chain;
    if (NVT.bitsGT(LVT)) {
      // If the result type of vextract is wider than the load, then issue an
      // extending load instead.
      ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, LVT)
        ? ISD::ZEXTLOAD : ISD::EXTLOAD;
      Load = DAG.getExtLoad(ExtType, N->getDebugLoc(), NVT, LN0->getChain(),
                            NewPtr, LN0->getPointerInfo().getWithOffset(PtrOff),
                            LVT, LN0->isVolatile(), LN0->isNonTemporal(),Align);
      Chain = Load.getValue(1);
    } else {
      Load = DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
                         LN0->getPointerInfo().getWithOffset(PtrOff),
                         LN0->isVolatile(), LN0->isNonTemporal(),
                         LN0->isInvariant(), Align);
      Chain = Load.getValue(1);
      if (NVT.bitsLT(LVT))
        Load = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), NVT, Load);
      else
        Load = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), NVT, Load);
    }
    // Replace both the extract's value and the old load's chain result.
    WorkListRemover DeadNodes(*this);
    SDValue From[] = { SDValue(N, 0), SDValue(LN0,1) };
    SDValue To[] = { Load, Chain };
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
    // Since we're explicitly calling ReplaceAllUses, add the new node to the
    // worklist explicitly as well.
    AddToWorkList(Load.getNode());
    AddUsersToWorkList(Load.getNode()); // Add users too
    // Make sure to revisit this node to clean it up; it will usually be dead.
    AddToWorkList(N);
    return SDValue(N, 0);
  }

  return SDValue();
}

/// visitBUILD_VECTOR - Simplify BUILD_VECTOR nodes: all-undef folding,
/// merging of extended scalars into a narrower BUILD_VECTOR, and turning a
/// BUILD_VECTOR of extracts from at most two vectors into a shuffle.
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
  unsigned NumInScalars = N->getNumOperands();
  DebugLoc dl = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  // A vector built entirely of undefs is undef.
  if (ISD::allOperandsUndef(N))
    return DAG.getUNDEF(VT);

  // Check to see if this is a BUILD_VECTOR of a bunch of values
  // which come from any_extend or zero_extend nodes. If so, we can create
  // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR
  // optimizations. We do not handle sign-extend because we can't fill the sign
  // using shuffles.
  // Common source type of all the extended operands, or MVT::Other if the
  // operands are not uniformly (any|zero)-extended from one type.
  EVT SourceType = MVT::Other;
  // True while every non-undef operand seen so far is an ANY_EXTEND; if so,
  // the filler lanes may be undef instead of zero.
  bool AllAnyExt = true;

  for (unsigned i = 0; i != NumInScalars; ++i) {
    SDValue In = N->getOperand(i);
    // Ignore undef inputs.
    if (In.getOpcode() == ISD::UNDEF) continue;

    bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND;
    bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND;

    // Abort if the element is not an extension.
    if (!ZeroExt && !AnyExt) {
      SourceType = MVT::Other;
      break;
    }

    // The input is a ZeroExt or AnyExt. Check the original type.
    EVT InTy = In.getOperand(0).getValueType();

    // Check that all of the widened source types are the same.
    if (SourceType == MVT::Other)
      // First time.
      SourceType = InTy;
    else if (InTy != SourceType) {
      // Multiple income types. Abort.
      SourceType = MVT::Other;
      break;
    }

    // Check if all of the extends are ANY_EXTENDs.
    AllAnyExt &= AnyExt;
  }

  // In order to have valid types, all of the inputs must be extended from the
  // same source type and all of the inputs must be any or zero extend.
  // Scalar sizes must be a power of two.
  EVT OutScalarTy = N->getValueType(0).getScalarType();
  bool ValidTypes = SourceType != MVT::Other &&
                 isPowerOf2_32(OutScalarTy.getSizeInBits()) &&
                 isPowerOf2_32(SourceType.getSizeInBits());

  // We perform this optimization post type-legalization because
  // the type-legalizer often scalarizes integer-promoted vectors.
  // Performing this optimization before may create bit-casts which
  // will be type-legalized to complex code sequences.
  // We perform this optimization only before the operation legalizer because we
  // may introduce illegal operations.
  // Create a new simpler BUILD_VECTOR sequence which other optimizations can
  // turn into a single shuffle instruction.
  if ((Level == AfterLegalizeVectorOps || Level == AfterLegalizeTypes) &&
      ValidTypes) {
    bool isLE = TLI.isLittleEndian();
    unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
    assert(ElemRatio > 1 && "Invalid element size ratio");
    // Lanes that are not populated by a source scalar get this filler: undef
    // is only safe when every input was an ANY_EXTEND; otherwise the high
    // lanes must be zero to preserve zero-extension semantics.
    SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
                                 DAG.getConstant(0, SourceType);

    unsigned NewBVElems = ElemRatio * N->getValueType(0).getVectorNumElements();
    SmallVector<SDValue, 8> Ops(NewBVElems, Filler);

    // Populate the new build_vector
    for (unsigned i=0; i < N->getNumOperands(); ++i) {
      SDValue Cast = N->getOperand(i);
      assert((Cast.getOpcode() == ISD::ANY_EXTEND ||
              Cast.getOpcode() == ISD::ZERO_EXTEND ||
              Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode");
      SDValue In;
      if (Cast.getOpcode() == ISD::UNDEF)
        In = DAG.getUNDEF(SourceType);
      else
        In = Cast->getOperand(0);
      // On little-endian targets the source scalar occupies the lowest lane
      // of its group; on big-endian targets it occupies the highest lane.
      unsigned Index = isLE ? (i * ElemRatio) :
                              (i * ElemRatio + (ElemRatio - 1));

      assert(Index < Ops.size() && "Invalid index");
      Ops[Index] = In;
    }

    // The type of the new BUILD_VECTOR node.
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems);
    assert(VecVT.getSizeInBits() == N->getValueType(0).getSizeInBits() &&
           "Invalid vector size");
    // Check if the new vector type is legal.
    if (!isTypeLegal(VecVT)) return SDValue();

    // Make the new BUILD_VECTOR.
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                             VecVT, &Ops[0], Ops.size());

    // The new BUILD_VECTOR node has the potential to be further optimized.
    AddToWorkList(BV.getNode());
    // Bitcast to the desired type.
    return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), BV);
  }

  // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
  // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from
  // at most two distinct vectors, turn this into a shuffle node.

  // May only combine to shuffle after legalize if shuffle is legal.
  if (LegalOperations &&
      !TLI.isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))
    return SDValue();

  // Collect the (at most two) distinct vectors that the operands extract
  // from; both are cleared to signal failure.
  SDValue VecIn1, VecIn2;
  for (unsigned i = 0; i != NumInScalars; ++i) {
    // Ignore undef inputs.
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    // If this input is something other than a EXTRACT_VECTOR_ELT with a
    // constant index, bail out.
    if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) {
      VecIn1 = VecIn2 = SDValue(0, 0);
      break;
    }

    // We allow up to two distinct input vectors.
    SDValue ExtractedFromVec = N->getOperand(i).getOperand(0);
    if (ExtractedFromVec == VecIn1 || ExtractedFromVec == VecIn2)
      continue;

    if (VecIn1.getNode() == 0) {
      VecIn1 = ExtractedFromVec;
    } else if (VecIn2.getNode() == 0) {
      VecIn2 = ExtractedFromVec;
    } else {
      // Too many inputs.
      VecIn1 = VecIn2 = SDValue(0, 0);
      break;
    }
  }

  // If everything is good, we can make a shuffle operation.
  if (VecIn1.getNode()) {
    SmallVector<int, 8> Mask;
    for (unsigned i = 0; i != NumInScalars; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) {
        Mask.push_back(-1);
        continue;
      }

      // If extracting from the first vector, just use the index directly.
7972 SDValue Extract = N->getOperand(i); 7973 SDValue ExtVal = Extract.getOperand(1); 7974 if (Extract.getOperand(0) == VecIn1) { 7975 unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue(); 7976 if (ExtIndex > VT.getVectorNumElements()) 7977 return SDValue(); 7978 7979 Mask.push_back(ExtIndex); 7980 continue; 7981 } 7982 7983 // Otherwise, use InIdx + VecSize 7984 unsigned Idx = cast<ConstantSDNode>(ExtVal)->getZExtValue(); 7985 Mask.push_back(Idx+NumInScalars); 7986 } 7987 7988 // We can't generate a shuffle node with mismatched input and output types. 7989 // Attempt to transform a single input vector to the correct type. 7990 if ((VT != VecIn1.getValueType())) { 7991 // We don't support shuffeling between TWO values of different types. 7992 if (VecIn2.getNode() != 0) 7993 return SDValue(); 7994 7995 // We only support widening of vectors which are half the size of the 7996 // output registers. For example XMM->YMM widening on X86 with AVX. 7997 if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits()) 7998 return SDValue(); 7999 8000 // Widen the input vector by adding undef values. 8001 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 8002 VecIn1, DAG.getUNDEF(VecIn1.getValueType())); 8003 } 8004 8005 // If VecIn2 is unused then change it to undef. 8006 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); 8007 8008 // Check that we were able to transform all incoming values to the same type. 8009 if (VecIn2.getValueType() != VecIn1.getValueType() || 8010 VecIn1.getValueType() != VT) 8011 return SDValue(); 8012 8013 // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes. 8014 if (!isTypeLegal(VT)) 8015 return SDValue(); 8016 8017 // Return the new VECTOR_SHUFFLE node. 
8018 SDValue Ops[2]; 8019 Ops[0] = VecIn1; 8020 Ops[1] = VecIn2; 8021 return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]); 8022 } 8023 8024 return SDValue(); 8025} 8026 8027SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { 8028 // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of 8029 // EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector 8030 // inputs come from at most two distinct vectors, turn this into a shuffle 8031 // node. 8032 8033 // If we only have one input vector, we don't need to do any concatenation. 8034 if (N->getNumOperands() == 1) 8035 return N->getOperand(0); 8036 8037 // Check if all of the operands are undefs. 8038 if (ISD::allOperandsUndef(N)) 8039 return DAG.getUNDEF(N->getValueType(0)); 8040 8041 return SDValue(); 8042} 8043 8044SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { 8045 EVT NVT = N->getValueType(0); 8046 SDValue V = N->getOperand(0); 8047 8048 if (V->getOpcode() == ISD::INSERT_SUBVECTOR) { 8049 // Handle only simple case where vector being inserted and vector 8050 // being extracted are of same type, and are half size of larger vectors. 8051 EVT BigVT = V->getOperand(0).getValueType(); 8052 EVT SmallVT = V->getOperand(1).getValueType(); 8053 if (NVT != SmallVT || NVT.getSizeInBits()*2 != BigVT.getSizeInBits()) 8054 return SDValue(); 8055 8056 // Only handle cases where both indexes are constants with the same type. 
    ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(N->getOperand(1));
    ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(V->getOperand(2));

    if (InsIdx && ExtIdx &&
        InsIdx->getValueType(0).getSizeInBits() <= 64 &&
        ExtIdx->getValueType(0).getSizeInBits() <= 64) {
      // Combine:
      //    (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx)
      // Into:
      //    indices are equal => V1
      //    otherwise => (extract_subvec V1, ExtIdx)
      if (InsIdx->getZExtValue() == ExtIdx->getZExtValue())
        return V->getOperand(1);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, N->getDebugLoc(), NVT,
                         V->getOperand(0), N->getOperand(1));
    }
  }

  return SDValue();
}

/// visitVECTOR_SHUFFLE - Canonicalize and simplify VECTOR_SHUFFLE nodes:
/// fold all-undef operands, normalize which operand is undef, fold splats of
/// uniform build_vectors, and collapse a shuffle that exactly reverses a
/// previous shuffle.
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG");

  // Canonicalize shuffle undef, undef -> undef
  if (N0.getOpcode() == ISD::UNDEF && N1.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(VT);

  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);

  // Canonicalize shuffle v, v -> v, undef.  Mask entries >= NumElts refer to
  // the (identical) second operand, so remap them into the first.
  if (N0 == N1) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) Idx -= NumElts;
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N0.getOpcode() == ISD::UNDEF) {
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= 0) {
        if (Idx < (int)NumElts)
          Idx += NumElts;
        else
          Idx -= NumElts;
      }
      NewMask.push_back(Idx);
    }
    return DAG.getVectorShuffle(VT, N->getDebugLoc(), N1, DAG.getUNDEF(VT),
                                &NewMask[0]);
  }

  // Remove references to rhs if it is undef: any mask entry selecting from
  // the undef operand is itself undef (-1).
  if (N1.getOpcode() == ISD::UNDEF) {
    bool Changed = false;
    SmallVector<int, 8> NewMask;
    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      if (Idx >= (int)NumElts) {
        Idx = -1;
        Changed = true;
      }
      NewMask.push_back(Idx);
    }
    if (Changed)
      return DAG.getVectorShuffle(VT, N->getDebugLoc(), N0, N1, &NewMask[0]);
  }

  // If it is a splat, check if the argument vector is another splat or a
  // build_vector with all scalar elements the same.
  if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
    SDNode *V = N0.getNode();

    // If this is a bit convert that changes the element type of the vector but
    // not the number of vector elements, look through it.  Be careful not to
    // look though conversions that change things like v4f32 to v2f64.
    if (V->getOpcode() == ISD::BITCAST) {
      SDValue ConvInput = V->getOperand(0);
      if (ConvInput.getValueType().isVector() &&
          ConvInput.getValueType().getVectorNumElements() == NumElts)
        V = ConvInput.getNode();
    }

    if (V->getOpcode() == ISD::BUILD_VECTOR) {
      assert(V->getNumOperands() == NumElts &&
             "BUILD_VECTOR has wrong number of operands");
      SDValue Base;
      bool AllSame = true;
      // Find the first non-undef operand to compare the rest against.
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i).getOpcode() != ISD::UNDEF) {
          Base = V->getOperand(i);
          break;
        }
      }
      // Splat of <u, u, u, u>, return <u, u, u, u>
      if (!Base.getNode())
        return N0;
      for (unsigned i = 0; i != NumElts; ++i) {
        if (V->getOperand(i) != Base) {
          AllSame = false;
          break;
        }
      }
      // Splat of <x, x, x, x>, return <x, x, x, x>
      if (AllSame)
        return N0;
    }
  }

  // If this shuffle node is simply a swizzle of another shuffle node,
  // and it reverses the swizzle of the previous shuffle then we can
  // optimize shuffle(shuffle(x, undef), undef) -> x.
  if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG &&
      N1.getOpcode() == ISD::UNDEF) {

    ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0);

    // Shuffle nodes can only reverse shuffles with a single non-undef value.
    if (N0.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    // The incoming shuffle must be of the same type as the result of the
    // current shuffle.
    assert(OtherSV->getOperand(0).getValueType() == VT &&
           "Shuffle types don't match");

    for (unsigned i = 0; i != NumElts; ++i) {
      int Idx = SVN->getMaskElt(i);
      assert(Idx < (int)NumElts && "Index references undef operand");
      // Next, this index comes from the first value, which is the incoming
      // shuffle. Adopt the incoming index.
      if (Idx >= 0)
        Idx = OtherSV->getMaskElt(Idx);

      // The combined shuffle must map each index to itself.
      if (Idx >= 0 && (unsigned)Idx != i)
        return SDValue();
    }

    return OtherSV->getOperand(0);
  }

  return SDValue();
}

/// visitMEMBARRIER - If the target allows it, fold a MEMBARRIER that directly
/// precedes an atomic operation into the atomic operation by rewiring the
/// atomic's chain past the fence.
SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
  if (!TLI.getShouldFoldAtomicFences())
    return SDValue();

  SDValue atomic = N->getOperand(0);
  // Only fold into the atomic opcodes listed here.
  switch (atomic.getOpcode()) {
    case ISD::ATOMIC_CMP_SWAP:
    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    case ISD::ATOMIC_LOAD_SUB:
    case ISD::ATOMIC_LOAD_AND:
    case ISD::ATOMIC_LOAD_OR:
    case ISD::ATOMIC_LOAD_XOR:
    case ISD::ATOMIC_LOAD_NAND:
    case ISD::ATOMIC_LOAD_MIN:
    case ISD::ATOMIC_LOAD_MAX:
    case ISD::ATOMIC_LOAD_UMIN:
    case ISD::ATOMIC_LOAD_UMAX:
      break;
    default:
      return SDValue();
  }

  SDValue fence = atomic.getOperand(0);
  if (fence.getOpcode() != ISD::MEMBARRIER)
    return SDValue();

  // Re-point the atomic's chain operand at the fence's input chain, skipping
  // the fence.  ATOMIC_CMP_SWAP has one more operand than the RMW ops.
  switch (atomic.getOpcode()) {
    case ISD::ATOMIC_CMP_SWAP:
      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
                                            fence.getOperand(0),
                                            atomic.getOperand(1),
                                            atomic.getOperand(2),
                                            atomic.getOperand(3)),
                     atomic.getResNo());
    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    case ISD::ATOMIC_LOAD_SUB:
    case ISD::ATOMIC_LOAD_AND:
    case ISD::ATOMIC_LOAD_OR:
    case ISD::ATOMIC_LOAD_XOR:
    case ISD::ATOMIC_LOAD_NAND:
    case ISD::ATOMIC_LOAD_MIN:
    case ISD::ATOMIC_LOAD_MAX:
    case ISD::ATOMIC_LOAD_UMIN:
    case ISD::ATOMIC_LOAD_UMAX:
      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
                                            fence.getOperand(0),
                                            atomic.getOperand(1),
                                            atomic.getOperand(2)),
                     atomic.getResNo());
    default:
      return SDValue();
  }
}

/// XformToShuffleWithZero - Returns a vector_shuffle if it is able to
/// transform an AND to a vector_shuffle with the destination vector and a
/// zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
///      vector_shuffle V, Zero, <0, 4, 2, 4>
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
  EVT VT = N->getValueType(0);
  DebugLoc dl = N->getDebugLoc();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (N->getOpcode() == ISD::AND) {
    if (RHS.getOpcode() == ISD::BITCAST)
      RHS = RHS.getOperand(0);
    if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<int, 8> Indices;
      unsigned NumElts = RHS.getNumOperands();
      // Each mask element must be all-ones (keep lane i) or all-zeros
      // (take lane i from the zero vector, index NumElts).
      for (unsigned i = 0; i != NumElts; ++i) {
        SDValue Elt = RHS.getOperand(i);
        if (!isa<ConstantSDNode>(Elt))
          return SDValue();

        if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
          Indices.push_back(i);
        else if (cast<ConstantSDNode>(Elt)->isNullValue())
          Indices.push_back(NumElts);
        else
          return SDValue();
      }

      // Let's see if the target supports this vector_shuffle.
      EVT RVT = RHS.getValueType();
      if (!TLI.isVectorClearMaskLegal(Indices, RVT))
        return SDValue();

      // Return the new VECTOR_SHUFFLE node.
      EVT EltVT = RVT.getVectorElementType();
      SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
                                     DAG.getConstant(0, EltVT));
      SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                                 RVT, &ZeroOps[0], ZeroOps.size());
      LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
      SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
      return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
    }
  }

  return SDValue();
}

/// SimplifyVBinOp - Visit a binary vector operation, like ADD.
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
  // After legalize, the target may be depending on adds and other
  // binary ops to provide legal ways to construct constants or other
  // things. Simplifying them may result in a loss of legality.
  if (LegalOperations) return SDValue();

  assert(N->getValueType(0).isVector() &&
         "SimplifyVBinOp only works on vectors!");

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  // First try turning an AND with a constant clear-mask into a shuffle with
  // zero; see XformToShuffleWithZero.
  SDValue Shuffle = XformToShuffleWithZero(N);
  if (Shuffle.getNode()) return Shuffle;

  // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
  // this operation.
  if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
      RHS.getOpcode() == ISD::BUILD_VECTOR) {
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
      SDValue LHSOp = LHS.getOperand(i);
      SDValue RHSOp = RHS.getOperand(i);
      // If these two elements can't be folded, bail out.
      if ((LHSOp.getOpcode() != ISD::UNDEF &&
           LHSOp.getOpcode() != ISD::Constant &&
           LHSOp.getOpcode() != ISD::ConstantFP) ||
          (RHSOp.getOpcode() != ISD::UNDEF &&
           RHSOp.getOpcode() != ISD::Constant &&
           RHSOp.getOpcode() != ISD::ConstantFP))
        break;

      // Can't fold divide by zero.
      if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
          N->getOpcode() == ISD::FDIV) {
        if ((RHSOp.getOpcode() == ISD::Constant &&
             cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
            (RHSOp.getOpcode() == ISD::ConstantFP &&
             cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
          break;
      }

      EVT VT = LHSOp.getValueType();
      EVT RVT = RHSOp.getValueType();
      if (RVT != VT) {
        // Integer BUILD_VECTOR operands may have types larger than the element
        // size (e.g., when the element type is not legal).  Prior to type
        // legalization, the types may not match between the two BUILD_VECTORS.
        // Truncate one of the operands to make them match.
        if (RVT.getSizeInBits() > VT.getSizeInBits()) {
          RHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, RHSOp);
        } else {
          LHSOp = DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), RVT, LHSOp);
          VT = RVT;
        }
      }
      SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), VT,
                                   LHSOp, RHSOp);
      // getNode only constant-folds when both operands are foldable; if the
      // result is not a constant/undef, the fold did not happen -- give up.
      if (FoldOp.getOpcode() != ISD::UNDEF &&
          FoldOp.getOpcode() != ISD::Constant &&
          FoldOp.getOpcode() != ISD::ConstantFP)
        break;
      Ops.push_back(FoldOp);
      AddToWorkList(FoldOp.getNode());
    }

    // Only rebuild the vector if every element folded.
    if (Ops.size() == LHS.getNumOperands())
      return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
                         LHS.getValueType(), &Ops[0], Ops.size());
  }

  return SDValue();
}

/// SimplifySelect - Simplify (select (setcc ...), N1, N2) by delegating to
/// SimplifySelectCC and re-expanding any SELECT_CC it returns back into a
/// SETCC + SELECT pair, since the caller holds a SELECT node.
SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0,
                                    SDValue N1, SDValue N2){
  assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!");

  SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2,
                                 cast<CondCodeSDNode>(N0.getOperand(2))->get());

  // If we got a simplified select_cc node back from SimplifySelectCC, then
  // break it down into a new SETCC node, and a new SELECT node, and then return
  // the SELECT node, since we were called with a SELECT node.
  if (SCC.getNode()) {
    // Check to see if we got a select_cc back (to turn into setcc/select).
    // Otherwise, just return whatever node we got back, like fabs.
    if (SCC.getOpcode() == ISD::SELECT_CC) {
      SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getDebugLoc(),
                                  N0.getValueType(),
                                  SCC.getOperand(0), SCC.getOperand(1),
                                  SCC.getOperand(4));
      AddToWorkList(SETCC.getNode());
      return DAG.getNode(ISD::SELECT, SCC.getDebugLoc(), SCC.getValueType(),
                         SCC.getOperand(2), SCC.getOperand(3), SETCC);
    }

    return SCC;
  }
  return SDValue();
}

/// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS
/// are the two values being selected between, see if we can simplify the
/// select.  Callers of this should assume that TheSelect is deleted if this
/// returns true.  As such, they should return the appropriate thing (e.g. the
/// node) back to the top-level of the DAG combiner loop to avoid it being
/// looked at.
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {

  // Cannot simplify select with vector condition
  if (TheSelect->getOperand(0).getValueType().isVector()) return false;

  // If this is a select from two identical things, try to pull the operation
  // through the select.
  if (LHS.getOpcode() != RHS.getOpcode() ||
      !LHS.hasOneUse() || !RHS.hasOneUse())
    return false;

  // If this is a load and the token chain is identical, replace the select
  // of two loads with a load through a select of the address to load from.
  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
  // constants have been dropped into the constant pool.
  if (LHS.getOpcode() == ISD::LOAD) {
    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
    LoadSDNode *RLD = cast<LoadSDNode>(RHS);

    // Token chains must be identical.
    if (LHS.getOperand(0) != RHS.getOperand(0) ||
        // Do not let this transformation reduce the number of volatile loads.
        LLD->isVolatile() || RLD->isVolatile() ||
        // If this is an EXTLOAD, the VT's must match.
        LLD->getMemoryVT() != RLD->getMemoryVT() ||
        // If this is an EXTLOAD, the kind of extension must match.
        (LLD->getExtensionType() != RLD->getExtensionType() &&
         // The only exception is if one of the extensions is anyext.
         LLD->getExtensionType() != ISD::EXTLOAD &&
         RLD->getExtensionType() != ISD::EXTLOAD) ||
        // FIXME: this discards src value information.  This is
        // over-conservative. It would be beneficial to be able to remember
        // both potential memory locations.  Since we are discarding
        // src value info, don't do the transformation if the memory
        // locations are not in the default address space.
        LLD->getPointerInfo().getAddrSpace() != 0 ||
        RLD->getPointerInfo().getAddrSpace() != 0)
      return false;

    // Check that the select condition doesn't reach either load.  If so,
    // folding this will induce a cycle into the DAG.  If not, this is safe to
    // xform, so create a select of the addresses.
    SDValue Addr;
    if (TheSelect->getOpcode() == ISD::SELECT) {
      // The chain result of a load (value 1) feeding the condition would
      // make the new load a predecessor of its own operand -- a cycle.
      SDNode *CondNode = TheSelect->getOperand(0).getNode();
      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
        return false;
      Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0), LLD->getBasePtr(),
                         RLD->getBasePtr());
    } else {  // Otherwise SELECT_CC
      // SELECT_CC has two condition operands; check reachability of both.
      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
      SDNode *CondRHS = TheSelect->getOperand(1).getNode();

      if ((LLD->hasAnyUseOfValue(1) &&
           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
          (RLD->hasAnyUseOfValue(1) &&
           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
        return false;

      Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
                         LLD->getBasePtr().getValueType(),
                         TheSelect->getOperand(0),
                         TheSelect->getOperand(1),
                         LLD->getBasePtr(), RLD->getBasePtr(),
                         TheSelect->getOperand(4));
    }

    SDValue Load;
    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
      Load = DAG.getLoad(TheSelect->getValueType(0),
                         TheSelect->getDebugLoc(),
                         // FIXME: Discards pointer info.
                         LLD->getChain(), Addr, MachinePointerInfo(),
                         LLD->isVolatile(), LLD->isNonTemporal(),
                         LLD->isInvariant(), LLD->getAlignment());
    } else {
      // If one side is EXTLOAD (anyext), adopt the other side's stricter
      // extension kind; the VTs were already checked to match above.
      Load = DAG.getExtLoad(LLD->getExtensionType() == ISD::EXTLOAD ?
                            RLD->getExtensionType() : LLD->getExtensionType(),
                            TheSelect->getDebugLoc(),
                            TheSelect->getValueType(0),
                            // FIXME: Discards pointer info.
                            LLD->getChain(), Addr, MachinePointerInfo(),
                            LLD->getMemoryVT(), LLD->isVolatile(),
                            LLD->isNonTemporal(), LLD->getAlignment());
    }

    // Users of the select now use the result of the load.
    CombineTo(TheSelect, Load);

    // Users of the old loads now use the new load's chain.  We know the
    // old-load value is dead now.
    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
    return true;
  }

  return false;
}

/// SimplifySelectCC - Simplify an expression of the form (N0 cond N1) ? N2 : N3
/// where 'cond' is the comparison specified by CC.
SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3,
                                      ISD::CondCode CC, bool NotExtCompare) {
  // (x ? y : y) -> y.
  if (N2 == N3) return N2;

  EVT VT = N2.getValueType();
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());

  // Determine if the condition we're dealing with is constant
  SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0.getValueType()),
                              N0, N1, CC, DL, false);
  if (SCC.getNode()) AddToWorkList(SCC.getNode());
  ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());

  // fold select_cc true, x, y -> x
  if (SCCC && !SCCC->isNullValue())
    return N2;
  // fold select_cc false, x, y -> y
  if (SCCC && SCCC->isNullValue())
    return N3;

  // Check to see if we can simplify the select into an fabs node
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
    // Allow either -0.0 or 0.0
    if (CFP->getValueAPF().isZero()) {
      // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
      if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
          N0 == N2 && N3.getOpcode() == ISD::FNEG &&
          N2 == N3.getOperand(0))
        return DAG.getNode(ISD::FABS, DL, VT, N0);

      // select (setl[te] X, +/-0.0), fneg(X), X -> fabs
      if ((CC == ISD::SETLT || CC == ISD::SETLE) &&
          N0 ==
N3 && N2.getOpcode() == ISD::FNEG && 8566 N2.getOperand(0) == N3) 8567 return DAG.getNode(ISD::FABS, DL, VT, N3); 8568 } 8569 } 8570 8571 // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)" 8572 // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0 8573 // in it. This is a win when the constant is not otherwise available because 8574 // it replaces two constant pool loads with one. We only do this if the FP 8575 // type is known to be legal, because if it isn't, then we are before legalize 8576 // types an we want the other legalization to happen first (e.g. to avoid 8577 // messing with soft float) and if the ConstantFP is not legal, because if 8578 // it is legal, we may not need to store the FP constant in a constant pool. 8579 if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2)) 8580 if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) { 8581 if (TLI.isTypeLegal(N2.getValueType()) && 8582 (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) != 8583 TargetLowering::Legal) && 8584 // If both constants have multiple uses, then we won't need to do an 8585 // extra load, they are likely around in registers for other users. 8586 (TV->hasOneUse() || FV->hasOneUse())) { 8587 Constant *Elts[] = { 8588 const_cast<ConstantFP*>(FV->getConstantFPValue()), 8589 const_cast<ConstantFP*>(TV->getConstantFPValue()) 8590 }; 8591 Type *FPTy = Elts[0]->getType(); 8592 const TargetData &TD = *TLI.getTargetData(); 8593 8594 // Create a ConstantArray of the two constants. 8595 Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts); 8596 SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(), 8597 TD.getPrefTypeAlignment(FPTy)); 8598 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 8599 8600 // Get the offsets to the 0 and 1 element of the array so that we can 8601 // select between them. 
8602 SDValue Zero = DAG.getIntPtrConstant(0); 8603 unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType()); 8604 SDValue One = DAG.getIntPtrConstant(EltSize); 8605 8606 SDValue Cond = DAG.getSetCC(DL, 8607 TLI.getSetCCResultType(N0.getValueType()), 8608 N0, N1, CC); 8609 AddToWorkList(Cond.getNode()); 8610 SDValue CstOffset = DAG.getNode(ISD::SELECT, DL, Zero.getValueType(), 8611 Cond, One, Zero); 8612 AddToWorkList(CstOffset.getNode()); 8613 CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx, 8614 CstOffset); 8615 AddToWorkList(CPIdx.getNode()); 8616 return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx, 8617 MachinePointerInfo::getConstantPool(), false, 8618 false, false, Alignment); 8619 8620 } 8621 } 8622 8623 // Check to see if we can perform the "gzip trick", transforming 8624 // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A) 8625 if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT && 8626 (N1C->isNullValue() || // (a < 0) ? b : 0 8627 (N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0 8628 EVT XType = N0.getValueType(); 8629 EVT AType = N2.getValueType(); 8630 if (XType.bitsGE(AType)) { 8631 // and (sra X, size(X)-1, A) -> "and (srl X, C2), A" iff A is a 8632 // single-bit constant. 
8633 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) { 8634 unsigned ShCtV = N2C->getAPIntValue().logBase2(); 8635 ShCtV = XType.getSizeInBits()-ShCtV-1; 8636 SDValue ShCt = DAG.getConstant(ShCtV, 8637 getShiftAmountTy(N0.getValueType())); 8638 SDValue Shift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), 8639 XType, N0, ShCt); 8640 AddToWorkList(Shift.getNode()); 8641 8642 if (XType.bitsGT(AType)) { 8643 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 8644 AddToWorkList(Shift.getNode()); 8645 } 8646 8647 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 8648 } 8649 8650 SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), 8651 XType, N0, 8652 DAG.getConstant(XType.getSizeInBits()-1, 8653 getShiftAmountTy(N0.getValueType()))); 8654 AddToWorkList(Shift.getNode()); 8655 8656 if (XType.bitsGT(AType)) { 8657 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 8658 AddToWorkList(Shift.getNode()); 8659 } 8660 8661 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 8662 } 8663 } 8664 8665 // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A) 8666 // where y is has a single bit set. 8667 // A plaintext description would be, we can turn the SELECT_CC into an AND 8668 // when the condition can be materialized as an all-ones register. Any 8669 // single bit-test can be materialized as an all-ones register with 8670 // shift-left and shift-right-arith. 8671 if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND && 8672 N0->getValueType(0) == VT && 8673 N1C && N1C->isNullValue() && 8674 N2C && N2C->isNullValue()) { 8675 SDValue AndLHS = N0->getOperand(0); 8676 ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 8677 if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) { 8678 // Shift the tested bit over the sign bit. 
8679 APInt AndMask = ConstAndRHS->getAPIntValue(); 8680 SDValue ShlAmt = 8681 DAG.getConstant(AndMask.countLeadingZeros(), 8682 getShiftAmountTy(AndLHS.getValueType())); 8683 SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt); 8684 8685 // Now arithmetic right shift it all the way over, so the result is either 8686 // all-ones, or zero. 8687 SDValue ShrAmt = 8688 DAG.getConstant(AndMask.getBitWidth()-1, 8689 getShiftAmountTy(Shl.getValueType())); 8690 SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt); 8691 8692 return DAG.getNode(ISD::AND, DL, VT, Shr, N3); 8693 } 8694 } 8695 8696 // fold select C, 16, 0 -> shl C, 4 8697 if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() && 8698 TLI.getBooleanContents(N0.getValueType().isVector()) == 8699 TargetLowering::ZeroOrOneBooleanContent) { 8700 8701 // If the caller doesn't want us to simplify this into a zext of a compare, 8702 // don't do it. 8703 if (NotExtCompare && N2C->getAPIntValue() == 1) 8704 return SDValue(); 8705 8706 // Get a SetCC of the condition 8707 // FIXME: Should probably make sure that setcc is legal if we ever have a 8708 // target where it isn't. 
8709 SDValue Temp, SCC; 8710 // cast from setcc result type to select result type 8711 if (LegalTypes) { 8712 SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()), 8713 N0, N1, CC); 8714 if (N2.getValueType().bitsLT(SCC.getValueType())) 8715 Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType()); 8716 else 8717 Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(), 8718 N2.getValueType(), SCC); 8719 } else { 8720 SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC); 8721 Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(), 8722 N2.getValueType(), SCC); 8723 } 8724 8725 AddToWorkList(SCC.getNode()); 8726 AddToWorkList(Temp.getNode()); 8727 8728 if (N2C->getAPIntValue() == 1) 8729 return Temp; 8730 8731 // shl setcc result by log2 n2c 8732 return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp, 8733 DAG.getConstant(N2C->getAPIntValue().logBase2(), 8734 getShiftAmountTy(Temp.getValueType()))); 8735 } 8736 8737 // Check to see if this is the equivalent of setcc 8738 // FIXME: Turn all of these into setcc if setcc if setcc is legal 8739 // otherwise, go ahead with the folds. 
8740 if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) { 8741 EVT XType = N0.getValueType(); 8742 if (!LegalOperations || 8743 TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(XType))) { 8744 SDValue Res = DAG.getSetCC(DL, TLI.getSetCCResultType(XType), N0, N1, CC); 8745 if (Res.getValueType() != VT) 8746 Res = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Res); 8747 return Res; 8748 } 8749 8750 // fold (seteq X, 0) -> (srl (ctlz X, log2(size(X)))) 8751 if (N1C && N1C->isNullValue() && CC == ISD::SETEQ && 8752 (!LegalOperations || 8753 TLI.isOperationLegal(ISD::CTLZ, XType))) { 8754 SDValue Ctlz = DAG.getNode(ISD::CTLZ, N0.getDebugLoc(), XType, N0); 8755 return DAG.getNode(ISD::SRL, DL, XType, Ctlz, 8756 DAG.getConstant(Log2_32(XType.getSizeInBits()), 8757 getShiftAmountTy(Ctlz.getValueType()))); 8758 } 8759 // fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1)) 8760 if (N1C && N1C->isNullValue() && CC == ISD::SETGT) { 8761 SDValue NegN0 = DAG.getNode(ISD::SUB, N0.getDebugLoc(), 8762 XType, DAG.getConstant(0, XType), N0); 8763 SDValue NotN0 = DAG.getNOT(N0.getDebugLoc(), N0, XType); 8764 return DAG.getNode(ISD::SRL, DL, XType, 8765 DAG.getNode(ISD::AND, DL, XType, NegN0, NotN0), 8766 DAG.getConstant(XType.getSizeInBits()-1, 8767 getShiftAmountTy(XType))); 8768 } 8769 // fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1)) 8770 if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) { 8771 SDValue Sign = DAG.getNode(ISD::SRL, N0.getDebugLoc(), XType, N0, 8772 DAG.getConstant(XType.getSizeInBits()-1, 8773 getShiftAmountTy(N0.getValueType()))); 8774 return DAG.getNode(ISD::XOR, DL, XType, Sign, DAG.getConstant(1, XType)); 8775 } 8776 } 8777 8778 // Check to see if this is an integer abs. 
8779 // select_cc setg[te] X, 0, X, -X -> 8780 // select_cc setgt X, -1, X, -X -> 8781 // select_cc setl[te] X, 0, -X, X -> 8782 // select_cc setlt X, 1, -X, X -> 8783 // Y = sra (X, size(X)-1); xor (add (X, Y), Y) 8784 if (N1C) { 8785 ConstantSDNode *SubC = NULL; 8786 if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) || 8787 (N1C->isAllOnesValue() && CC == ISD::SETGT)) && 8788 N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) 8789 SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0)); 8790 else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) || 8791 (N1C->isOne() && CC == ISD::SETLT)) && 8792 N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1)) 8793 SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0)); 8794 8795 EVT XType = N0.getValueType(); 8796 if (SubC && SubC->isNullValue() && XType.isInteger()) { 8797 SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, 8798 N0, 8799 DAG.getConstant(XType.getSizeInBits()-1, 8800 getShiftAmountTy(N0.getValueType()))); 8801 SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), 8802 XType, N0, Shift); 8803 AddToWorkList(Shift.getNode()); 8804 AddToWorkList(Add.getNode()); 8805 return DAG.getNode(ISD::XOR, DL, XType, Add, Shift); 8806 } 8807 } 8808 8809 return SDValue(); 8810} 8811 8812/// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC. 8813SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, 8814 SDValue N1, ISD::CondCode Cond, 8815 DebugLoc DL, bool foldBooleans) { 8816 TargetLowering::DAGCombinerInfo 8817 DagCombineInfo(DAG, !LegalTypes, !LegalOperations, false, this); 8818 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL); 8819} 8820 8821/// BuildSDIVSequence - Given an ISD::SDIV node expressing a divide by constant, 8822/// return a DAG expression to select that will generate the same value by 8823/// multiplying by a magic number. 
See: 8824/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 8825SDValue DAGCombiner::BuildSDIV(SDNode *N) { 8826 std::vector<SDNode*> Built; 8827 SDValue S = TLI.BuildSDIV(N, DAG, LegalOperations, &Built); 8828 8829 for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); 8830 ii != ee; ++ii) 8831 AddToWorkList(*ii); 8832 return S; 8833} 8834 8835/// BuildUDIVSequence - Given an ISD::UDIV node expressing a divide by constant, 8836/// return a DAG expression to select that will generate the same value by 8837/// multiplying by a magic number. See: 8838/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> 8839SDValue DAGCombiner::BuildUDIV(SDNode *N) { 8840 std::vector<SDNode*> Built; 8841 SDValue S = TLI.BuildUDIV(N, DAG, LegalOperations, &Built); 8842 8843 for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); 8844 ii != ee; ++ii) 8845 AddToWorkList(*ii); 8846 return S; 8847} 8848 8849/// FindBaseOffset - Return true if base is a frame index, which is known not 8850// to alias with anything but itself. Provides base object and offset as 8851// results. 8852static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, 8853 const GlobalValue *&GV, void *&CV) { 8854 // Assume it is a primitive operation. 8855 Base = Ptr; Offset = 0; GV = 0; CV = 0; 8856 8857 // If it's an adding a simple constant then integrate the offset. 8858 if (Base.getOpcode() == ISD::ADD) { 8859 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) { 8860 Base = Base.getOperand(0); 8861 Offset += C->getZExtValue(); 8862 } 8863 } 8864 8865 // Return the underlying GlobalValue, and update the Offset. Return false 8866 // for GlobalAddressSDNode since the same GlobalAddress may be represented 8867 // by multiple nodes with different offsets. 
8868 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) { 8869 GV = G->getGlobal(); 8870 Offset += G->getOffset(); 8871 return false; 8872 } 8873 8874 // Return the underlying Constant value, and update the Offset. Return false 8875 // for ConstantSDNodes since the same constant pool entry may be represented 8876 // by multiple nodes with different offsets. 8877 if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) { 8878 CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal() 8879 : (void *)C->getConstVal(); 8880 Offset += C->getOffset(); 8881 return false; 8882 } 8883 // If it's any of the following then it can't alias with anything but itself. 8884 return isa<FrameIndexSDNode>(Base); 8885} 8886 8887/// isAlias - Return true if there is any possibility that the two addresses 8888/// overlap. 8889bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, 8890 const Value *SrcValue1, int SrcValueOffset1, 8891 unsigned SrcValueAlign1, 8892 const MDNode *TBAAInfo1, 8893 SDValue Ptr2, int64_t Size2, 8894 const Value *SrcValue2, int SrcValueOffset2, 8895 unsigned SrcValueAlign2, 8896 const MDNode *TBAAInfo2) const { 8897 // If they are the same then they must be aliases. 8898 if (Ptr1 == Ptr2) return true; 8899 8900 // Gather base node and offset information. 8901 SDValue Base1, Base2; 8902 int64_t Offset1, Offset2; 8903 const GlobalValue *GV1, *GV2; 8904 void *CV1, *CV2; 8905 bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1); 8906 bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2); 8907 8908 // If they have a same base address then check to see if they overlap. 8909 if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2))) 8910 return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1); 8911 8912 // It is possible for different frame indices to alias each other, mostly 8913 // when tail call optimization reuses return address slots for arguments. 
8914 // To catch this case, look up the actual index of frame indices to compute 8915 // the real alias relationship. 8916 if (isFrameIndex1 && isFrameIndex2) { 8917 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 8918 Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex()); 8919 Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex()); 8920 return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1); 8921 } 8922 8923 // Otherwise, if we know what the bases are, and they aren't identical, then 8924 // we know they cannot alias. 8925 if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2)) 8926 return false; 8927 8928 // If we know required SrcValue1 and SrcValue2 have relatively large alignment 8929 // compared to the size and offset of the access, we may be able to prove they 8930 // do not alias. This check is conservative for now to catch cases created by 8931 // splitting vector types. 8932 if ((SrcValueAlign1 == SrcValueAlign2) && 8933 (SrcValueOffset1 != SrcValueOffset2) && 8934 (Size1 == Size2) && (SrcValueAlign1 > Size1)) { 8935 int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1; 8936 int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1; 8937 8938 // There is no overlap between these relatively aligned accesses of similar 8939 // size, return no alias. 8940 if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1) 8941 return false; 8942 } 8943 8944 if (CombinerGlobalAA) { 8945 // Use alias analysis information. 
8946 int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2); 8947 int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset; 8948 int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset; 8949 AliasAnalysis::AliasResult AAResult = 8950 AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1), 8951 AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2)); 8952 if (AAResult == AliasAnalysis::NoAlias) 8953 return false; 8954 } 8955 8956 // Otherwise we have to assume they alias. 8957 return true; 8958} 8959 8960/// FindAliasInfo - Extracts the relevant alias information from the memory 8961/// node. Returns true if the operand was a load. 8962bool DAGCombiner::FindAliasInfo(SDNode *N, 8963 SDValue &Ptr, int64_t &Size, 8964 const Value *&SrcValue, 8965 int &SrcValueOffset, 8966 unsigned &SrcValueAlign, 8967 const MDNode *&TBAAInfo) const { 8968 LSBaseSDNode *LS = cast<LSBaseSDNode>(N); 8969 8970 Ptr = LS->getBasePtr(); 8971 Size = LS->getMemoryVT().getSizeInBits() >> 3; 8972 SrcValue = LS->getSrcValue(); 8973 SrcValueOffset = LS->getSrcValueOffset(); 8974 SrcValueAlign = LS->getOriginalAlignment(); 8975 TBAAInfo = LS->getTBAAInfo(); 8976 return isa<LoadSDNode>(LS); 8977} 8978 8979/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes, 8980/// looking for aliasing nodes and adding them to the Aliases vector. 8981void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain, 8982 SmallVector<SDValue, 8> &Aliases) { 8983 SmallVector<SDValue, 8> Chains; // List of chains to visit. 8984 SmallPtrSet<SDNode *, 16> Visited; // Visited node set. 8985 8986 // Get alias information for node. 8987 SDValue Ptr; 8988 int64_t Size; 8989 const Value *SrcValue; 8990 int SrcValueOffset; 8991 unsigned SrcValueAlign; 8992 const MDNode *SrcTBAAInfo; 8993 bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset, 8994 SrcValueAlign, SrcTBAAInfo); 8995 8996 // Starting off. 
8997 Chains.push_back(OriginalChain); 8998 unsigned Depth = 0; 8999 9000 // Look at each chain and determine if it is an alias. If so, add it to the 9001 // aliases list. If not, then continue up the chain looking for the next 9002 // candidate. 9003 while (!Chains.empty()) { 9004 SDValue Chain = Chains.back(); 9005 Chains.pop_back(); 9006 9007 // For TokenFactor nodes, look at each operand and only continue up the 9008 // chain until we find two aliases. If we've seen two aliases, assume we'll 9009 // find more and revert to original chain since the xform is unlikely to be 9010 // profitable. 9011 // 9012 // FIXME: The depth check could be made to return the last non-aliasing 9013 // chain we found before we hit a tokenfactor rather than the original 9014 // chain. 9015 if (Depth > 6 || Aliases.size() == 2) { 9016 Aliases.clear(); 9017 Aliases.push_back(OriginalChain); 9018 break; 9019 } 9020 9021 // Don't bother if we've been before. 9022 if (!Visited.insert(Chain.getNode())) 9023 continue; 9024 9025 switch (Chain.getOpcode()) { 9026 case ISD::EntryToken: 9027 // Entry token is ideal chain operand, but handled in FindBetterChain. 9028 break; 9029 9030 case ISD::LOAD: 9031 case ISD::STORE: { 9032 // Get alias information for Chain. 9033 SDValue OpPtr; 9034 int64_t OpSize; 9035 const Value *OpSrcValue; 9036 int OpSrcValueOffset; 9037 unsigned OpSrcValueAlign; 9038 const MDNode *OpSrcTBAAInfo; 9039 bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize, 9040 OpSrcValue, OpSrcValueOffset, 9041 OpSrcValueAlign, 9042 OpSrcTBAAInfo); 9043 9044 // If chain is alias then stop here. 9045 if (!(IsLoad && IsOpLoad) && 9046 isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign, 9047 SrcTBAAInfo, 9048 OpPtr, OpSize, OpSrcValue, OpSrcValueOffset, 9049 OpSrcValueAlign, OpSrcTBAAInfo)) { 9050 Aliases.push_back(Chain); 9051 } else { 9052 // Look further up the chain. 
9053 Chains.push_back(Chain.getOperand(0)); 9054 ++Depth; 9055 } 9056 break; 9057 } 9058 9059 case ISD::TokenFactor: 9060 // We have to check each of the operands of the token factor for "small" 9061 // token factors, so we queue them up. Adding the operands to the queue 9062 // (stack) in reverse order maintains the original order and increases the 9063 // likelihood that getNode will find a matching token factor (CSE.) 9064 if (Chain.getNumOperands() > 16) { 9065 Aliases.push_back(Chain); 9066 break; 9067 } 9068 for (unsigned n = Chain.getNumOperands(); n;) 9069 Chains.push_back(Chain.getOperand(--n)); 9070 ++Depth; 9071 break; 9072 9073 default: 9074 // For all other instructions we will just have to take what we can get. 9075 Aliases.push_back(Chain); 9076 break; 9077 } 9078 } 9079} 9080 9081/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking 9082/// for a better chain (aliasing node.) 9083SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) { 9084 SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor. 9085 9086 // Accumulate all the aliases to this node. 9087 GatherAllAliases(N, OldChain, Aliases); 9088 9089 // If no operands then chain to entry token. 9090 if (Aliases.size() == 0) 9091 return DAG.getEntryNode(); 9092 9093 // If a single operand then chain to it. We don't need to revisit it. 9094 if (Aliases.size() == 1) 9095 return Aliases[0]; 9096 9097 // Construct a custom tailored token factor. 9098 return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other, 9099 &Aliases[0], Aliases.size()); 9100} 9101 9102// SelectionDAG::Combine - This is the entry point for the file. 9103// 9104void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA, 9105 CodeGenOpt::Level OptLevel) { 9106 /// run - This is the main entry point to this class. 9107 /// 9108 DAGCombiner(*this, AA, OptLevel).Run(Level); 9109} 9110