SelectionDAGBuilder.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
1//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements routines for translating from LLVM IR into SelectionDAG IR. 11// 12//===----------------------------------------------------------------------===// 13 14#define DEBUG_TYPE "isel" 15#include "SelectionDAGBuilder.h" 16#include "SDNodeDbgValue.h" 17#include "llvm/ADT/BitVector.h" 18#include "llvm/ADT/Optional.h" 19#include "llvm/ADT/SmallSet.h" 20#include "llvm/Analysis/AliasAnalysis.h" 21#include "llvm/Analysis/BranchProbabilityInfo.h" 22#include "llvm/Analysis/ConstantFolding.h" 23#include "llvm/Analysis/ValueTracking.h" 24#include "llvm/CodeGen/Analysis.h" 25#include "llvm/CodeGen/FastISel.h" 26#include "llvm/CodeGen/FunctionLoweringInfo.h" 27#include "llvm/CodeGen/GCMetadata.h" 28#include "llvm/CodeGen/GCStrategy.h" 29#include "llvm/CodeGen/MachineFrameInfo.h" 30#include "llvm/CodeGen/MachineFunction.h" 31#include "llvm/CodeGen/MachineInstrBuilder.h" 32#include "llvm/CodeGen/MachineJumpTableInfo.h" 33#include "llvm/CodeGen/MachineModuleInfo.h" 34#include "llvm/CodeGen/MachineRegisterInfo.h" 35#include "llvm/CodeGen/SelectionDAG.h" 36#include "llvm/CodeGen/StackMaps.h" 37#include "llvm/IR/CallingConv.h" 38#include "llvm/IR/Constants.h" 39#include "llvm/IR/DataLayout.h" 40#include "llvm/IR/DebugInfo.h" 41#include "llvm/IR/DerivedTypes.h" 42#include "llvm/IR/Function.h" 43#include "llvm/IR/GlobalVariable.h" 44#include "llvm/IR/InlineAsm.h" 45#include "llvm/IR/Instructions.h" 46#include "llvm/IR/IntrinsicInst.h" 47#include "llvm/IR/Intrinsics.h" 48#include "llvm/IR/LLVMContext.h" 49#include "llvm/IR/Module.h" 50#include "llvm/Support/CommandLine.h" 51#include "llvm/Support/Debug.h" 52#include "llvm/Support/ErrorHandling.h" 53#include "llvm/Support/MathExtras.h" 54#include "llvm/Support/raw_ostream.h" 55#include "llvm/Target/TargetFrameLowering.h" 56#include "llvm/Target/TargetInstrInfo.h" 57#include "llvm/Target/TargetIntrinsicInfo.h" 58#include "llvm/Target/TargetLibraryInfo.h" 59#include "llvm/Target/TargetLowering.h" 60#include "llvm/Target/TargetOptions.h" 61#include "llvm/Target/TargetSelectionDAGInfo.h" 62#include <algorithm> 63using namespace llvm; 64 65/// LimitFloatPrecision - Generate low-precision inline sequences for 66/// some float libcalls (6, 8 or 12 bits). 67static unsigned LimitFloatPrecision; 68 69static cl::opt<unsigned, true> 70LimitFPPrecision("limit-float-precision", 71 cl::desc("Generate low-precision inline sequences " 72 "for some float libcalls"), 73 cl::location(LimitFloatPrecision), 74 cl::init(0)); 75 76// Limit the width of DAG chains. This is important in general to prevent 77// prevent DAG-based analysis from blowing up. For example, alias analysis and 78// load clustering may not complete in reasonable time. It is difficult to 79// recognize and avoid this situation within each individual analysis, and 80// future analyses are likely to have the same behavior. Limiting DAG width is 81// the safe approach, and will be especially important with global DAGs. 82// 83// MaxParallelChains default is arbitrarily high to avoid affecting 84// optimization, but could be lowered to improve compile time. Any ld-ld-st-st 85// sequence over this should have been converted to llvm.memcpy by the 86// frontend. 
It is easy to induce this behavior with .ll code such as: 87// %buffer = alloca [4096 x i8] 88// %data = load [4096 x i8]* %argPtr 89// store [4096 x i8] %data, [4096 x i8]* %buffer 90static const unsigned MaxParallelChains = 64; 91 92static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL, 93 const SDValue *Parts, unsigned NumParts, 94 MVT PartVT, EVT ValueVT, const Value *V); 95 96/// getCopyFromParts - Create a value that contains the specified legal parts 97/// combined into the value they represent. If the parts combine to a type 98/// larger than ValueVT then AssertOp can be used to specify whether the extra 99/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT 100/// (ISD::AssertSext). 101static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL, 102 const SDValue *Parts, 103 unsigned NumParts, MVT PartVT, EVT ValueVT, 104 const Value *V, 105 ISD::NodeType AssertOp = ISD::DELETED_NODE) { 106 if (ValueVT.isVector()) 107 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, 108 PartVT, ValueVT, V); 109 110 assert(NumParts > 0 && "No parts to assemble!"); 111 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 112 SDValue Val = Parts[0]; 113 114 if (NumParts > 1) { 115 // Assemble the value from multiple parts. 116 if (ValueVT.isInteger()) { 117 unsigned PartBits = PartVT.getSizeInBits(); 118 unsigned ValueBits = ValueVT.getSizeInBits(); 119 120 // Assemble the power of 2 part. 121 unsigned RoundParts = NumParts & (NumParts - 1) ? 122 1 << Log2_32(NumParts) : NumParts; 123 unsigned RoundBits = PartBits * RoundParts; 124 EVT RoundVT = RoundBits == ValueBits ? 125 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits); 126 SDValue Lo, Hi; 127 128 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2); 129 130 if (RoundParts > 2) { 131 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, 132 PartVT, HalfVT, V); 133 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, 134 RoundParts / 2, PartVT, HalfVT, V); 135 } else { 136 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]); 137 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]); 138 } 139 140 if (TLI.isBigEndian()) 141 std::swap(Lo, Hi); 142 143 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi); 144 145 if (RoundParts < NumParts) { 146 // Assemble the trailing non-power-of-2 part. 147 unsigned OddParts = NumParts - RoundParts; 148 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits); 149 Hi = getCopyFromParts(DAG, DL, 150 Parts + RoundParts, OddParts, PartVT, OddVT, V); 151 152 // Combine the round and odd parts.
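// For example, assuming a little-endian target assembling an i96 value from
// three i32 parts: Val now holds the i64 built from Parts[0..1] and Hi holds
// the odd part Parts[2]. The code below then forms, roughly:
//   Hi  = SHL i96 (ANY_EXTEND i96 Hi), 64
//   Val = OR i96 (ZERO_EXTEND i96 Lo), Hi
// placing the odd part above the 64 round bits.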
153 Lo = Val; 154 if (TLI.isBigEndian()) 155 std::swap(Lo, Hi); 156 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); 157 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi); 158 Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi, 159 DAG.getConstant(Lo.getValueType().getSizeInBits(), 160 TLI.getPointerTy())); 161 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo); 162 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi); 163 } 164 } else if (PartVT.isFloatingPoint()) { 165 // FP split into multiple FP parts (for ppcf128) 166 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 && 167 "Unexpected split"); 168 SDValue Lo, Hi; 169 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]); 170 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]); 171 if (TLI.isBigEndian()) 172 std::swap(Lo, Hi); 173 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi); 174 } else { 175 // FP split into integer parts (soft fp) 176 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() && 177 !PartVT.isVector() && "Unexpected split"); 178 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()); 179 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V); 180 } 181 } 182 183 // There is now one part, held in Val. Correct it to match ValueVT. 184 EVT PartEVT = Val.getValueType(); 185 186 if (PartEVT == ValueVT) 187 return Val; 188 189 if (PartEVT.isInteger() && ValueVT.isInteger()) { 190 if (ValueVT.bitsLT(PartEVT)) { 191 // For a truncate, see if we have any information to 192 // indicate whether the truncated bits will always be 193 // zero or sign-extension. 194 if (AssertOp != ISD::DELETED_NODE) 195 Val = DAG.getNode(AssertOp, DL, PartEVT, Val, 196 DAG.getValueType(ValueVT)); 197 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val); 198 } 199 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val); 200 } 201 202 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { 203 // FP_ROUND's are always exact here. 204 if (ValueVT.bitsLT(Val.getValueType())) 205 return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, 206 DAG.getTargetConstant(1, TLI.getPointerTy())); 207 208 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val); 209 } 210 211 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits()) 212 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); 213 214 llvm_unreachable("Unknown mismatch!"); 215} 216 217static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, 218 const Twine &ErrMsg) { 219 const Instruction *I = dyn_cast_or_null<Instruction>(V); 220 if (!V) 221 return Ctx.emitError(ErrMsg); 222 223 const char *AsmError = ", possible invalid constraint for vector type"; 224 if (const CallInst *CI = dyn_cast<CallInst>(I)) 225 if (isa<InlineAsm>(CI->getCalledValue())) 226 return Ctx.emitError(I, ErrMsg + AsmError); 227 228 return Ctx.emitError(I, ErrMsg); 229} 230 231/// getCopyFromPartsVector - Create a value that contains the specified legal 232/// parts combined into the value they represent. If the parts combine to a 233/// type larger then ValueVT then AssertOp can be used to specify whether the 234/// extra bits are known to be zero (ISD::AssertZext) or sign extended from 235/// ValueVT (ISD::AssertSext). 
236static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL, 237 const SDValue *Parts, unsigned NumParts, 238 MVT PartVT, EVT ValueVT, const Value *V) { 239 assert(ValueVT.isVector() && "Not a vector value"); 240 assert(NumParts > 0 && "No parts to assemble!"); 241 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 242 SDValue Val = Parts[0]; 243 244 // Handle a multi-element vector. 245 if (NumParts > 1) { 246 EVT IntermediateVT; 247 MVT RegisterVT; 248 unsigned NumIntermediates; 249 unsigned NumRegs = 250 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT, 251 NumIntermediates, RegisterVT); 252 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!"); 253 NumParts = NumRegs; // Silence a compiler warning. 254 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!"); 255 assert(RegisterVT == Parts[0].getSimpleValueType() && 256 "Part type doesn't match part!"); 257 258 // Assemble the parts into intermediate operands. 259 SmallVector<SDValue, 8> Ops(NumIntermediates); 260 if (NumIntermediates == NumParts) { 261 // If the register was not expanded, truncate or copy the value, 262 // as appropriate. 263 for (unsigned i = 0; i != NumParts; ++i) 264 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, 265 PartVT, IntermediateVT, V); 266 } else if (NumParts > 0) { 267 // If the intermediate type was expanded, build the intermediate 268 // operands from the parts. 269 assert(NumParts % NumIntermediates == 0 && 270 "Must expand into a divisible number of parts!"); 271 unsigned Factor = NumParts / NumIntermediates; 272 for (unsigned i = 0; i != NumIntermediates; ++i) 273 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, 274 PartVT, IntermediateVT, V); 275 } 276 277 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the 278 // intermediate operands. 279 Val = DAG.getNode(IntermediateVT.isVector() ? 280 ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL, 281 ValueVT, &Ops[0], NumIntermediates); 282 } 283 284 // There is now one part, held in Val. Correct it to match ValueVT. 285 EVT PartEVT = Val.getValueType(); 286 287 if (PartEVT == ValueVT) 288 return Val; 289 290 if (PartEVT.isVector()) { 291 // If the element type of the source/dest vectors are the same, but the 292 // parts vector has more elements than the value vector, then we have a 293 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the 294 // elements we want. 295 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) { 296 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() && 297 "Cannot narrow, it would be a lossy transformation"); 298 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val, 299 DAG.getConstant(0, TLI.getVectorIdxTy())); 300 } 301 302 // Vector/Vector bitcast. 303 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) 304 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); 305 306 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() && 307 "Cannot handle this kind of promotion"); 308 // Promoted vector extract 309 bool Smaller = ValueVT.bitsLE(PartEVT); 310 return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND), 311 DL, ValueVT, Val); 312 313 } 314 315 // Trivial bitcast if the types are the same size and the destination 316 // vector type is legal. 
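// For example, a single scalar i64 part can be bitcast straight to a
// <2 x i32> value here, assuming the target marks <2 x i32> as a legal type.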
317 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() && 318 TLI.isTypeLegal(ValueVT)) 319 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); 320 321 // Handle cases such as i8 -> <1 x i1> 322 if (ValueVT.getVectorNumElements() != 1) { 323 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V, 324 "non-trivial scalar-to-vector conversion"); 325 return DAG.getUNDEF(ValueVT); 326 } 327 328 if (ValueVT.getVectorNumElements() == 1 && 329 ValueVT.getVectorElementType() != PartEVT) { 330 bool Smaller = ValueVT.bitsLE(PartEVT); 331 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND), 332 DL, ValueVT.getScalarType(), Val); 333 } 334 335 return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val); 336} 337 338static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl, 339 SDValue Val, SDValue *Parts, unsigned NumParts, 340 MVT PartVT, const Value *V); 341 342/// getCopyToParts - Create a series of nodes that contain the specified value 343/// split into legal parts. If the parts contain more bits than Val, then, for 344/// integers, ExtendKind can be used to specify how to generate the extra bits. 345static void getCopyToParts(SelectionDAG &DAG, SDLoc DL, 346 SDValue Val, SDValue *Parts, unsigned NumParts, 347 MVT PartVT, const Value *V, 348 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) { 349 EVT ValueVT = Val.getValueType(); 350 351 // Handle the vector case separately. 352 if (ValueVT.isVector()) 353 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V); 354 355 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 356 unsigned PartBits = PartVT.getSizeInBits(); 357 unsigned OrigNumParts = NumParts; 358 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!"); 359 360 if (NumParts == 0) 361 return; 362 363 assert(!ValueVT.isVector() && "Vector case handled elsewhere"); 364 EVT PartEVT = PartVT; 365 if (PartEVT == ValueVT) { 366 assert(NumParts == 1 && "No-op copy with multiple parts!"); 367 Parts[0] = Val; 368 return; 369 } 370 371 if (NumParts * PartBits > ValueVT.getSizeInBits()) { 372 // If the parts cover more bits than the value has, promote the value. 373 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { 374 assert(NumParts == 1 && "Do not know what to promote to!"); 375 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val); 376 } else { 377 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) && 378 ValueVT.isInteger() && 379 "Unknown mismatch!"); 380 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); 381 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val); 382 if (PartVT == MVT::x86mmx) 383 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); 384 } 385 } else if (PartBits == ValueVT.getSizeInBits()) { 386 // Different types of the same size. 387 assert(NumParts == 1 && PartEVT != ValueVT); 388 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); 389 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) { 390 // If the parts cover less bits than value has, truncate the value. 391 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) && 392 ValueVT.isInteger() && 393 "Unknown mismatch!"); 394 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); 395 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val); 396 if (PartVT == MVT::x86mmx) 397 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); 398 } 399 400 // The value may have changed - recompute ValueVT. 
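// For example, an i40 value copied into two i32 parts was extended
// (ANY_EXTEND by default) to i64 above, so the assertion below that the
// parts exactly tile the value (2 * 32 == 64 bits) holds.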
401 ValueVT = Val.getValueType(); 402 assert(NumParts * PartBits == ValueVT.getSizeInBits() && 403 "Failed to tile the value with PartVT!"); 404 405 if (NumParts == 1) { 406 if (PartEVT != ValueVT) 407 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V, 408 "scalar-to-vector conversion failed"); 409 410 Parts[0] = Val; 411 return; 412 } 413 414 // Expand the value into multiple parts. 415 if (NumParts & (NumParts - 1)) { 416 // The number of parts is not a power of 2. Split off and copy the tail. 417 assert(PartVT.isInteger() && ValueVT.isInteger() && 418 "Do not know what to expand to!"); 419 unsigned RoundParts = 1 << Log2_32(NumParts); 420 unsigned RoundBits = RoundParts * PartBits; 421 unsigned OddParts = NumParts - RoundParts; 422 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val, 423 DAG.getIntPtrConstant(RoundBits)); 424 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V); 425 426 if (TLI.isBigEndian()) 427 // The odd parts were reversed by getCopyToParts - unreverse them. 428 std::reverse(Parts + RoundParts, Parts + NumParts); 429 430 NumParts = RoundParts; 431 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); 432 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val); 433 } 434 435 // The number of parts is a power of 2. Repeatedly bisect the value using 436 // EXTRACT_ELEMENT. 437 Parts[0] = DAG.getNode(ISD::BITCAST, DL, 438 EVT::getIntegerVT(*DAG.getContext(), 439 ValueVT.getSizeInBits()), 440 Val); 441 442 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) { 443 for (unsigned i = 0; i < NumParts; i += StepSize) { 444 unsigned ThisBits = StepSize * PartBits / 2; 445 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits); 446 SDValue &Part0 = Parts[i]; 447 SDValue &Part1 = Parts[i+StepSize/2]; 448 449 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, 450 ThisVT, Part0, DAG.getIntPtrConstant(1)); 451 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, 452 ThisVT, Part0, DAG.getIntPtrConstant(0)); 453 454 if (ThisBits == PartBits && ThisVT != PartVT) { 455 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0); 456 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1); 457 } 458 } 459 } 460 461 if (TLI.isBigEndian()) 462 std::reverse(Parts, Parts + OrigNumParts); 463} 464 465 466/// getCopyToPartsVector - Create a series of nodes that contain the specified 467/// value split into legal parts. 468static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL, 469 SDValue Val, SDValue *Parts, unsigned NumParts, 470 MVT PartVT, const Value *V) { 471 EVT ValueVT = Val.getValueType(); 472 assert(ValueVT.isVector() && "Not a vector"); 473 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 474 475 if (NumParts == 1) { 476 EVT PartEVT = PartVT; 477 if (PartEVT == ValueVT) { 478 // Nothing to do. 479 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) { 480 // Bitconvert vector->vector case. 481 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val); 482 } else if (PartVT.isVector() && 483 PartEVT.getVectorElementType() == ValueVT.getVectorElementType() && 484 PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) { 485 EVT ElementVT = PartVT.getVectorElementType(); 486 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in 487 // undef elements. 
488 SmallVector<SDValue, 16> Ops; 489 for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i) 490 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 491 ElementVT, Val, DAG.getConstant(i, 492 TLI.getVectorIdxTy()))); 493 494 for (unsigned i = ValueVT.getVectorNumElements(), 495 e = PartVT.getVectorNumElements(); i != e; ++i) 496 Ops.push_back(DAG.getUNDEF(ElementVT)); 497 498 Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size()); 499 500 // FIXME: Use CONCAT for 2x -> 4x. 501 502 //SDValue UndefElts = DAG.getUNDEF(VectorTy); 503 //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts); 504 } else if (PartVT.isVector() && 505 PartEVT.getVectorElementType().bitsGE( 506 ValueVT.getVectorElementType()) && 507 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) { 508 509 // Promoted vector extract 510 bool Smaller = PartEVT.bitsLE(ValueVT); 511 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND), 512 DL, PartVT, Val); 513 } else{ 514 // Vector -> scalar conversion. 515 assert(ValueVT.getVectorNumElements() == 1 && 516 "Only trivial vector-to-scalar conversions should get here!"); 517 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 518 PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy())); 519 520 bool Smaller = ValueVT.bitsLE(PartVT); 521 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND), 522 DL, PartVT, Val); 523 } 524 525 Parts[0] = Val; 526 return; 527 } 528 529 // Handle a multi-element vector. 530 EVT IntermediateVT; 531 MVT RegisterVT; 532 unsigned NumIntermediates; 533 unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, 534 IntermediateVT, 535 NumIntermediates, RegisterVT); 536 unsigned NumElements = ValueVT.getVectorNumElements(); 537 538 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!"); 539 NumParts = NumRegs; // Silence a compiler warning. 540 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!"); 541 542 // Split the vector into intermediate operands. 543 SmallVector<SDValue, 8> Ops(NumIntermediates); 544 for (unsigned i = 0; i != NumIntermediates; ++i) { 545 if (IntermediateVT.isVector()) 546 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, 547 IntermediateVT, Val, 548 DAG.getConstant(i * (NumElements / NumIntermediates), 549 TLI.getVectorIdxTy())); 550 else 551 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 552 IntermediateVT, Val, 553 DAG.getConstant(i, TLI.getVectorIdxTy())); 554 } 555 556 // Split the intermediate operands into legal parts. 557 if (NumParts == NumIntermediates) { 558 // If the register was not expanded, promote or copy the value, 559 // as appropriate. 560 for (unsigned i = 0; i != NumParts; ++i) 561 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V); 562 } else if (NumParts > 0) { 563 // If the intermediate type was expanded, split each the value into 564 // legal parts. 565 assert(NumParts % NumIntermediates == 0 && 566 "Must expand into a divisible number of parts!"); 567 unsigned Factor = NumParts / NumIntermediates; 568 for (unsigned i = 0; i != NumIntermediates; ++i) 569 getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V); 570 } 571} 572 573namespace { 574 /// RegsForValue - This struct represents the registers (physical or virtual) 575 /// that a particular set of values is assigned, and the type information 576 /// about the value. 
The most common situation is to represent one value at a 577 /// time, but struct or array values are handled element-wise as multiple 578 /// values. The splitting of aggregates is performed recursively, so that we 579 /// never have aggregate-typed registers. The values at this point do not 580 /// necessarily have legal types, so each value may require one or more 581 /// registers of some legal type. 582 /// 583 struct RegsForValue { 584 /// ValueVTs - The value types of the values, which may not be legal, and 585 /// may need to be promoted or synthesized from one or more registers. 586 /// 587 SmallVector<EVT, 4> ValueVTs; 588 589 /// RegVTs - The value types of the registers. This is the same size as 590 /// ValueVTs and it records, for each value, what the type of the assigned 591 /// register or registers is. (Individual values are never synthesized 592 /// from more than one type of register.) 593 /// 594 /// With virtual registers, the contents of RegVTs are redundant with TLI's 595 /// getRegisterType member function, however, with physical registers 596 /// it is necessary to have a separate record of the types. 597 /// 598 SmallVector<MVT, 4> RegVTs; 599 600 /// Regs - This list holds the registers assigned to the values. 601 /// Each legal or promoted value requires one register, and each 602 /// expanded value requires multiple registers. 603 /// 604 SmallVector<unsigned, 4> Regs; 605 606 RegsForValue() {} 607 608 RegsForValue(const SmallVector<unsigned, 4> &regs, 609 MVT regvt, EVT valuevt) 610 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {} 611 612 RegsForValue(LLVMContext &Context, const TargetLowering &tli, 613 unsigned Reg, Type *Ty) { 614 ComputeValueVTs(tli, Ty, ValueVTs); 615 616 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) { 617 EVT ValueVT = ValueVTs[Value]; 618 unsigned NumRegs = tli.getNumRegisters(Context, ValueVT); 619 MVT RegisterVT = tli.getRegisterType(Context, ValueVT); 620 for (unsigned i = 0; i != NumRegs; ++i) 621 Regs.push_back(Reg + i); 622 RegVTs.push_back(RegisterVT); 623 Reg += NumRegs; 624 } 625 } 626 627 /// append - Add the specified values to this one. 628 void append(const RegsForValue &RHS) { 629 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end()); 630 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end()); 631 Regs.append(RHS.Regs.begin(), RHS.Regs.end()); 632 } 633 634 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 635 /// this value and returns the result as a ValueVTs value. This uses 636 /// Chain/Flag as the input and updates them for the output Chain/Flag. 637 /// If the Flag pointer is NULL, no flag is used. 638 SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, 639 SDLoc dl, 640 SDValue &Chain, SDValue *Flag, 641 const Value *V = 0) const; 642 643 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 644 /// specified value into the registers specified by this object. This uses 645 /// Chain/Flag as the input and updates them for the output Chain/Flag. 646 /// If the Flag pointer is NULL, no flag is used. 647 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl, 648 SDValue &Chain, SDValue *Flag, const Value *V) const; 649 650 /// AddInlineAsmOperands - Add this value to the specified inlineasm node 651 /// operand list. This adds the code marker, matching input operand index 652 /// (if applicable), and includes the number of values added into it.
653 void AddInlineAsmOperands(unsigned Kind, 654 bool HasMatching, unsigned MatchingIdx, 655 SelectionDAG &DAG, 656 std::vector<SDValue> &Ops) const; 657 }; 658} 659 660/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from 661/// this value and returns the result as a ValueVT value. This uses 662/// Chain/Flag as the input and updates them for the output Chain/Flag. 663/// If the Flag pointer is NULL, no flag is used. 664SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, 665 FunctionLoweringInfo &FuncInfo, 666 SDLoc dl, 667 SDValue &Chain, SDValue *Flag, 668 const Value *V) const { 669 // A Value with type {} or [0 x %t] needs no registers. 670 if (ValueVTs.empty()) 671 return SDValue(); 672 673 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 674 675 // Assemble the legal parts into the final values. 676 SmallVector<SDValue, 4> Values(ValueVTs.size()); 677 SmallVector<SDValue, 8> Parts; 678 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { 679 // Copy the legal parts from the registers. 680 EVT ValueVT = ValueVTs[Value]; 681 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT); 682 MVT RegisterVT = RegVTs[Value]; 683 684 Parts.resize(NumRegs); 685 for (unsigned i = 0; i != NumRegs; ++i) { 686 SDValue P; 687 if (Flag == 0) { 688 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT); 689 } else { 690 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag); 691 *Flag = P.getValue(2); 692 } 693 694 Chain = P.getValue(1); 695 Parts[i] = P; 696 697 // If the source register was virtual and if we know something about it, 698 // add an assert node. 699 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) || 700 !RegisterVT.isInteger() || RegisterVT.isVector()) 701 continue; 702 703 const FunctionLoweringInfo::LiveOutInfo *LOI = 704 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]); 705 if (!LOI) 706 continue; 707 708 unsigned RegSize = RegisterVT.getSizeInBits(); 709 unsigned NumSignBits = LOI->NumSignBits; 710 unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes(); 711 712 if (NumZeroBits == RegSize) { 713 // The current value is a zero. 714 // Explicitly express that as it would be easier for 715 // optimizations to kick in. 716 Parts[i] = DAG.getConstant(0, RegisterVT); 717 continue; 718 } 719 720 // FIXME: We capture more information than the dag can represent. For 721 // now, just use the tightest assertzext/assertsext possible. 722 bool isSExt = true; 723 EVT FromVT(MVT::Other); 724 if (NumSignBits == RegSize) 725 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1 726 else if (NumZeroBits >= RegSize-1) 727 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1 728 else if (NumSignBits > RegSize-8) 729 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8 730 else if (NumZeroBits >= RegSize-8) 731 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8 732 else if (NumSignBits > RegSize-16) 733 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16 734 else if (NumZeroBits >= RegSize-16) 735 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16 736 else if (NumSignBits > RegSize-32) 737 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32 738 else if (NumZeroBits >= RegSize-32) 739 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32 740 else 741 continue; 742 743 // Add an assertion node. 744 assert(FromVT != MVT::Other); 745 Parts[i] = DAG.getNode(isSExt ? 
ISD::AssertSext : ISD::AssertZext, dl, 746 RegisterVT, P, DAG.getValueType(FromVT)); 747 } 748 749 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), 750 NumRegs, RegisterVT, ValueVT, V); 751 Part += NumRegs; 752 Parts.clear(); 753 } 754 755 return DAG.getNode(ISD::MERGE_VALUES, dl, 756 DAG.getVTList(&ValueVTs[0], ValueVTs.size()), 757 &Values[0], ValueVTs.size()); 758} 759 760/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the 761/// specified value into the registers specified by this object. This uses 762/// Chain/Flag as the input and updates them for the output Chain/Flag. 763/// If the Flag pointer is NULL, no flag is used. 764void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl, 765 SDValue &Chain, SDValue *Flag, 766 const Value *V) const { 767 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 768 769 // Get the list of the values's legal parts. 770 unsigned NumRegs = Regs.size(); 771 SmallVector<SDValue, 8> Parts(NumRegs); 772 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { 773 EVT ValueVT = ValueVTs[Value]; 774 unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT); 775 MVT RegisterVT = RegVTs[Value]; 776 ISD::NodeType ExtendKind = 777 TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND; 778 779 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), 780 &Parts[Part], NumParts, RegisterVT, V, ExtendKind); 781 Part += NumParts; 782 } 783 784 // Copy the parts into the registers. 785 SmallVector<SDValue, 8> Chains(NumRegs); 786 for (unsigned i = 0; i != NumRegs; ++i) { 787 SDValue Part; 788 if (Flag == 0) { 789 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]); 790 } else { 791 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag); 792 *Flag = Part.getValue(1); 793 } 794 795 Chains[i] = Part.getValue(0); 796 } 797 798 if (NumRegs == 1 || Flag) 799 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is 800 // flagged to it. That is the CopyToReg nodes and the user are considered 801 // a single scheduling unit. If we create a TokenFactor and return it as 802 // chain, then the TokenFactor is both a predecessor (operand) of the 803 // user as well as a successor (the TF operands are flagged to the user). 804 // c1, f1 = CopyToReg 805 // c2, f2 = CopyToReg 806 // c3 = TokenFactor c1, c2 807 // ... 808 // = op c3, ..., f2 809 Chain = Chains[NumRegs-1]; 810 else 811 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs); 812} 813 814/// AddInlineAsmOperands - Add this value to the specified inlineasm node 815/// operand list. This adds the code marker and includes the number of 816/// values added into it. 817void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching, 818 unsigned MatchingIdx, 819 SelectionDAG &DAG, 820 std::vector<SDValue> &Ops) const { 821 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 822 823 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size()); 824 if (HasMatching) 825 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx); 826 else if (!Regs.empty() && 827 TargetRegisterInfo::isVirtualRegister(Regs.front())) { 828 // Put the register class of the virtual registers in the flag word. That 829 // way, later passes can recompute register class constraints for inline 830 // assembly as well as normal instructions. 831 // Don't do this for tied operands that can use the regclass information 832 // from the def. 
833 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 834 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front()); 835 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); 836 } 837 838 SDValue Res = DAG.getTargetConstant(Flag, MVT::i32); 839 Ops.push_back(Res); 840 841 unsigned SP = TLI.getStackPointerRegisterToSaveRestore(); 842 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) { 843 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]); 844 MVT RegisterVT = RegVTs[Value]; 845 for (unsigned i = 0; i != NumRegs; ++i) { 846 assert(Reg < Regs.size() && "Mismatch in # registers expected"); 847 unsigned TheReg = Regs[Reg++]; 848 Ops.push_back(DAG.getRegister(TheReg, RegisterVT)); 849 850 if (TheReg == SP && Code == InlineAsm::Kind_Clobber) { 851 // If we clobbered the stack pointer, MFI should know about it. 852 assert(DAG.getMachineFunction().getFrameInfo()-> 853 hasInlineAsmWithSPAdjust()); 854 } 855 } 856 } 857} 858 859void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa, 860 const TargetLibraryInfo *li) { 861 AA = &aa; 862 GFI = gfi; 863 LibInfo = li; 864 DL = DAG.getTarget().getDataLayout(); 865 Context = DAG.getContext(); 866 LPadToCallSiteMap.clear(); 867} 868 869/// clear - Clear out the current SelectionDAG and the associated 870/// state and prepare this SelectionDAGBuilder object to be used 871/// for a new block. This doesn't clear out information about 872/// additional blocks that are needed to complete switch lowering 873/// or PHI node updating; that information is cleared out as it is 874/// consumed. 875void SelectionDAGBuilder::clear() { 876 NodeMap.clear(); 877 UnusedArgNodeMap.clear(); 878 PendingLoads.clear(); 879 PendingExports.clear(); 880 CurInst = NULL; 881 HasTailCall = false; 882 SDNodeOrder = LowestSDNodeOrder; 883} 884 885/// clearDanglingDebugInfo - Clear the dangling debug information 886/// map. This function is separated from the clear so that debug 887/// information that is dangling in a basic block can be properly 888/// resolved in a different basic block. This allows the 889/// SelectionDAG to resolve dangling debug information attached 890/// to PHI nodes. 891void SelectionDAGBuilder::clearDanglingDebugInfo() { 892 DanglingDebugInfoMap.clear(); 893} 894 895/// getRoot - Return the current virtual root of the Selection DAG, 896/// flushing any PendingLoad items. This must be done before emitting 897/// a store or any other node that may need to be ordered after any 898/// prior load instructions. 899/// 900SDValue SelectionDAGBuilder::getRoot() { 901 if (PendingLoads.empty()) 902 return DAG.getRoot(); 903 904 if (PendingLoads.size() == 1) { 905 SDValue Root = PendingLoads[0]; 906 DAG.setRoot(Root); 907 PendingLoads.clear(); 908 return Root; 909 } 910 911 // Otherwise, we have to make a token factor node. 912 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, 913 &PendingLoads[0], PendingLoads.size()); 914 PendingLoads.clear(); 915 DAG.setRoot(Root); 916 return Root; 917} 918 919/// getControlRoot - Similar to getRoot, but instead of flushing all the 920/// PendingLoad items, flush all the PendingExports items. It is necessary 921/// to do this before emitting a terminator instruction. 922/// 923SDValue SelectionDAGBuilder::getControlRoot() { 924 SDValue Root = DAG.getRoot(); 925 926 if (PendingExports.empty()) 927 return Root; 928 929 // Turn all of the CopyToReg chains into one factored node. 
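// If the current root is a real node (not the entry token) and none of the
// pending exports already chains off of it, the root itself is added as an
// extra operand below so that the TokenFactor is ordered after it.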
930 if (Root.getOpcode() != ISD::EntryToken) { 931 unsigned i = 0, e = PendingExports.size(); 932 for (; i != e; ++i) { 933 assert(PendingExports[i].getNode()->getNumOperands() > 1); 934 if (PendingExports[i].getNode()->getOperand(0) == Root) 935 break; // Don't add the root if we already indirectly depend on it. 936 } 937 938 if (i == e) 939 PendingExports.push_back(Root); 940 } 941 942 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, 943 &PendingExports[0], 944 PendingExports.size()); 945 PendingExports.clear(); 946 DAG.setRoot(Root); 947 return Root; 948} 949 950void SelectionDAGBuilder::visit(const Instruction &I) { 951 // Set up outgoing PHI node register values before emitting the terminator. 952 if (isa<TerminatorInst>(&I)) 953 HandlePHINodesInSuccessorBlocks(I.getParent()); 954 955 ++SDNodeOrder; 956 957 CurInst = &I; 958 959 visit(I.getOpcode(), I); 960 961 if (!isa<TerminatorInst>(&I) && !HasTailCall) 962 CopyToExportRegsIfNeeded(&I); 963 964 CurInst = NULL; 965} 966 967void SelectionDAGBuilder::visitPHI(const PHINode &) { 968 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!"); 969} 970 971void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) { 972 // Note: this doesn't use InstVisitor, because it has to work with 973 // ConstantExpr's in addition to instructions. 974 switch (Opcode) { 975 default: llvm_unreachable("Unknown instruction type encountered!"); 976 // Build the switch statement using the Instruction.def file. 977#define HANDLE_INST(NUM, OPCODE, CLASS) \ 978 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break; 979#include "llvm/IR/Instruction.def" 980 } 981} 982 983// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V, 984// generate the debug data structures now that we've seen its definition. 985void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V, 986 SDValue Val) { 987 DanglingDebugInfo &DDI = DanglingDebugInfoMap[V]; 988 if (DDI.getDI()) { 989 const DbgValueInst *DI = DDI.getDI(); 990 DebugLoc dl = DDI.getdl(); 991 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder(); 992 MDNode *Variable = DI->getVariable(); 993 uint64_t Offset = DI->getOffset(); 994 SDDbgValue *SDV; 995 if (Val.getNode()) { 996 if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) { 997 SDV = DAG.getDbgValue(Variable, Val.getNode(), 998 Val.getResNo(), Offset, dl, DbgSDNodeOrder); 999 DAG.AddDbgValue(SDV, Val.getNode(), false); 1000 } 1001 } else 1002 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); 1003 DanglingDebugInfoMap[V] = DanglingDebugInfo(); 1004 } 1005} 1006 1007/// getValue - Return an SDValue for the given Value. 1008SDValue SelectionDAGBuilder::getValue(const Value *V) { 1009 // If we already have an SDValue for this value, use it. It's important 1010 // to do this first, so that we don't create a CopyFromReg if we already 1011 // have a regular SDValue. 1012 SDValue &N = NodeMap[V]; 1013 if (N.getNode()) return N; 1014 1015 // If there's a virtual register allocated and initialized for this 1016 // value, use it. 
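// For example (illustrative), a value defined in another basic block was
// exported to a virtual register by CopyToExportRegsIfNeeded when its
// defining block was lowered; here it is read back via CopyFromReg nodes.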
1017 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V); 1018 if (It != FuncInfo.ValueMap.end()) { 1019 unsigned InReg = It->second; 1020 RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(), 1021 InReg, V->getType()); 1022 SDValue Chain = DAG.getEntryNode(); 1023 N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V); 1024 resolveDanglingDebugInfo(V, N); 1025 return N; 1026 } 1027 1028 // Otherwise create a new SDValue and remember it. 1029 SDValue Val = getValueImpl(V); 1030 NodeMap[V] = Val; 1031 resolveDanglingDebugInfo(V, Val); 1032 return Val; 1033} 1034 1035/// getNonRegisterValue - Return an SDValue for the given Value, but 1036/// don't look in FuncInfo.ValueMap for a virtual register. 1037SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) { 1038 // If we already have an SDValue for this value, use it. 1039 SDValue &N = NodeMap[V]; 1040 if (N.getNode()) return N; 1041 1042 // Otherwise create a new SDValue and remember it. 1043 SDValue Val = getValueImpl(V); 1044 NodeMap[V] = Val; 1045 resolveDanglingDebugInfo(V, Val); 1046 return Val; 1047} 1048 1049/// getValueImpl - Helper function for getValue and getNonRegisterValue. 1050/// Create an SDValue for the given value. 1051SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { 1052 const TargetLowering *TLI = TM.getTargetLowering(); 1053 1054 if (const Constant *C = dyn_cast<Constant>(V)) { 1055 EVT VT = TLI->getValueType(V->getType(), true); 1056 1057 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C)) 1058 return DAG.getConstant(*CI, VT); 1059 1060 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) 1061 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT); 1062 1063 if (isa<ConstantPointerNull>(C)) { 1064 unsigned AS = V->getType()->getPointerAddressSpace(); 1065 return DAG.getConstant(0, TLI->getPointerTy(AS)); 1066 } 1067 1068 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) 1069 return DAG.getConstantFP(*CFP, VT); 1070 1071 if (isa<UndefValue>(C) && !V->getType()->isAggregateType()) 1072 return DAG.getUNDEF(VT); 1073 1074 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 1075 visit(CE->getOpcode(), *CE); 1076 SDValue N1 = NodeMap[V]; 1077 assert(N1.getNode() && "visit didn't populate the NodeMap!"); 1078 return N1; 1079 } 1080 1081 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) { 1082 SmallVector<SDValue, 4> Constants; 1083 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end(); 1084 OI != OE; ++OI) { 1085 SDNode *Val = getValue(*OI).getNode(); 1086 // If the operand is an empty aggregate, there are no values. 1087 if (!Val) continue; 1088 // Add each leaf value from the operand to the Constants list 1089 // to form a flattened list of all the values. 1090 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) 1091 Constants.push_back(SDValue(Val, i)); 1092 } 1093 1094 return DAG.getMergeValues(&Constants[0], Constants.size(), 1095 getCurSDLoc()); 1096 } 1097 1098 if (const ConstantDataSequential *CDS = 1099 dyn_cast<ConstantDataSequential>(C)) { 1100 SmallVector<SDValue, 4> Ops; 1101 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 1102 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode(); 1103 // Add each leaf value from the operand to the Constants list 1104 // to form a flattened list of all the values. 
1105 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) 1106 Ops.push_back(SDValue(Val, i)); 1107 } 1108 1109 if (isa<ArrayType>(CDS->getType())) 1110 return DAG.getMergeValues(&Ops[0], Ops.size(), getCurSDLoc()); 1111 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), 1112 VT, &Ops[0], Ops.size()); 1113 } 1114 1115 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { 1116 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && 1117 "Unknown struct or array constant!"); 1118 1119 SmallVector<EVT, 4> ValueVTs; 1120 ComputeValueVTs(*TLI, C->getType(), ValueVTs); 1121 unsigned NumElts = ValueVTs.size(); 1122 if (NumElts == 0) 1123 return SDValue(); // empty struct 1124 SmallVector<SDValue, 4> Constants(NumElts); 1125 for (unsigned i = 0; i != NumElts; ++i) { 1126 EVT EltVT = ValueVTs[i]; 1127 if (isa<UndefValue>(C)) 1128 Constants[i] = DAG.getUNDEF(EltVT); 1129 else if (EltVT.isFloatingPoint()) 1130 Constants[i] = DAG.getConstantFP(0, EltVT); 1131 else 1132 Constants[i] = DAG.getConstant(0, EltVT); 1133 } 1134 1135 return DAG.getMergeValues(&Constants[0], NumElts, 1136 getCurSDLoc()); 1137 } 1138 1139 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) 1140 return DAG.getBlockAddress(BA, VT); 1141 1142 VectorType *VecTy = cast<VectorType>(V->getType()); 1143 unsigned NumElements = VecTy->getNumElements(); 1144 1145 // Now that we know the number and type of the elements, get that number of 1146 // elements into the Ops array based on what kind of constant it is. 1147 SmallVector<SDValue, 16> Ops; 1148 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) { 1149 for (unsigned i = 0; i != NumElements; ++i) 1150 Ops.push_back(getValue(CV->getOperand(i))); 1151 } else { 1152 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!"); 1153 EVT EltVT = TLI->getValueType(VecTy->getElementType()); 1154 1155 SDValue Op; 1156 if (EltVT.isFloatingPoint()) 1157 Op = DAG.getConstantFP(0, EltVT); 1158 else 1159 Op = DAG.getConstant(0, EltVT); 1160 Ops.assign(NumElements, Op); 1161 } 1162 1163 // Create a BUILD_VECTOR node. 1164 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), 1165 VT, &Ops[0], Ops.size()); 1166 } 1167 1168 // If this is a static alloca, generate it as the frameindex instead of 1169 // computation. 1170 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1171 DenseMap<const AllocaInst*, int>::iterator SI = 1172 FuncInfo.StaticAllocaMap.find(AI); 1173 if (SI != FuncInfo.StaticAllocaMap.end()) 1174 return DAG.getFrameIndex(SI->second, TLI->getPointerTy()); 1175 } 1176 1177 // If this is an instruction which fast-isel has deferred, select it now. 1178 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 1179 unsigned InReg = FuncInfo.InitializeRegForValue(Inst); 1180 RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType()); 1181 SDValue Chain = DAG.getEntryNode(); 1182 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V); 1183 } 1184 1185 llvm_unreachable("Can't get register for value!"); 1186} 1187 1188void SelectionDAGBuilder::visitRet(const ReturnInst &I) { 1189 const TargetLowering *TLI = TM.getTargetLowering(); 1190 SDValue Chain = getControlRoot(); 1191 SmallVector<ISD::OutputArg, 8> Outs; 1192 SmallVector<SDValue, 8> OutVals; 1193 1194 if (!FuncInfo.CanLowerReturn) { 1195 unsigned DemoteReg = FuncInfo.DemoteRegister; 1196 const Function *F = I.getParent()->getParent(); 1197 1198 // Emit a store of the return value through the virtual register. 
1199 // Leave Outs empty so that LowerReturn won't try to load return 1200 // registers the usual way. 1201 SmallVector<EVT, 1> PtrValueVTs; 1202 ComputeValueVTs(*TLI, PointerType::getUnqual(F->getReturnType()), 1203 PtrValueVTs); 1204 1205 SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]); 1206 SDValue RetOp = getValue(I.getOperand(0)); 1207 1208 SmallVector<EVT, 4> ValueVTs; 1209 SmallVector<uint64_t, 4> Offsets; 1210 ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets); 1211 unsigned NumValues = ValueVTs.size(); 1212 1213 SmallVector<SDValue, 4> Chains(NumValues); 1214 for (unsigned i = 0; i != NumValues; ++i) { 1215 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), 1216 RetPtr.getValueType(), RetPtr, 1217 DAG.getIntPtrConstant(Offsets[i])); 1218 Chains[i] = 1219 DAG.getStore(Chain, getCurSDLoc(), 1220 SDValue(RetOp.getNode(), RetOp.getResNo() + i), 1221 // FIXME: better loc info would be nice. 1222 Add, MachinePointerInfo(), false, false, 0); 1223 } 1224 1225 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 1226 MVT::Other, &Chains[0], NumValues); 1227 } else if (I.getNumOperands() != 0) { 1228 SmallVector<EVT, 4> ValueVTs; 1229 ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs); 1230 unsigned NumValues = ValueVTs.size(); 1231 if (NumValues) { 1232 SDValue RetOp = getValue(I.getOperand(0)); 1233 for (unsigned j = 0, f = NumValues; j != f; ++j) { 1234 EVT VT = ValueVTs[j]; 1235 1236 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 1237 1238 const Function *F = I.getParent()->getParent(); 1239 if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex, 1240 Attribute::SExt)) 1241 ExtendKind = ISD::SIGN_EXTEND; 1242 else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex, 1243 Attribute::ZExt)) 1244 ExtendKind = ISD::ZERO_EXTEND; 1245 1246 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) 1247 VT = TLI->getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind); 1248 1249 unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), VT); 1250 MVT PartVT = TLI->getRegisterType(*DAG.getContext(), VT); 1251 SmallVector<SDValue, 4> Parts(NumParts); 1252 getCopyToParts(DAG, getCurSDLoc(), 1253 SDValue(RetOp.getNode(), RetOp.getResNo() + j), 1254 &Parts[0], NumParts, PartVT, &I, ExtendKind); 1255 1256 // 'inreg' on function refers to return value 1257 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 1258 if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex, 1259 Attribute::InReg)) 1260 Flags.setInReg(); 1261 1262 // Propagate extension type if any 1263 if (ExtendKind == ISD::SIGN_EXTEND) 1264 Flags.setSExt(); 1265 else if (ExtendKind == ISD::ZERO_EXTEND) 1266 Flags.setZExt(); 1267 1268 for (unsigned i = 0; i < NumParts; ++i) { 1269 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(), 1270 VT, /*isfixed=*/true, 0, 0)); 1271 OutVals.push_back(Parts[i]); 1272 } 1273 } 1274 } 1275 } 1276 1277 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 1278 CallingConv::ID CallConv = 1279 DAG.getMachineFunction().getFunction()->getCallingConv(); 1280 Chain = TM.getTargetLowering()->LowerReturn(Chain, CallConv, isVarArg, 1281 Outs, OutVals, getCurSDLoc(), 1282 DAG); 1283 1284 // Verify that the target's LowerReturn behaved as expected. 1285 assert(Chain.getNode() && Chain.getValueType() == MVT::Other && 1286 "LowerReturn didn't return a valid chain!"); 1287 1288 // Update the DAG with the new chain value resulting from return lowering. 
1289 DAG.setRoot(Chain); 1290} 1291 1292/// CopyToExportRegsIfNeeded - If the given value has virtual registers 1293/// created for it, emit nodes to copy the value into the virtual 1294/// registers. 1295void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { 1296 // Skip empty types 1297 if (V->getType()->isEmptyTy()) 1298 return; 1299 1300 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); 1301 if (VMI != FuncInfo.ValueMap.end()) { 1302 assert(!V->use_empty() && "Unused value assigned virtual registers!"); 1303 CopyValueToVirtualRegister(V, VMI->second); 1304 } 1305} 1306 1307/// ExportFromCurrentBlock - If this condition isn't known to be exported from 1308/// the current basic block, add it to ValueMap now so that we'll get a 1309/// CopyTo/FromReg. 1310void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { 1311 // No need to export constants. 1312 if (!isa<Instruction>(V) && !isa<Argument>(V)) return; 1313 1314 // Already exported? 1315 if (FuncInfo.isExportedInst(V)) return; 1316 1317 unsigned Reg = FuncInfo.InitializeRegForValue(V); 1318 CopyValueToVirtualRegister(V, Reg); 1319} 1320 1321bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, 1322 const BasicBlock *FromBB) { 1323 // The operands of the setcc have to be in this block. We don't know 1324 // how to export them from some other block. 1325 if (const Instruction *VI = dyn_cast<Instruction>(V)) { 1326 // Can export from current BB. 1327 if (VI->getParent() == FromBB) 1328 return true; 1329 1330 // Is already exported, noop. 1331 return FuncInfo.isExportedInst(V); 1332 } 1333 1334 // If this is an argument, we can export it if the BB is the entry block or 1335 // if it is already exported. 1336 if (isa<Argument>(V)) { 1337 if (FromBB == &FromBB->getParent()->getEntryBlock()) 1338 return true; 1339 1340 // Otherwise, can only export this if it is already exported. 1341 return FuncInfo.isExportedInst(V); 1342 } 1343 1344 // Otherwise, constants can always be exported. 1345 return true; 1346} 1347 1348/// Return branch probability calculated by BranchProbabilityInfo for IR blocks. 1349uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src, 1350 const MachineBasicBlock *Dst) const { 1351 BranchProbabilityInfo *BPI = FuncInfo.BPI; 1352 if (!BPI) 1353 return 0; 1354 const BasicBlock *SrcBB = Src->getBasicBlock(); 1355 const BasicBlock *DstBB = Dst->getBasicBlock(); 1356 return BPI->getEdgeWeight(SrcBB, DstBB); 1357} 1358 1359void SelectionDAGBuilder:: 1360addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst, 1361 uint32_t Weight /* = 0 */) { 1362 if (!Weight) 1363 Weight = getEdgeWeight(Src, Dst); 1364 Src->addSuccessor(Dst, Weight); 1365} 1366 1367 1368static bool InBlock(const Value *V, const BasicBlock *BB) { 1369 if (const Instruction *I = dyn_cast<Instruction>(V)) 1370 return I->getParent() == BB; 1371 return true; 1372} 1373 1374/// EmitBranchForMergedCondition - Helper method for FindMergedConditions. 1375/// This function emits a branch and is used at the leaves of an OR or an 1376/// AND operator tree. 1377/// 1378void 1379SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond, 1380 MachineBasicBlock *TBB, 1381 MachineBasicBlock *FBB, 1382 MachineBasicBlock *CurBB, 1383 MachineBasicBlock *SwitchBB, 1384 uint32_t TWeight, 1385 uint32_t FWeight) { 1386 const BasicBlock *BB = CurBB->getBasicBlock(); 1387 1388 // If the leaf of the tree is a comparison, merge the condition into 1389 // the caseblock. 
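// For example (illustrative IR), when branching on '%or = or i1 %cmp, %c'
// with '%cmp = icmp slt i32 %a, %b', the leaf %cmp is recorded as a CaseBlock
// with condition SETLT rather than being emitted as a separate setcc feeding
// an OR.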
1390 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) { 1391 // The operands of the cmp have to be in this block. We don't know 1392 // how to export them from some other block. If this is the first block 1393 // of the sequence, no exporting is needed. 1394 if (CurBB == SwitchBB || 1395 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) && 1396 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) { 1397 ISD::CondCode Condition; 1398 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 1399 Condition = getICmpCondCode(IC->getPredicate()); 1400 } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) { 1401 Condition = getFCmpCondCode(FC->getPredicate()); 1402 if (TM.Options.NoNaNsFPMath) 1403 Condition = getFCmpCodeWithoutNaN(Condition); 1404 } else { 1405 Condition = ISD::SETEQ; // silence warning. 1406 llvm_unreachable("Unknown compare instruction"); 1407 } 1408 1409 CaseBlock CB(Condition, BOp->getOperand(0), 1410 BOp->getOperand(1), NULL, TBB, FBB, CurBB, TWeight, FWeight); 1411 SwitchCases.push_back(CB); 1412 return; 1413 } 1414 } 1415 1416 // Create a CaseBlock record representing this branch. 1417 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()), 1418 NULL, TBB, FBB, CurBB, TWeight, FWeight); 1419 SwitchCases.push_back(CB); 1420} 1421 1422/// Scale down both weights to fit into uint32_t. 1423static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { 1424 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; 1425 uint32_t Scale = (NewMax / UINT32_MAX) + 1; 1426 NewTrue = NewTrue / Scale; 1427 NewFalse = NewFalse / Scale; 1428} 1429 1430/// FindMergedConditions - If Cond is an expression like 1431void SelectionDAGBuilder::FindMergedConditions(const Value *Cond, 1432 MachineBasicBlock *TBB, 1433 MachineBasicBlock *FBB, 1434 MachineBasicBlock *CurBB, 1435 MachineBasicBlock *SwitchBB, 1436 unsigned Opc, uint32_t TWeight, 1437 uint32_t FWeight) { 1438 // If this node is not part of the or/and tree, emit it as a branch. 1439 const Instruction *BOp = dyn_cast<Instruction>(Cond); 1440 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) || 1441 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() || 1442 BOp->getParent() != CurBB->getBasicBlock() || 1443 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) || 1444 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) { 1445 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, 1446 TWeight, FWeight); 1447 return; 1448 } 1449 1450 // Create TmpBB after CurBB. 1451 MachineFunction::iterator BBI = CurBB; 1452 MachineFunction &MF = DAG.getMachineFunction(); 1453 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock()); 1454 CurBB->getParent()->insert(++BBI, TmpBB); 1455 1456 if (Opc == Instruction::Or) { 1457 // Codegen X | Y as: 1458 // BB1: 1459 // jmp_if_X TBB 1460 // jmp TmpBB 1461 // TmpBB: 1462 // jmp_if_Y TBB 1463 // jmp FBB 1464 // 1465 1466 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 1467 // The requirement is that 1468 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 1469 // = TrueProb for orignal BB. 1470 // Assuming the orignal weights are A and B, one choice is to set BB1's 1471 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice 1472 // assumes that 1473 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 1474 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 1475 // TmpBB, but the math is more complicated. 
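// Worked example: with original weights A = TWeight = 3 and B = FWeight = 1,
// BB1 gets weights (A, A+2B) = (3, 5) and TmpBB gets (A, 2B) = (3, 2), so
//   TrueProb(BB1) + FalseProb(BB1) * TrueProb(TmpBB)
//     = 3/8 + (5/8) * (3/5) = 3/4,
// which matches the original TrueProb of A/(A+B) = 3/4.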
1476 1477 uint64_t NewTrueWeight = TWeight; 1478 uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight; 1479 ScaleWeights(NewTrueWeight, NewFalseWeight); 1480 // Emit the LHS condition. 1481 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc, 1482 NewTrueWeight, NewFalseWeight); 1483 1484 NewTrueWeight = TWeight; 1485 NewFalseWeight = 2 * (uint64_t)FWeight; 1486 ScaleWeights(NewTrueWeight, NewFalseWeight); 1487 // Emit the RHS condition into TmpBB. 1488 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 1489 NewTrueWeight, NewFalseWeight); 1490 } else { 1491 assert(Opc == Instruction::And && "Unknown merge op!"); 1492 // Codegen X & Y as: 1493 // BB1: 1494 // jmp_if_X TmpBB 1495 // jmp FBB 1496 // TmpBB: 1497 // jmp_if_Y TBB 1498 // jmp FBB 1499 // 1500 // This requires creation of TmpBB after CurBB. 1501 1502 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 1503 // The requirement is that 1504 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 1505 // = FalseProb for orignal BB. 1506 // Assuming the orignal weights are A and B, one choice is to set BB1's 1507 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice 1508 // assumes that 1509 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. 1510 1511 uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight; 1512 uint64_t NewFalseWeight = FWeight; 1513 ScaleWeights(NewTrueWeight, NewFalseWeight); 1514 // Emit the LHS condition. 1515 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc, 1516 NewTrueWeight, NewFalseWeight); 1517 1518 NewTrueWeight = 2 * (uint64_t)TWeight; 1519 NewFalseWeight = FWeight; 1520 ScaleWeights(NewTrueWeight, NewFalseWeight); 1521 // Emit the RHS condition into TmpBB. 1522 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 1523 NewTrueWeight, NewFalseWeight); 1524 } 1525} 1526 1527/// If the set of cases should be emitted as a series of branches, return true. 1528/// If we should emit this as a bunch of and/or'd together conditions, return 1529/// false. 1530bool 1531SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) { 1532 if (Cases.size() != 2) return true; 1533 1534 // If this is two comparisons of the same values or'd or and'd together, they 1535 // will get folded into a single comparison, so don't emit two blocks. 1536 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 1537 Cases[0].CmpRHS == Cases[1].CmpRHS) || 1538 (Cases[0].CmpRHS == Cases[1].CmpLHS && 1539 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 1540 return false; 1541 } 1542 1543 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 1544 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 1545 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 1546 Cases[0].CC == Cases[1].CC && 1547 isa<Constant>(Cases[0].CmpRHS) && 1548 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 1549 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB) 1550 return false; 1551 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB) 1552 return false; 1553 } 1554 1555 return true; 1556} 1557 1558void SelectionDAGBuilder::visitBr(const BranchInst &I) { 1559 MachineBasicBlock *BrMBB = FuncInfo.MBB; 1560 1561 // Update machine-CFG edges. 1562 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 1563 1564 // Figure out which block is immediately after the current one. 
1565 MachineBasicBlock *NextBlock = 0; 1566 MachineFunction::iterator BBI = BrMBB; 1567 if (++BBI != FuncInfo.MF->end()) 1568 NextBlock = BBI; 1569 1570 if (I.isUnconditional()) { 1571 // Update machine-CFG edges. 1572 BrMBB->addSuccessor(Succ0MBB); 1573 1574 // If this is not a fall-through branch or optimizations are switched off, 1575 // emit the branch. 1576 if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None) 1577 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 1578 MVT::Other, getControlRoot(), 1579 DAG.getBasicBlock(Succ0MBB))); 1580 1581 return; 1582 } 1583 1584 // If this condition is one of the special cases we handle, do special stuff 1585 // now. 1586 const Value *CondVal = I.getCondition(); 1587 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 1588 1589 // If this is a series of conditions that are or'd or and'd together, emit 1590 // this as a sequence of branches instead of setcc's with and/or operations. 1591 // As long as jumps are not expensive, this should improve performance. 1592 // For example, instead of something like: 1593 // cmp A, B 1594 // C = seteq 1595 // cmp D, E 1596 // F = setle 1597 // or C, F 1598 // jnz foo 1599 // Emit: 1600 // cmp A, B 1601 // je foo 1602 // cmp D, E 1603 // jle foo 1604 // 1605 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { 1606 if (!TM.getTargetLowering()->isJumpExpensive() && 1607 BOp->hasOneUse() && 1608 (BOp->getOpcode() == Instruction::And || 1609 BOp->getOpcode() == Instruction::Or)) { 1610 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, 1611 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB), 1612 getEdgeWeight(BrMBB, Succ1MBB)); 1613 // If the compares in later blocks need to use values not currently 1614 // exported from this block, export them now. This block should always 1615 // be the first entry. 1616 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); 1617 1618 // Allow some cases to be rejected. 1619 if (ShouldEmitAsBranches(SwitchCases)) { 1620 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) { 1621 ExportFromCurrentBlock(SwitchCases[i].CmpLHS); 1622 ExportFromCurrentBlock(SwitchCases[i].CmpRHS); 1623 } 1624 1625 // Emit the branch for this block. 1626 visitSwitchCase(SwitchCases[0], BrMBB); 1627 SwitchCases.erase(SwitchCases.begin()); 1628 return; 1629 } 1630 1631 // Okay, we decided not to do this, remove any inserted MBB's and clear 1632 // SwitchCases. 1633 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) 1634 FuncInfo.MF->erase(SwitchCases[i].ThisBB); 1635 1636 SwitchCases.clear(); 1637 } 1638 } 1639 1640 // Create a CaseBlock record representing this branch. 1641 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), 1642 NULL, Succ0MBB, Succ1MBB, BrMBB); 1643 1644 // Use visitSwitchCase to actually insert the fast branch sequence for this 1645 // cond branch. 1646 visitSwitchCase(CB, BrMBB); 1647} 1648 1649/// visitSwitchCase - Emits the necessary code to represent a single node in 1650/// the binary search tree resulting from lowering a switch instruction. 1651void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, 1652 MachineBasicBlock *SwitchBB) { 1653 SDValue Cond; 1654 SDValue CondLHS = getValue(CB.CmpLHS); 1655 SDLoc dl = getCurSDLoc(); 1656 1657 // Build the setcc now. 1658 if (CB.CmpMHS == NULL) { 1659 // Fold "(X == true)" to X and "(X == false)" to !X to 1660 // handle common cases produced by branch lowering. 
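    // For example, the CaseBlock that visitBr creates for a plain conditional
    // branch compares the IR condition against 'true'; folding it here avoids
    // emitting a redundant setcc.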
1661 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) && 1662 CB.CC == ISD::SETEQ) 1663 Cond = CondLHS; 1664 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) && 1665 CB.CC == ISD::SETEQ) { 1666 SDValue True = DAG.getConstant(1, CondLHS.getValueType()); 1667 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True); 1668 } else 1669 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC); 1670 } else { 1671 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now"); 1672 1673 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 1674 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 1675 1676 SDValue CmpOp = getValue(CB.CmpMHS); 1677 EVT VT = CmpOp.getValueType(); 1678 1679 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 1680 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT), 1681 ISD::SETLE); 1682 } else { 1683 SDValue SUB = DAG.getNode(ISD::SUB, dl, 1684 VT, CmpOp, DAG.getConstant(Low, VT)); 1685 Cond = DAG.getSetCC(dl, MVT::i1, SUB, 1686 DAG.getConstant(High-Low, VT), ISD::SETULE); 1687 } 1688 } 1689 1690 // Update successor info 1691 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight); 1692 // TrueBB and FalseBB are always different unless the incoming IR is 1693 // degenerate. This only happens when running llc on weird IR. 1694 if (CB.TrueBB != CB.FalseBB) 1695 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight); 1696 1697 // Set NextBlock to be the MBB immediately after the current one, if any. 1698 // This is used to avoid emitting unnecessary branches to the next block. 1699 MachineBasicBlock *NextBlock = 0; 1700 MachineFunction::iterator BBI = SwitchBB; 1701 if (++BBI != FuncInfo.MF->end()) 1702 NextBlock = BBI; 1703 1704 // If the lhs block is the next block, invert the condition so that we can 1705 // fall through to the lhs instead of the rhs block. 1706 if (CB.TrueBB == NextBlock) { 1707 std::swap(CB.TrueBB, CB.FalseBB); 1708 SDValue True = DAG.getConstant(1, Cond.getValueType()); 1709 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True); 1710 } 1711 1712 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 1713 MVT::Other, getControlRoot(), Cond, 1714 DAG.getBasicBlock(CB.TrueBB)); 1715 1716 // Insert the false branch. Do this even if it's a fall through branch, 1717 // this makes it easier to do DAG optimizations which require inverting 1718 // the branch condition. 1719 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, 1720 DAG.getBasicBlock(CB.FalseBB)); 1721 1722 DAG.setRoot(BrCond); 1723} 1724 1725/// visitJumpTable - Emit JumpTable node in the current MBB 1726void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) { 1727 // Emit the code for the jump table 1728 assert(JT.Reg != -1U && "Should lower JT Header first!"); 1729 EVT PTy = TM.getTargetLowering()->getPointerTy(); 1730 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(), 1731 JT.Reg, PTy); 1732 SDValue Table = DAG.getJumpTable(JT.JTI, PTy); 1733 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(), 1734 MVT::Other, Index.getValue(1), 1735 Table, Index); 1736 DAG.setRoot(BrJumpTable); 1737} 1738 1739/// visitJumpTableHeader - This function emits necessary code to produce index 1740/// in the JumpTable from switch case. 
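/// The emitted sequence is: subtract the lowest case value from the value
/// being switched on, zero-extend or truncate the result to the pointer type,
/// copy it into the virtual register recorded in JT.Reg, and emit an unsigned
/// comparison (SETUGT against Last - First) that branches to the default block
/// when the value is out of range; otherwise control branches (or falls
/// through) to the jump-table block.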
1741void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, 1742 JumpTableHeader &JTH, 1743 MachineBasicBlock *SwitchBB) { 1744 // Subtract the lowest switch case value from the value being switched on and 1745 // conditional branch to default mbb if the result is greater than the 1746 // difference between smallest and largest cases. 1747 SDValue SwitchOp = getValue(JTH.SValue); 1748 EVT VT = SwitchOp.getValueType(); 1749 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp, 1750 DAG.getConstant(JTH.First, VT)); 1751 1752 // The SDNode we just created, which holds the value being switched on minus 1753 // the smallest case value, needs to be copied to a virtual register so it 1754 // can be used as an index into the jump table in a subsequent basic block. 1755 // This value may be smaller or larger than the target's pointer type, and 1756 // therefore require extension or truncating. 1757 const TargetLowering *TLI = TM.getTargetLowering(); 1758 SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy()); 1759 1760 unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy()); 1761 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(), 1762 JumpTableReg, SwitchOp); 1763 JT.Reg = JumpTableReg; 1764 1765 // Emit the range check for the jump table, and branch to the default block 1766 // for the switch statement if the value being switched on exceeds the largest 1767 // case in the switch. 1768 SDValue CMP = DAG.getSetCC(getCurSDLoc(), 1769 TLI->getSetCCResultType(*DAG.getContext(), 1770 Sub.getValueType()), 1771 Sub, 1772 DAG.getConstant(JTH.Last - JTH.First,VT), 1773 ISD::SETUGT); 1774 1775 // Set NextBlock to be the MBB immediately after the current one, if any. 1776 // This is used to avoid emitting unnecessary branches to the next block. 1777 MachineBasicBlock *NextBlock = 0; 1778 MachineFunction::iterator BBI = SwitchBB; 1779 1780 if (++BBI != FuncInfo.MF->end()) 1781 NextBlock = BBI; 1782 1783 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(), 1784 MVT::Other, CopyTo, CMP, 1785 DAG.getBasicBlock(JT.Default)); 1786 1787 if (JT.MBB != NextBlock) 1788 BrCond = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrCond, 1789 DAG.getBasicBlock(JT.MBB)); 1790 1791 DAG.setRoot(BrCond); 1792} 1793 1794/// Codegen a new tail for a stack protector check ParentMBB which has had its 1795/// tail spliced into a stack protector check success bb. 1796/// 1797/// For a high level explanation of how this fits into the stack protector 1798/// generation see the comment on the declaration of class 1799/// StackProtectorDescriptor. 1800void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, 1801 MachineBasicBlock *ParentBB) { 1802 1803 // First create the loads to the guard/stack slot for the comparison. 
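  // The code below loads both the guard value and the value saved in the stack
  // protector slot, subtracts one from the other, and tests the difference
  // against zero with SETNE: a non-zero result means the slot was clobbered and
  // we branch to the failure block, otherwise we branch to the success block.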
1804 const TargetLowering *TLI = TM.getTargetLowering(); 1805 EVT PtrTy = TLI->getPointerTy(); 1806 1807 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo(); 1808 int FI = MFI->getStackProtectorIndex(); 1809 1810 const Value *IRGuard = SPD.getGuard(); 1811 SDValue GuardPtr = getValue(IRGuard); 1812 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); 1813 1814 unsigned Align = 1815 TLI->getDataLayout()->getPrefTypeAlignment(IRGuard->getType()); 1816 SDValue Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(), 1817 GuardPtr, MachinePointerInfo(IRGuard, 0), 1818 true, false, false, Align); 1819 1820 SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(), 1821 StackSlotPtr, 1822 MachinePointerInfo::getFixedStack(FI), 1823 true, false, false, Align); 1824 1825 // Perform the comparison via a subtract/getsetcc. 1826 EVT VT = Guard.getValueType(); 1827 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, Guard, StackSlot); 1828 1829 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), 1830 TLI->getSetCCResultType(*DAG.getContext(), 1831 Sub.getValueType()), 1832 Sub, DAG.getConstant(0, VT), 1833 ISD::SETNE); 1834 1835 // If the sub is not 0, then we know the guard/stackslot do not equal, so 1836 // branch to failure MBB. 1837 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(), 1838 MVT::Other, StackSlot.getOperand(0), 1839 Cmp, DAG.getBasicBlock(SPD.getFailureMBB())); 1840 // Otherwise branch to success MBB. 1841 SDValue Br = DAG.getNode(ISD::BR, getCurSDLoc(), 1842 MVT::Other, BrCond, 1843 DAG.getBasicBlock(SPD.getSuccessMBB())); 1844 1845 DAG.setRoot(Br); 1846} 1847 1848/// Codegen the failure basic block for a stack protector check. 1849/// 1850/// A failure stack protector machine basic block consists simply of a call to 1851/// __stack_chk_fail(). 1852/// 1853/// For a high level explanation of how this fits into the stack protector 1854/// generation see the comment on the declaration of class 1855/// StackProtectorDescriptor. 1856void 1857SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) { 1858 const TargetLowering *TLI = TM.getTargetLowering(); 1859 SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, 1860 MVT::isVoid, 0, 0, false, getCurSDLoc(), 1861 false, false).second; 1862 DAG.setRoot(Chain); 1863} 1864 1865/// visitBitTestHeader - This function emits necessary code to produce value 1866/// suitable for "bit tests" 1867void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B, 1868 MachineBasicBlock *SwitchBB) { 1869 // Subtract the minimum value 1870 SDValue SwitchOp = getValue(B.SValue); 1871 EVT VT = SwitchOp.getValueType(); 1872 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp, 1873 DAG.getConstant(B.First, VT)); 1874 1875 // Check range 1876 const TargetLowering *TLI = TM.getTargetLowering(); 1877 SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(), 1878 TLI->getSetCCResultType(*DAG.getContext(), 1879 Sub.getValueType()), 1880 Sub, DAG.getConstant(B.Range, VT), 1881 ISD::SETUGT); 1882 1883 // Determine the type of the test operands. 1884 bool UsePtrType = false; 1885 if (!TLI->isTypeLegal(VT)) 1886 UsePtrType = true; 1887 else { 1888 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i) 1889 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) { 1890 // Switch table case range are encoded into series of masks. 1891 // Just use pointer type, it's guaranteed to fit. 
1892 UsePtrType = true; 1893 break; 1894 } 1895 } 1896 if (UsePtrType) { 1897 VT = TLI->getPointerTy(); 1898 Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT); 1899 } 1900 1901 B.RegVT = VT.getSimpleVT(); 1902 B.Reg = FuncInfo.CreateReg(B.RegVT); 1903 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(), 1904 B.Reg, Sub); 1905 1906 // Set NextBlock to be the MBB immediately after the current one, if any. 1907 // This is used to avoid emitting unnecessary branches to the next block. 1908 MachineBasicBlock *NextBlock = 0; 1909 MachineFunction::iterator BBI = SwitchBB; 1910 if (++BBI != FuncInfo.MF->end()) 1911 NextBlock = BBI; 1912 1913 MachineBasicBlock* MBB = B.Cases[0].ThisBB; 1914 1915 addSuccessorWithWeight(SwitchBB, B.Default); 1916 addSuccessorWithWeight(SwitchBB, MBB); 1917 1918 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurSDLoc(), 1919 MVT::Other, CopyTo, RangeCmp, 1920 DAG.getBasicBlock(B.Default)); 1921 1922 if (MBB != NextBlock) 1923 BrRange = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, CopyTo, 1924 DAG.getBasicBlock(MBB)); 1925 1926 DAG.setRoot(BrRange); 1927} 1928 1929/// visitBitTestCase - this function produces one "bit test" 1930void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, 1931 MachineBasicBlock* NextMBB, 1932 uint32_t BranchWeightToNext, 1933 unsigned Reg, 1934 BitTestCase &B, 1935 MachineBasicBlock *SwitchBB) { 1936 MVT VT = BB.RegVT; 1937 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(), 1938 Reg, VT); 1939 SDValue Cmp; 1940 unsigned PopCount = CountPopulation_64(B.Mask); 1941 const TargetLowering *TLI = TM.getTargetLowering(); 1942 if (PopCount == 1) { 1943 // Testing for a single bit; just compare the shift count with what it 1944 // would need to be to shift a 1 bit in that position. 1945 Cmp = DAG.getSetCC(getCurSDLoc(), 1946 TLI->getSetCCResultType(*DAG.getContext(), VT), 1947 ShiftOp, 1948 DAG.getConstant(countTrailingZeros(B.Mask), VT), 1949 ISD::SETEQ); 1950 } else if (PopCount == BB.Range) { 1951 // There is only one zero bit in the range, test for it directly. 1952 Cmp = DAG.getSetCC(getCurSDLoc(), 1953 TLI->getSetCCResultType(*DAG.getContext(), VT), 1954 ShiftOp, 1955 DAG.getConstant(CountTrailingOnes_64(B.Mask), VT), 1956 ISD::SETNE); 1957 } else { 1958 // Make desired shift 1959 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT, 1960 DAG.getConstant(1, VT), ShiftOp); 1961 1962 // Emit bit tests and jumps 1963 SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(), 1964 VT, SwitchVal, DAG.getConstant(B.Mask, VT)); 1965 Cmp = DAG.getSetCC(getCurSDLoc(), 1966 TLI->getSetCCResultType(*DAG.getContext(), VT), 1967 AndOp, DAG.getConstant(0, VT), 1968 ISD::SETNE); 1969 } 1970 1971 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight. 1972 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight); 1973 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext. 1974 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext); 1975 1976 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurSDLoc(), 1977 MVT::Other, getControlRoot(), 1978 Cmp, DAG.getBasicBlock(B.TargetBB)); 1979 1980 // Set NextBlock to be the MBB immediately after the current one, if any. 1981 // This is used to avoid emitting unnecessary branches to the next block. 
1982 MachineBasicBlock *NextBlock = 0; 1983 MachineFunction::iterator BBI = SwitchBB; 1984 if (++BBI != FuncInfo.MF->end()) 1985 NextBlock = BBI; 1986 1987 if (NextMBB != NextBlock) 1988 BrAnd = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrAnd, 1989 DAG.getBasicBlock(NextMBB)); 1990 1991 DAG.setRoot(BrAnd); 1992} 1993 1994void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { 1995 MachineBasicBlock *InvokeMBB = FuncInfo.MBB; 1996 1997 // Retrieve successors. 1998 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 1999 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)]; 2000 2001 const Value *Callee(I.getCalledValue()); 2002 const Function *Fn = dyn_cast<Function>(Callee); 2003 if (isa<InlineAsm>(Callee)) 2004 visitInlineAsm(&I); 2005 else if (Fn && Fn->isIntrinsic()) { 2006 assert(Fn->getIntrinsicID() == Intrinsic::donothing); 2007 // Ignore invokes to @llvm.donothing: jump directly to the next BB. 2008 } else 2009 LowerCallTo(&I, getValue(Callee), false, LandingPad); 2010 2011 // If the value of the invoke is used outside of its defining block, make it 2012 // available as a virtual register. 2013 CopyToExportRegsIfNeeded(&I); 2014 2015 // Update successor info 2016 addSuccessorWithWeight(InvokeMBB, Return); 2017 addSuccessorWithWeight(InvokeMBB, LandingPad); 2018 2019 // Drop into normal successor. 2020 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 2021 MVT::Other, getControlRoot(), 2022 DAG.getBasicBlock(Return))); 2023} 2024 2025void SelectionDAGBuilder::visitResume(const ResumeInst &RI) { 2026 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!"); 2027} 2028 2029void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) { 2030 assert(FuncInfo.MBB->isLandingPad() && 2031 "Call to landingpad not in landing pad!"); 2032 2033 MachineBasicBlock *MBB = FuncInfo.MBB; 2034 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 2035 AddLandingPadInfo(LP, MMI, MBB); 2036 2037 // If there aren't registers to copy the values into (e.g., during SjLj 2038 // exceptions), then don't bother to create these DAG nodes. 2039 const TargetLowering *TLI = TM.getTargetLowering(); 2040 if (TLI->getExceptionPointerRegister() == 0 && 2041 TLI->getExceptionSelectorRegister() == 0) 2042 return; 2043 2044 SmallVector<EVT, 2> ValueVTs; 2045 ComputeValueVTs(*TLI, LP.getType(), ValueVTs); 2046 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); 2047 2048 // Get the two live-in registers as SDValues. The physregs have already been 2049 // copied into virtual registers. 2050 SDValue Ops[2]; 2051 Ops[0] = DAG.getZExtOrTrunc( 2052 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), 2053 FuncInfo.ExceptionPointerVirtReg, TLI->getPointerTy()), 2054 getCurSDLoc(), ValueVTs[0]); 2055 Ops[1] = DAG.getZExtOrTrunc( 2056 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), 2057 FuncInfo.ExceptionSelectorVirtReg, TLI->getPointerTy()), 2058 getCurSDLoc(), ValueVTs[1]); 2059 2060 // Merge into one. 2061 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 2062 DAG.getVTList(&ValueVTs[0], ValueVTs.size()), 2063 &Ops[0], 2); 2064 setValue(&LP, Res); 2065} 2066 2067/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for 2068/// small case ranges). 2069bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, 2070 CaseRecVector& WorkList, 2071 const Value* SV, 2072 MachineBasicBlock *Default, 2073 MachineBasicBlock *SwitchBB) { 2074 // Size is the number of Cases represented by this range. 
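  // Only small ranges (at most three cases) are lowered here as a short chain
  // of explicit compares; for anything larger we return false so that
  // visitSwitch can try a jump table or a binary split instead.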
2075 size_t Size = CR.Range.second - CR.Range.first; 2076 if (Size > 3) 2077 return false; 2078 2079 // Get the MachineFunction which holds the current MBB. This is used when 2080 // inserting any additional MBBs necessary to represent the switch. 2081 MachineFunction *CurMF = FuncInfo.MF; 2082 2083 // Figure out which block is immediately after the current one. 2084 MachineBasicBlock *NextBlock = 0; 2085 MachineFunction::iterator BBI = CR.CaseBB; 2086 2087 if (++BBI != FuncInfo.MF->end()) 2088 NextBlock = BBI; 2089 2090 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2091 // If any two of the cases has the same destination, and if one value 2092 // is the same as the other, but has one bit unset that the other has set, 2093 // use bit manipulation to do two compares at once. For example: 2094 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)" 2095 // TODO: This could be extended to merge any 2 cases in switches with 3 cases. 2096 // TODO: Handle cases where CR.CaseBB != SwitchBB. 2097 if (Size == 2 && CR.CaseBB == SwitchBB) { 2098 Case &Small = *CR.Range.first; 2099 Case &Big = *(CR.Range.second-1); 2100 2101 if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) { 2102 const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue(); 2103 const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue(); 2104 2105 // Check that there is only one bit different. 2106 if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 && 2107 (SmallValue | BigValue) == BigValue) { 2108 // Isolate the common bit. 2109 APInt CommonBit = BigValue & ~SmallValue; 2110 assert((SmallValue | CommonBit) == BigValue && 2111 CommonBit.countPopulation() == 1 && "Not a common bit?"); 2112 2113 SDValue CondLHS = getValue(SV); 2114 EVT VT = CondLHS.getValueType(); 2115 SDLoc DL = getCurSDLoc(); 2116 2117 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS, 2118 DAG.getConstant(CommonBit, VT)); 2119 SDValue Cond = DAG.getSetCC(DL, MVT::i1, 2120 Or, DAG.getConstant(BigValue, VT), 2121 ISD::SETEQ); 2122 2123 // Update successor info. 2124 // Both Small and Big will jump to Small.BB, so we sum up the weights. 2125 addSuccessorWithWeight(SwitchBB, Small.BB, 2126 Small.ExtraWeight + Big.ExtraWeight); 2127 addSuccessorWithWeight(SwitchBB, Default, 2128 // The default destination is the first successor in IR. 2129 BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0); 2130 2131 // Insert the true branch. 2132 SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other, 2133 getControlRoot(), Cond, 2134 DAG.getBasicBlock(Small.BB)); 2135 2136 // Insert the false branch. 2137 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond, 2138 DAG.getBasicBlock(Default)); 2139 2140 DAG.setRoot(BrCond); 2141 return true; 2142 } 2143 } 2144 } 2145 2146 // Order cases by weight so the most likely case will be checked first. 2147 uint32_t UnhandledWeights = 0; 2148 if (BPI) { 2149 for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) { 2150 uint32_t IWeight = I->ExtraWeight; 2151 UnhandledWeights += IWeight; 2152 for (CaseItr J = CR.Range.first; J < I; ++J) { 2153 uint32_t JWeight = J->ExtraWeight; 2154 if (IWeight > JWeight) 2155 std::swap(*I, *J); 2156 } 2157 } 2158 } 2159 // Rearrange the case blocks so that the last one falls through if possible. 2160 Case &BackCase = *(CR.Range.second-1); 2161 if (Size > 1 && 2162 NextBlock && Default != NextBlock && BackCase.BB != NextBlock) { 2163 // The last case block won't fall through into 'NextBlock' if we emit the 2164 // branches in this order. 
See if rearranging a case value would help. 2165 // We start at the bottom as it's the case with the least weight. 2166 for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I) 2167 if (I->BB == NextBlock) { 2168 std::swap(*I, BackCase); 2169 break; 2170 } 2171 } 2172 2173 // Create a CaseBlock record representing a conditional branch to 2174 // the Case's target mbb if the value being switched on SV is equal 2175 // to C. 2176 MachineBasicBlock *CurBlock = CR.CaseBB; 2177 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) { 2178 MachineBasicBlock *FallThrough; 2179 if (I != E-1) { 2180 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock()); 2181 CurMF->insert(BBI, FallThrough); 2182 2183 // Put SV in a virtual register to make it available from the new blocks. 2184 ExportFromCurrentBlock(SV); 2185 } else { 2186 // If the last case doesn't match, go to the default block. 2187 FallThrough = Default; 2188 } 2189 2190 const Value *RHS, *LHS, *MHS; 2191 ISD::CondCode CC; 2192 if (I->High == I->Low) { 2193 // This is just small small case range :) containing exactly 1 case 2194 CC = ISD::SETEQ; 2195 LHS = SV; RHS = I->High; MHS = NULL; 2196 } else { 2197 CC = ISD::SETLE; 2198 LHS = I->Low; MHS = SV; RHS = I->High; 2199 } 2200 2201 // The false weight should be sum of all un-handled cases. 2202 UnhandledWeights -= I->ExtraWeight; 2203 CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough, 2204 /* me */ CurBlock, 2205 /* trueweight */ I->ExtraWeight, 2206 /* falseweight */ UnhandledWeights); 2207 2208 // If emitting the first comparison, just call visitSwitchCase to emit the 2209 // code into the current block. Otherwise, push the CaseBlock onto the 2210 // vector to be later processed by SDISel, and insert the node's MBB 2211 // before the next MBB. 2212 if (CurBlock == SwitchBB) 2213 visitSwitchCase(CB, SwitchBB); 2214 else 2215 SwitchCases.push_back(CB); 2216 2217 CurBlock = FallThrough; 2218 } 2219 2220 return true; 2221} 2222 2223static inline bool areJTsAllowed(const TargetLowering &TLI) { 2224 return TLI.supportJumpTables() && 2225 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || 2226 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other)); 2227} 2228 2229static APInt ComputeRange(const APInt &First, const APInt &Last) { 2230 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1; 2231 APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth); 2232 return (LastExt - FirstExt + 1ULL); 2233} 2234 2235/// handleJTSwitchCase - Emit jumptable for current switch case range 2236bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR, 2237 CaseRecVector &WorkList, 2238 const Value *SV, 2239 MachineBasicBlock *Default, 2240 MachineBasicBlock *SwitchBB) { 2241 Case& FrontCase = *CR.Range.first; 2242 Case& BackCase = *(CR.Range.second-1); 2243 2244 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue(); 2245 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue(); 2246 2247 APInt TSize(First.getBitWidth(), 0); 2248 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) 2249 TSize += I->size(); 2250 2251 const TargetLowering *TLI = TM.getTargetLowering(); 2252 if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries())) 2253 return false; 2254 2255 APInt Range = ComputeRange(First, Last); 2256 // The density is TSize / Range. Require at least 40%. 
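  // In integer form: TSize / Range >= 4/10  <=>  TSize * 10 >= Range * 4, which
  // is the comparison performed below.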
2257 // It should not be possible for IntTSize to saturate for sane code, but make 2258 // sure we handle Range saturation correctly. 2259 uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10); 2260 uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10); 2261 if (IntTSize * 10 < IntRange * 4) 2262 return false; 2263 2264 DEBUG(dbgs() << "Lowering jump table\n" 2265 << "First entry: " << First << ". Last entry: " << Last << '\n' 2266 << "Range: " << Range << ". Size: " << TSize << ".\n\n"); 2267 2268 // Get the MachineFunction which holds the current MBB. This is used when 2269 // inserting any additional MBBs necessary to represent the switch. 2270 MachineFunction *CurMF = FuncInfo.MF; 2271 2272 // Figure out which block is immediately after the current one. 2273 MachineFunction::iterator BBI = CR.CaseBB; 2274 ++BBI; 2275 2276 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock(); 2277 2278 // Create a new basic block to hold the code for loading the address 2279 // of the jump table, and jumping to it. Update successor information; 2280 // we will either branch to the default case for the switch, or the jump 2281 // table. 2282 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2283 CurMF->insert(BBI, JumpTableBB); 2284 2285 addSuccessorWithWeight(CR.CaseBB, Default); 2286 addSuccessorWithWeight(CR.CaseBB, JumpTableBB); 2287 2288 // Build a vector of destination BBs, corresponding to each target 2289 // of the jump table. If the value of the jump table slot corresponds to 2290 // a case statement, push the case's BB onto the vector, otherwise, push 2291 // the default BB. 2292 std::vector<MachineBasicBlock*> DestBBs; 2293 APInt TEI = First; 2294 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) { 2295 const APInt &Low = cast<ConstantInt>(I->Low)->getValue(); 2296 const APInt &High = cast<ConstantInt>(I->High)->getValue(); 2297 2298 if (Low.sle(TEI) && TEI.sle(High)) { 2299 DestBBs.push_back(I->BB); 2300 if (TEI==High) 2301 ++I; 2302 } else { 2303 DestBBs.push_back(Default); 2304 } 2305 } 2306 2307 // Calculate weight for each unique destination in CR. 2308 DenseMap<MachineBasicBlock*, uint32_t> DestWeights; 2309 if (FuncInfo.BPI) 2310 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) { 2311 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr = 2312 DestWeights.find(I->BB); 2313 if (Itr != DestWeights.end()) 2314 Itr->second += I->ExtraWeight; 2315 else 2316 DestWeights[I->BB] = I->ExtraWeight; 2317 } 2318 2319 // Update successor info. Add one edge to each unique successor. 2320 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs()); 2321 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(), 2322 E = DestBBs.end(); I != E; ++I) { 2323 if (!SuccsHandled[(*I)->getNumber()]) { 2324 SuccsHandled[(*I)->getNumber()] = true; 2325 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr = 2326 DestWeights.find(*I); 2327 addSuccessorWithWeight(JumpTableBB, *I, 2328 Itr != DestWeights.end() ? Itr->second : 0); 2329 } 2330 } 2331 2332 // Create a jump table index for this jump table. 
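  // DestBBs has one entry for every value in [First, Last]; values not covered
  // by any case map to the default block, so the table can be indexed directly
  // by (SwitchValue - First).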
2333 unsigned JTEncoding = TLI->getJumpTableEncoding(); 2334 unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding) 2335 ->createJumpTableIndex(DestBBs); 2336 2337 // Set the jump table information so that we can codegen it as a second 2338 // MachineBasicBlock 2339 JumpTable JT(-1U, JTI, JumpTableBB, Default); 2340 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB)); 2341 if (CR.CaseBB == SwitchBB) 2342 visitJumpTableHeader(JT, JTH, SwitchBB); 2343 2344 JTCases.push_back(JumpTableBlock(JTH, JT)); 2345 return true; 2346} 2347 2348/// handleBTSplitSwitchCase - emit comparison and split binary search tree into 2349/// 2 subtrees. 2350bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR, 2351 CaseRecVector& WorkList, 2352 const Value* SV, 2353 MachineBasicBlock* Default, 2354 MachineBasicBlock* SwitchBB) { 2355 // Get the MachineFunction which holds the current MBB. This is used when 2356 // inserting any additional MBBs necessary to represent the switch. 2357 MachineFunction *CurMF = FuncInfo.MF; 2358 2359 // Figure out which block is immediately after the current one. 2360 MachineFunction::iterator BBI = CR.CaseBB; 2361 ++BBI; 2362 2363 Case& FrontCase = *CR.Range.first; 2364 Case& BackCase = *(CR.Range.second-1); 2365 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock(); 2366 2367 // Size is the number of Cases represented by this range. 2368 unsigned Size = CR.Range.second - CR.Range.first; 2369 2370 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue(); 2371 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue(); 2372 double FMetric = 0; 2373 CaseItr Pivot = CR.Range.first + Size/2; 2374 2375 // Select optimal pivot, maximizing sum density of LHS and RHS. This will 2376 // (heuristically) allow us to emit JumpTable's later. 2377 APInt TSize(First.getBitWidth(), 0); 2378 for (CaseItr I = CR.Range.first, E = CR.Range.second; 2379 I!=E; ++I) 2380 TSize += I->size(); 2381 2382 APInt LSize = FrontCase.size(); 2383 APInt RSize = TSize-LSize; 2384 DEBUG(dbgs() << "Selecting best pivot: \n" 2385 << "First: " << First << ", Last: " << Last <<'\n' 2386 << "LSize: " << LSize << ", RSize: " << RSize << '\n'); 2387 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second; 2388 J!=E; ++I, ++J) { 2389 const APInt &LEnd = cast<ConstantInt>(I->High)->getValue(); 2390 const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue(); 2391 APInt Range = ComputeRange(LEnd, RBegin); 2392 assert((Range - 2ULL).isNonNegative() && 2393 "Invalid case distance"); 2394 // Use volatile double here to avoid excess precision issues on some hosts, 2395 // e.g. that use 80-bit X87 registers. 
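    // The quantity being maximized is log2(Range) * (LDensity + RDensity),
    // where LDensity is the number of case values left of the split divided by
    // the span of values they cover, and RDensity is the same for the right
    // side.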
2396 volatile double LDensity = 2397 (double)LSize.roundToDouble() / 2398 (LEnd - First + 1ULL).roundToDouble(); 2399 volatile double RDensity = 2400 (double)RSize.roundToDouble() / 2401 (Last - RBegin + 1ULL).roundToDouble(); 2402 volatile double Metric = Range.logBase2()*(LDensity+RDensity); 2403 // Should always split in some non-trivial place 2404 DEBUG(dbgs() <<"=>Step\n" 2405 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n' 2406 << "LDensity: " << LDensity 2407 << ", RDensity: " << RDensity << '\n' 2408 << "Metric: " << Metric << '\n'); 2409 if (FMetric < Metric) { 2410 Pivot = J; 2411 FMetric = Metric; 2412 DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n'); 2413 } 2414 2415 LSize += J->size(); 2416 RSize -= J->size(); 2417 } 2418 2419 const TargetLowering *TLI = TM.getTargetLowering(); 2420 if (areJTsAllowed(*TLI)) { 2421 // If our case is dense we *really* should handle it earlier! 2422 assert((FMetric > 0) && "Should handle dense range earlier!"); 2423 } else { 2424 Pivot = CR.Range.first + Size/2; 2425 } 2426 2427 CaseRange LHSR(CR.Range.first, Pivot); 2428 CaseRange RHSR(Pivot, CR.Range.second); 2429 const Constant *C = Pivot->Low; 2430 MachineBasicBlock *FalseBB = 0, *TrueBB = 0; 2431 2432 // We know that we branch to the LHS if the Value being switched on is 2433 // less than the Pivot value, C. We use this to optimize our binary 2434 // tree a bit, by recognizing that if SV is greater than or equal to the 2435 // LHS's Case Value, and that Case Value is exactly one less than the 2436 // Pivot's Value, then we can branch directly to the LHS's Target, 2437 // rather than creating a leaf node for it. 2438 if ((LHSR.second - LHSR.first) == 1 && 2439 LHSR.first->High == CR.GE && 2440 cast<ConstantInt>(C)->getValue() == 2441 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) { 2442 TrueBB = LHSR.first->BB; 2443 } else { 2444 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2445 CurMF->insert(BBI, TrueBB); 2446 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR)); 2447 2448 // Put SV in a virtual register to make it available from the new blocks. 2449 ExportFromCurrentBlock(SV); 2450 } 2451 2452 // Similar to the optimization above, if the Value being switched on is 2453 // known to be less than the Constant CR.LT, and the current Case Value 2454 // is CR.LT - 1, then we can branch directly to the target block for 2455 // the current Case Value, rather than emitting a RHS leaf node for it. 2456 if ((RHSR.second - RHSR.first) == 1 && CR.LT && 2457 cast<ConstantInt>(RHSR.first->Low)->getValue() == 2458 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) { 2459 FalseBB = RHSR.first->BB; 2460 } else { 2461 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2462 CurMF->insert(BBI, FalseBB); 2463 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR)); 2464 2465 // Put SV in a virtual register to make it available from the new blocks. 2466 ExportFromCurrentBlock(SV); 2467 } 2468 2469 // Create a CaseBlock record representing a conditional branch to 2470 // the LHS node if the value being switched on SV is less than C. 2471 // Otherwise, branch to LHS. 2472 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB); 2473 2474 if (CR.CaseBB == SwitchBB) 2475 visitSwitchCase(CB, SwitchBB); 2476 else 2477 SwitchCases.push_back(CB); 2478 2479 return true; 2480} 2481 2482/// handleBitTestsSwitchCase - if current case range has few destination and 2483/// range span less, than machine word bitwidth, encode case range into series 2484/// of masks and emit bit tests with these masks. 
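/// Each distinct destination gets one mask with a bit set for every case value
/// that branches to it, and the test emitted for a mask is essentially
/// ((1 << (SwitchValue - lowBound)) & Mask) != 0; at most three masks (and
/// therefore three destinations) are handled this way.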
2485bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, 2486 CaseRecVector& WorkList, 2487 const Value* SV, 2488 MachineBasicBlock* Default, 2489 MachineBasicBlock* SwitchBB) { 2490 const TargetLowering *TLI = TM.getTargetLowering(); 2491 EVT PTy = TLI->getPointerTy(); 2492 unsigned IntPtrBits = PTy.getSizeInBits(); 2493 2494 Case& FrontCase = *CR.Range.first; 2495 Case& BackCase = *(CR.Range.second-1); 2496 2497 // Get the MachineFunction which holds the current MBB. This is used when 2498 // inserting any additional MBBs necessary to represent the switch. 2499 MachineFunction *CurMF = FuncInfo.MF; 2500 2501 // If target does not have legal shift left, do not emit bit tests at all. 2502 if (!TLI->isOperationLegal(ISD::SHL, PTy)) 2503 return false; 2504 2505 size_t numCmps = 0; 2506 for (CaseItr I = CR.Range.first, E = CR.Range.second; 2507 I!=E; ++I) { 2508 // Single case counts one, case range - two. 2509 numCmps += (I->Low == I->High ? 1 : 2); 2510 } 2511 2512 // Count unique destinations 2513 SmallSet<MachineBasicBlock*, 4> Dests; 2514 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) { 2515 Dests.insert(I->BB); 2516 if (Dests.size() > 3) 2517 // Don't bother the code below, if there are too much unique destinations 2518 return false; 2519 } 2520 DEBUG(dbgs() << "Total number of unique destinations: " 2521 << Dests.size() << '\n' 2522 << "Total number of comparisons: " << numCmps << '\n'); 2523 2524 // Compute span of values. 2525 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue(); 2526 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue(); 2527 APInt cmpRange = maxValue - minValue; 2528 2529 DEBUG(dbgs() << "Compare range: " << cmpRange << '\n' 2530 << "Low bound: " << minValue << '\n' 2531 << "High bound: " << maxValue << '\n'); 2532 2533 if (cmpRange.uge(IntPtrBits) || 2534 (!(Dests.size() == 1 && numCmps >= 3) && 2535 !(Dests.size() == 2 && numCmps >= 5) && 2536 !(Dests.size() >= 3 && numCmps >= 6))) 2537 return false; 2538 2539 DEBUG(dbgs() << "Emitting bit tests\n"); 2540 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth()); 2541 2542 // Optimize the case where all the case values fit in a 2543 // word without having to subtract minValue. In this case, 2544 // we can optimize away the subtraction. 2545 if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) { 2546 cmpRange = maxValue; 2547 } else { 2548 lowBound = minValue; 2549 } 2550 2551 CaseBitsVector CasesBits; 2552 unsigned i, count = 0; 2553 2554 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) { 2555 MachineBasicBlock* Dest = I->BB; 2556 for (i = 0; i < count; ++i) 2557 if (Dest == CasesBits[i].BB) 2558 break; 2559 2560 if (i == count) { 2561 assert((count < 3) && "Too much destinations to test!"); 2562 CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/)); 2563 count++; 2564 } 2565 2566 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue(); 2567 const APInt& highValue = cast<ConstantInt>(I->High)->getValue(); 2568 2569 uint64_t lo = (lowValue - lowBound).getZExtValue(); 2570 uint64_t hi = (highValue - lowBound).getZExtValue(); 2571 CasesBits[i].ExtraWeight += I->ExtraWeight; 2572 2573 for (uint64_t j = lo; j <= hi; j++) { 2574 CasesBits[i].Mask |= 1ULL << j; 2575 CasesBits[i].Bits++; 2576 } 2577 2578 } 2579 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp()); 2580 2581 BitTestInfo BTC; 2582 2583 // Figure out which block is immediately after the current one. 
2584 MachineFunction::iterator BBI = CR.CaseBB; 2585 ++BBI; 2586 2587 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock(); 2588 2589 DEBUG(dbgs() << "Cases:\n"); 2590 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) { 2591 DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask 2592 << ", Bits: " << CasesBits[i].Bits 2593 << ", BB: " << CasesBits[i].BB << '\n'); 2594 2595 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB); 2596 CurMF->insert(BBI, CaseBB); 2597 BTC.push_back(BitTestCase(CasesBits[i].Mask, 2598 CaseBB, 2599 CasesBits[i].BB, CasesBits[i].ExtraWeight)); 2600 2601 // Put SV in a virtual register to make it available from the new blocks. 2602 ExportFromCurrentBlock(SV); 2603 } 2604 2605 BitTestBlock BTB(lowBound, cmpRange, SV, 2606 -1U, MVT::Other, (CR.CaseBB == SwitchBB), 2607 CR.CaseBB, Default, BTC); 2608 2609 if (CR.CaseBB == SwitchBB) 2610 visitBitTestHeader(BTB, SwitchBB); 2611 2612 BitTestCases.push_back(BTB); 2613 2614 return true; 2615} 2616 2617/// Clusterify - Transform simple list of Cases into list of CaseRange's 2618size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases, 2619 const SwitchInst& SI) { 2620 size_t numCmps = 0; 2621 2622 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2623 // Start with "simple" cases 2624 for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end(); 2625 i != e; ++i) { 2626 const BasicBlock *SuccBB = i.getCaseSuccessor(); 2627 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB]; 2628 2629 uint32_t ExtraWeight = 2630 BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0; 2631 2632 Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(), 2633 SMBB, ExtraWeight)); 2634 } 2635 std::sort(Cases.begin(), Cases.end(), CaseCmp()); 2636 2637 // Merge case into clusters 2638 if (Cases.size() >= 2) 2639 // Must recompute end() each iteration because it may be 2640 // invalidated by erase if we hold on to it 2641 for (CaseItr I = Cases.begin(), J = std::next(Cases.begin()); 2642 J != Cases.end(); ) { 2643 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue(); 2644 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue(); 2645 MachineBasicBlock* nextBB = J->BB; 2646 MachineBasicBlock* currentBB = I->BB; 2647 2648 // If the two neighboring cases go to the same destination, merge them 2649 // into a single case. 2650 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) { 2651 I->High = J->High; 2652 I->ExtraWeight += J->ExtraWeight; 2653 J = Cases.erase(J); 2654 } else { 2655 I = J++; 2656 } 2657 } 2658 2659 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) { 2660 if (I->Low != I->High) 2661 // A range counts double, since it requires two compares. 2662 ++numCmps; 2663 } 2664 2665 return numCmps; 2666} 2667 2668void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, 2669 MachineBasicBlock *Last) { 2670 // Update JTCases. 2671 for (unsigned i = 0, e = JTCases.size(); i != e; ++i) 2672 if (JTCases[i].first.HeaderBB == First) 2673 JTCases[i].first.HeaderBB = Last; 2674 2675 // Update BitTestCases. 2676 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i) 2677 if (BitTestCases[i].Parent == First) 2678 BitTestCases[i].Parent = Last; 2679} 2680 2681void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { 2682 MachineBasicBlock *SwitchMBB = FuncInfo.MBB; 2683 2684 // Figure out which block is immediately after the current one. 
2685 MachineBasicBlock *NextBlock = 0; 2686 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()]; 2687 2688 // If there is only the default destination, branch to it if it is not the 2689 // next basic block. Otherwise, just fall through. 2690 if (!SI.getNumCases()) { 2691 // Update machine-CFG edges. 2692 2693 // If this is not a fall-through branch, emit the branch. 2694 SwitchMBB->addSuccessor(Default); 2695 if (Default != NextBlock) 2696 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 2697 MVT::Other, getControlRoot(), 2698 DAG.getBasicBlock(Default))); 2699 2700 return; 2701 } 2702 2703 // If there are any non-default case statements, create a vector of Cases 2704 // representing each one, and sort the vector so that we can efficiently 2705 // create a binary search tree from them. 2706 CaseVector Cases; 2707 size_t numCmps = Clusterify(Cases, SI); 2708 DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size() 2709 << ". Total compares: " << numCmps << '\n'); 2710 (void)numCmps; 2711 2712 // Get the Value to be switched on and default basic blocks, which will be 2713 // inserted into CaseBlock records, representing basic blocks in the binary 2714 // search tree. 2715 const Value *SV = SI.getCondition(); 2716 2717 // Push the initial CaseRec onto the worklist 2718 CaseRecVector WorkList; 2719 WorkList.push_back(CaseRec(SwitchMBB,0,0, 2720 CaseRange(Cases.begin(),Cases.end()))); 2721 2722 while (!WorkList.empty()) { 2723 // Grab a record representing a case range to process off the worklist 2724 CaseRec CR = WorkList.back(); 2725 WorkList.pop_back(); 2726 2727 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB)) 2728 continue; 2729 2730 // If the range has few cases (two or less) emit a series of specific 2731 // tests. 2732 if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB)) 2733 continue; 2734 2735 // If the switch has more than N blocks, and is at least 40% dense, and the 2736 // target supports indirect branches, then emit a jump table rather than 2737 // lowering the switch to a binary tree of conditional branches. 2738 // N defaults to 4 and is controlled via TLS.getMinimumJumpTableEntries(). 2739 if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB)) 2740 continue; 2741 2742 // Emit binary tree. We need to pick a pivot, and push left and right ranges 2743 // onto the worklist. Leafs are handled via handleSmallSwitchRange() call. 2744 handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB); 2745 } 2746} 2747 2748void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { 2749 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; 2750 2751 // Update machine-CFG edges with unique successors. 
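  // An indirectbr may name the same basic block several times in its successor
  // list; the Done set ensures each CFG edge is added only once.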
2752 SmallSet<BasicBlock*, 32> Done; 2753 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { 2754 BasicBlock *BB = I.getSuccessor(i); 2755 bool Inserted = Done.insert(BB); 2756 if (!Inserted) 2757 continue; 2758 2759 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB]; 2760 addSuccessorWithWeight(IndirectBrMBB, Succ); 2761 } 2762 2763 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(), 2764 MVT::Other, getControlRoot(), 2765 getValue(I.getAddress()))); 2766} 2767 2768void SelectionDAGBuilder::visitFSub(const User &I) { 2769 // -0.0 - X --> fneg 2770 Type *Ty = I.getType(); 2771 if (isa<Constant>(I.getOperand(0)) && 2772 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) { 2773 SDValue Op2 = getValue(I.getOperand(1)); 2774 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(), 2775 Op2.getValueType(), Op2)); 2776 return; 2777 } 2778 2779 visitBinary(I, ISD::FSUB); 2780} 2781 2782void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) { 2783 SDValue Op1 = getValue(I.getOperand(0)); 2784 SDValue Op2 = getValue(I.getOperand(1)); 2785 setValue(&I, DAG.getNode(OpCode, getCurSDLoc(), 2786 Op1.getValueType(), Op1, Op2)); 2787} 2788 2789void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { 2790 SDValue Op1 = getValue(I.getOperand(0)); 2791 SDValue Op2 = getValue(I.getOperand(1)); 2792 2793 EVT ShiftTy = TM.getTargetLowering()->getShiftAmountTy(Op2.getValueType()); 2794 2795 // Coerce the shift amount to the right type if we can. 2796 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { 2797 unsigned ShiftSize = ShiftTy.getSizeInBits(); 2798 unsigned Op2Size = Op2.getValueType().getSizeInBits(); 2799 SDLoc DL = getCurSDLoc(); 2800 2801 // If the operand is smaller than the shift count type, promote it. 2802 if (ShiftSize > Op2Size) 2803 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2); 2804 2805 // If the operand is larger than the shift count type but the shift 2806 // count type has enough bits to represent any shift value, truncate 2807 // it now. This is a common case and it exposes the truncate to 2808 // optimization early. 2809 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits())) 2810 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2); 2811 // Otherwise we'll need to temporarily settle for some other convenient 2812 // type. Type legalization will make adjustments once the shiftee is split. 2813 else 2814 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32); 2815 } 2816 2817 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), 2818 Op1.getValueType(), Op1, Op2)); 2819} 2820 2821void SelectionDAGBuilder::visitSDiv(const User &I) { 2822 SDValue Op1 = getValue(I.getOperand(0)); 2823 SDValue Op2 = getValue(I.getOperand(1)); 2824 2825 // Turn exact SDivs into multiplications. 2826 // FIXME: This should be in DAGCombiner, but it doesn't have access to the 2827 // exact bit. 
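  // Because the division is exact there is no remainder to round away, so it
  // can be rewritten without a divide: arithmetic-shift out the divisor's
  // trailing zero bits, then multiply by the multiplicative inverse
  // (modulo 2^bitwidth) of the remaining odd factor, which is the sequence
  // BuildExactSDIV produces.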
2828 if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() && 2829 !isa<ConstantSDNode>(Op1) && 2830 isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue()) 2831 setValue(&I, TM.getTargetLowering()->BuildExactSDIV(Op1, Op2, 2832 getCurSDLoc(), DAG)); 2833 else 2834 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), 2835 Op1, Op2)); 2836} 2837 2838void SelectionDAGBuilder::visitICmp(const User &I) { 2839 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE; 2840 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I)) 2841 predicate = IC->getPredicate(); 2842 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) 2843 predicate = ICmpInst::Predicate(IC->getPredicate()); 2844 SDValue Op1 = getValue(I.getOperand(0)); 2845 SDValue Op2 = getValue(I.getOperand(1)); 2846 ISD::CondCode Opcode = getICmpCondCode(predicate); 2847 2848 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2849 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode)); 2850} 2851 2852void SelectionDAGBuilder::visitFCmp(const User &I) { 2853 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; 2854 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I)) 2855 predicate = FC->getPredicate(); 2856 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 2857 predicate = FCmpInst::Predicate(FC->getPredicate()); 2858 SDValue Op1 = getValue(I.getOperand(0)); 2859 SDValue Op2 = getValue(I.getOperand(1)); 2860 ISD::CondCode Condition = getFCmpCondCode(predicate); 2861 if (TM.Options.NoNaNsFPMath) 2862 Condition = getFCmpCodeWithoutNaN(Condition); 2863 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2864 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition)); 2865} 2866 2867void SelectionDAGBuilder::visitSelect(const User &I) { 2868 SmallVector<EVT, 4> ValueVTs; 2869 ComputeValueVTs(*TM.getTargetLowering(), I.getType(), ValueVTs); 2870 unsigned NumValues = ValueVTs.size(); 2871 if (NumValues == 0) return; 2872 2873 SmallVector<SDValue, 4> Values(NumValues); 2874 SDValue Cond = getValue(I.getOperand(0)); 2875 SDValue TrueVal = getValue(I.getOperand(1)); 2876 SDValue FalseVal = getValue(I.getOperand(2)); 2877 ISD::NodeType OpCode = Cond.getValueType().isVector() ? 2878 ISD::VSELECT : ISD::SELECT; 2879 2880 for (unsigned i = 0; i != NumValues; ++i) 2881 Values[i] = DAG.getNode(OpCode, getCurSDLoc(), 2882 TrueVal.getNode()->getValueType(TrueVal.getResNo()+i), 2883 Cond, 2884 SDValue(TrueVal.getNode(), 2885 TrueVal.getResNo() + i), 2886 SDValue(FalseVal.getNode(), 2887 FalseVal.getResNo() + i)); 2888 2889 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 2890 DAG.getVTList(&ValueVTs[0], NumValues), 2891 &Values[0], NumValues)); 2892} 2893 2894void SelectionDAGBuilder::visitTrunc(const User &I) { 2895 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 2896 SDValue N = getValue(I.getOperand(0)); 2897 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2898 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); 2899} 2900 2901void SelectionDAGBuilder::visitZExt(const User &I) { 2902 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 2903 // ZExt also can't be a cast to bool for same reason. 
So, nothing much to do 2904 SDValue N = getValue(I.getOperand(0)); 2905 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2906 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N)); 2907} 2908 2909void SelectionDAGBuilder::visitSExt(const User &I) { 2910 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 2911 // SExt also can't be a cast to bool for same reason. So, nothing much to do 2912 SDValue N = getValue(I.getOperand(0)); 2913 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2914 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 2915} 2916 2917void SelectionDAGBuilder::visitFPTrunc(const User &I) { 2918 // FPTrunc is never a no-op cast, no need to check 2919 SDValue N = getValue(I.getOperand(0)); 2920 const TargetLowering *TLI = TM.getTargetLowering(); 2921 EVT DestVT = TLI->getValueType(I.getType()); 2922 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(), 2923 DestVT, N, 2924 DAG.getTargetConstant(0, TLI->getPointerTy()))); 2925} 2926 2927void SelectionDAGBuilder::visitFPExt(const User &I) { 2928 // FPExt is never a no-op cast, no need to check 2929 SDValue N = getValue(I.getOperand(0)); 2930 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2931 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); 2932} 2933 2934void SelectionDAGBuilder::visitFPToUI(const User &I) { 2935 // FPToUI is never a no-op cast, no need to check 2936 SDValue N = getValue(I.getOperand(0)); 2937 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2938 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); 2939} 2940 2941void SelectionDAGBuilder::visitFPToSI(const User &I) { 2942 // FPToSI is never a no-op cast, no need to check 2943 SDValue N = getValue(I.getOperand(0)); 2944 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2945 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); 2946} 2947 2948void SelectionDAGBuilder::visitUIToFP(const User &I) { 2949 // UIToFP is never a no-op cast, no need to check 2950 SDValue N = getValue(I.getOperand(0)); 2951 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2952 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N)); 2953} 2954 2955void SelectionDAGBuilder::visitSIToFP(const User &I) { 2956 // SIToFP is never a no-op cast, no need to check 2957 SDValue N = getValue(I.getOperand(0)); 2958 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2959 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); 2960} 2961 2962void SelectionDAGBuilder::visitPtrToInt(const User &I) { 2963 // What to do depends on the size of the integer and the size of the pointer. 2964 // We can either truncate, zero extend, or no-op, accordingly. 2965 SDValue N = getValue(I.getOperand(0)); 2966 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2967 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT)); 2968} 2969 2970void SelectionDAGBuilder::visitIntToPtr(const User &I) { 2971 // What to do depends on the size of the integer and the size of the pointer. 2972 // We can either truncate, zero extend, or no-op, accordingly. 
2973 SDValue N = getValue(I.getOperand(0)); 2974 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2975 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT)); 2976} 2977 2978void SelectionDAGBuilder::visitBitCast(const User &I) { 2979 SDValue N = getValue(I.getOperand(0)); 2980 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 2981 2982 // BitCast assures us that source and destination are the same size so this is 2983 // either a BITCAST or a no-op. 2984 if (DestVT != N.getValueType()) 2985 setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(), 2986 DestVT, N)); // convert types. 2987 // Check if the original LLVM IR Operand was a ConstantInt, because getValue() 2988 // might fold any kind of constant expression to an integer constant and that 2989 // is not what we are looking for. Only regcognize a bitcast of a genuine 2990 // constant integer as an opaque constant. 2991 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0))) 2992 setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false, 2993 /*isOpaque*/true)); 2994 else 2995 setValue(&I, N); // noop cast. 2996} 2997 2998void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) { 2999 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3000 const Value *SV = I.getOperand(0); 3001 SDValue N = getValue(SV); 3002 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType()); 3003 3004 unsigned SrcAS = SV->getType()->getPointerAddressSpace(); 3005 unsigned DestAS = I.getType()->getPointerAddressSpace(); 3006 3007 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) 3008 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS); 3009 3010 setValue(&I, N); 3011} 3012 3013void SelectionDAGBuilder::visitInsertElement(const User &I) { 3014 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3015 SDValue InVec = getValue(I.getOperand(0)); 3016 SDValue InVal = getValue(I.getOperand(1)); 3017 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), 3018 getCurSDLoc(), TLI.getVectorIdxTy()); 3019 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(), 3020 TM.getTargetLowering()->getValueType(I.getType()), 3021 InVec, InVal, InIdx)); 3022} 3023 3024void SelectionDAGBuilder::visitExtractElement(const User &I) { 3025 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3026 SDValue InVec = getValue(I.getOperand(0)); 3027 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), 3028 getCurSDLoc(), TLI.getVectorIdxTy()); 3029 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(), 3030 TM.getTargetLowering()->getValueType(I.getType()), 3031 InVec, InIdx)); 3032} 3033 3034// Utility for visitShuffleVector - Return true if every element in Mask, 3035// beginning from position Pos and ending in Pos+Size, falls within the 3036// specified sequential range [L, L+Pos). or is undef. 
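// For example (illustration only): Mask = {4, 5, -1, 7} with Pos = 0 and
// Size = 4 is sequential starting at Low = 4, because the undef (-1) element
// is allowed to match any value.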
3037static bool isSequentialInRange(const SmallVectorImpl<int> &Mask, 3038 unsigned Pos, unsigned Size, int Low) { 3039 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) 3040 if (Mask[i] >= 0 && Mask[i] != Low) 3041 return false; 3042 return true; 3043} 3044 3045void SelectionDAGBuilder::visitShuffleVector(const User &I) { 3046 SDValue Src1 = getValue(I.getOperand(0)); 3047 SDValue Src2 = getValue(I.getOperand(1)); 3048 3049 SmallVector<int, 8> Mask; 3050 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask); 3051 unsigned MaskNumElts = Mask.size(); 3052 3053 const TargetLowering *TLI = TM.getTargetLowering(); 3054 EVT VT = TLI->getValueType(I.getType()); 3055 EVT SrcVT = Src1.getValueType(); 3056 unsigned SrcNumElts = SrcVT.getVectorNumElements(); 3057 3058 if (SrcNumElts == MaskNumElts) { 3059 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2, 3060 &Mask[0])); 3061 return; 3062 } 3063 3064 // Normalize the shuffle vector since mask and vector length don't match. 3065 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) { 3066 // Mask is longer than the source vectors and is a multiple of the source 3067 // vectors. We can use concatenate vector to make the mask and vectors 3068 // lengths match. 3069 if (SrcNumElts*2 == MaskNumElts) { 3070 // First check for Src1 in low and Src2 in high 3071 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) && 3072 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) { 3073 // The shuffle is concatenating two vectors together. 3074 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(), 3075 VT, Src1, Src2)); 3076 return; 3077 } 3078 // Then check for Src2 in low and Src1 in high 3079 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) && 3080 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) { 3081 // The shuffle is concatenating two vectors together. 3082 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(), 3083 VT, Src2, Src1)); 3084 return; 3085 } 3086 } 3087 3088 // Pad both vectors with undefs to make them the same length as the mask. 3089 unsigned NumConcat = MaskNumElts / SrcNumElts; 3090 bool Src1U = Src1.getOpcode() == ISD::UNDEF; 3091 bool Src2U = Src2.getOpcode() == ISD::UNDEF; 3092 SDValue UndefVal = DAG.getUNDEF(SrcVT); 3093 3094 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); 3095 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); 3096 MOps1[0] = Src1; 3097 MOps2[0] = Src2; 3098 3099 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, 3100 getCurSDLoc(), VT, 3101 &MOps1[0], NumConcat); 3102 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, 3103 getCurSDLoc(), VT, 3104 &MOps2[0], NumConcat); 3105 3106 // Readjust mask for new input vector length. 3107 SmallVector<int, 8> MappedOps; 3108 for (unsigned i = 0; i != MaskNumElts; ++i) { 3109 int Idx = Mask[i]; 3110 if (Idx >= (int)SrcNumElts) 3111 Idx -= SrcNumElts - MaskNumElts; 3112 MappedOps.push_back(Idx); 3113 } 3114 3115 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2, 3116 &MappedOps[0])); 3117 return; 3118 } 3119 3120 if (SrcNumElts > MaskNumElts) { 3121 // Analyze the access pattern of the vector to see if we can extract 3122 // two subvectors and do the shuffle. The analysis is done by calculating 3123 // the range of elements the mask access on both vectors. 
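// For instance (hypothetical shapes): with two <8 x i32> sources and the
// 4-element mask <12, 13, 14, 15>, only elements 4..7 of the second source
// are used, so the shuffle becomes an EXTRACT_SUBVECTOR at index 4 of that
// source (the first source turns into undef) followed by a 4-wide shuffle of
// the extracted piece.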
3124 int MinRange[2] = { static_cast<int>(SrcNumElts), 3125 static_cast<int>(SrcNumElts)}; 3126 int MaxRange[2] = {-1, -1}; 3127 3128 for (unsigned i = 0; i != MaskNumElts; ++i) { 3129 int Idx = Mask[i]; 3130 unsigned Input = 0; 3131 if (Idx < 0) 3132 continue; 3133 3134 if (Idx >= (int)SrcNumElts) { 3135 Input = 1; 3136 Idx -= SrcNumElts; 3137 } 3138 if (Idx > MaxRange[Input]) 3139 MaxRange[Input] = Idx; 3140 if (Idx < MinRange[Input]) 3141 MinRange[Input] = Idx; 3142 } 3143 3144 // Check if the access is smaller than the vector size and can we find 3145 // a reasonable extract index. 3146 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Can not 3147 // Extract. 3148 int StartIdx[2]; // StartIdx to extract from 3149 for (unsigned Input = 0; Input < 2; ++Input) { 3150 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) { 3151 RangeUse[Input] = 0; // Unused 3152 StartIdx[Input] = 0; 3153 continue; 3154 } 3155 3156 // Find a good start index that is a multiple of the mask length. Then 3157 // see if the rest of the elements are in range. 3158 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts; 3159 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts && 3160 StartIdx[Input] + MaskNumElts <= SrcNumElts) 3161 RangeUse[Input] = 1; // Extract from a multiple of the mask length. 3162 } 3163 3164 if (RangeUse[0] == 0 && RangeUse[1] == 0) { 3165 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used. 3166 return; 3167 } 3168 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) { 3169 // Extract appropriate subvector and generate a vector shuffle 3170 for (unsigned Input = 0; Input < 2; ++Input) { 3171 SDValue &Src = Input == 0 ? Src1 : Src2; 3172 if (RangeUse[Input] == 0) 3173 Src = DAG.getUNDEF(VT); 3174 else 3175 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT, 3176 Src, DAG.getConstant(StartIdx[Input], 3177 TLI->getVectorIdxTy())); 3178 } 3179 3180 // Calculate new mask. 3181 SmallVector<int, 8> MappedOps; 3182 for (unsigned i = 0; i != MaskNumElts; ++i) { 3183 int Idx = Mask[i]; 3184 if (Idx >= 0) { 3185 if (Idx < (int)SrcNumElts) 3186 Idx -= StartIdx[0]; 3187 else 3188 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts; 3189 } 3190 MappedOps.push_back(Idx); 3191 } 3192 3193 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2, 3194 &MappedOps[0])); 3195 return; 3196 } 3197 } 3198 3199 // We can't use either concat vectors or extract subvectors so fall back to 3200 // replacing the shuffle with extract and build vector. 3201 // to insert and build vector. 3202 EVT EltVT = VT.getVectorElementType(); 3203 EVT IdxVT = TLI->getVectorIdxTy(); 3204 SmallVector<SDValue,8> Ops; 3205 for (unsigned i = 0; i != MaskNumElts; ++i) { 3206 int Idx = Mask[i]; 3207 SDValue Res; 3208 3209 if (Idx < 0) { 3210 Res = DAG.getUNDEF(EltVT); 3211 } else { 3212 SDValue &Src = Idx < (int)SrcNumElts ? 
Src1 : Src2; 3213 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts; 3214 3215 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(), 3216 EltVT, Src, DAG.getConstant(Idx, IdxVT)); 3217 } 3218 3219 Ops.push_back(Res); 3220 } 3221 3222 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), 3223 VT, &Ops[0], Ops.size())); 3224} 3225 3226void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) { 3227 const Value *Op0 = I.getOperand(0); 3228 const Value *Op1 = I.getOperand(1); 3229 Type *AggTy = I.getType(); 3230 Type *ValTy = Op1->getType(); 3231 bool IntoUndef = isa<UndefValue>(Op0); 3232 bool FromUndef = isa<UndefValue>(Op1); 3233 3234 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices()); 3235 3236 const TargetLowering *TLI = TM.getTargetLowering(); 3237 SmallVector<EVT, 4> AggValueVTs; 3238 ComputeValueVTs(*TLI, AggTy, AggValueVTs); 3239 SmallVector<EVT, 4> ValValueVTs; 3240 ComputeValueVTs(*TLI, ValTy, ValValueVTs); 3241 3242 unsigned NumAggValues = AggValueVTs.size(); 3243 unsigned NumValValues = ValValueVTs.size(); 3244 SmallVector<SDValue, 4> Values(NumAggValues); 3245 3246 SDValue Agg = getValue(Op0); 3247 unsigned i = 0; 3248 // Copy the beginning value(s) from the original aggregate. 3249 for (; i != LinearIndex; ++i) 3250 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) : 3251 SDValue(Agg.getNode(), Agg.getResNo() + i); 3252 // Copy values from the inserted value(s). 3253 if (NumValValues) { 3254 SDValue Val = getValue(Op1); 3255 for (; i != LinearIndex + NumValValues; ++i) 3256 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) : 3257 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex); 3258 } 3259 // Copy remaining value(s) from the original aggregate. 3260 for (; i != NumAggValues; ++i) 3261 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) : 3262 SDValue(Agg.getNode(), Agg.getResNo() + i); 3263 3264 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3265 DAG.getVTList(&AggValueVTs[0], NumAggValues), 3266 &Values[0], NumAggValues)); 3267} 3268 3269void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) { 3270 const Value *Op0 = I.getOperand(0); 3271 Type *AggTy = Op0->getType(); 3272 Type *ValTy = I.getType(); 3273 bool OutOfUndef = isa<UndefValue>(Op0); 3274 3275 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices()); 3276 3277 const TargetLowering *TLI = TM.getTargetLowering(); 3278 SmallVector<EVT, 4> ValValueVTs; 3279 ComputeValueVTs(*TLI, ValTy, ValValueVTs); 3280 3281 unsigned NumValValues = ValValueVTs.size(); 3282 3283 // Ignore a extractvalue that produces an empty object 3284 if (!NumValValues) { 3285 setValue(&I, DAG.getUNDEF(MVT(MVT::Other))); 3286 return; 3287 } 3288 3289 SmallVector<SDValue, 4> Values(NumValValues); 3290 3291 SDValue Agg = getValue(Op0); 3292 // Copy out the selected value(s). 3293 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i) 3294 Values[i - LinearIndex] = 3295 OutOfUndef ? 3296 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) : 3297 SDValue(Agg.getNode(), Agg.getResNo() + i); 3298 3299 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3300 DAG.getVTList(&ValValueVTs[0], NumValValues), 3301 &Values[0], NumValValues)); 3302} 3303 3304void SelectionDAGBuilder::visitGetElementPtr(const User &I) { 3305 Value *Op0 = I.getOperand(0); 3306 // Note that the pointer operand may be a vector of pointers. Take the scalar 3307 // element which holds a pointer. 
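// Rough sketch of the lowering below (the struct and indices are made up):
// for "getelementptr %struct.S* %p, i64 %i, i32 2", the struct field index
// adds the constant offset of field 2 from the StructLayout, while the
// sequential index %i is sign-extended (or truncated) to the pointer width
// and scaled by the element's alloc size, using a shift when that size is a
// power of two.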
3308 Type *Ty = Op0->getType()->getScalarType(); 3309 unsigned AS = Ty->getPointerAddressSpace(); 3310 SDValue N = getValue(Op0); 3311 3312 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end(); 3313 OI != E; ++OI) { 3314 const Value *Idx = *OI; 3315 if (StructType *StTy = dyn_cast<StructType>(Ty)) { 3316 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 3317 if (Field) { 3318 // N = N + Offset 3319 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field); 3320 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N, 3321 DAG.getConstant(Offset, N.getValueType())); 3322 } 3323 3324 Ty = StTy->getElementType(Field); 3325 } else { 3326 Ty = cast<SequentialType>(Ty)->getElementType(); 3327 3328 // If this is a constant subscript, handle it quickly. 3329 const TargetLowering *TLI = TM.getTargetLowering(); 3330 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { 3331 if (CI->isZero()) continue; 3332 uint64_t Offs = 3333 DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue(); 3334 SDValue OffsVal; 3335 EVT PTy = TLI->getPointerTy(AS); 3336 unsigned PtrBits = PTy.getSizeInBits(); 3337 if (PtrBits < 64) 3338 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), PTy, 3339 DAG.getConstant(Offs, MVT::i64)); 3340 else 3341 OffsVal = DAG.getConstant(Offs, PTy); 3342 3343 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N, 3344 OffsVal); 3345 continue; 3346 } 3347 3348 // N = N + Idx * ElementSize; 3349 APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS), 3350 DL->getTypeAllocSize(Ty)); 3351 SDValue IdxN = getValue(Idx); 3352 3353 // If the index is smaller or larger than intptr_t, truncate or extend 3354 // it. 3355 IdxN = DAG.getSExtOrTrunc(IdxN, getCurSDLoc(), N.getValueType()); 3356 3357 // If this is a multiply by a power of two, turn it into a shl 3358 // immediately. This is a very common case. 3359 if (ElementSize != 1) { 3360 if (ElementSize.isPowerOf2()) { 3361 unsigned Amt = ElementSize.logBase2(); 3362 IdxN = DAG.getNode(ISD::SHL, getCurSDLoc(), 3363 N.getValueType(), IdxN, 3364 DAG.getConstant(Amt, IdxN.getValueType())); 3365 } else { 3366 SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType()); 3367 IdxN = DAG.getNode(ISD::MUL, getCurSDLoc(), 3368 N.getValueType(), IdxN, Scale); 3369 } 3370 } 3371 3372 N = DAG.getNode(ISD::ADD, getCurSDLoc(), 3373 N.getValueType(), N, IdxN); 3374 } 3375 } 3376 3377 setValue(&I, N); 3378} 3379 3380void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { 3381 // If this is a fixed sized alloca in the entry block of the function, 3382 // allocate it statically on the stack. 3383 if (FuncInfo.StaticAllocaMap.count(&I)) 3384 return; // getValue will auto-populate this. 3385 3386 Type *Ty = I.getAllocatedType(); 3387 const TargetLowering *TLI = TM.getTargetLowering(); 3388 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty); 3389 unsigned Align = 3390 std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), 3391 I.getAlignment()); 3392 3393 SDValue AllocSize = getValue(I.getArraySize()); 3394 3395 EVT IntPtr = TLI->getPointerTy(); 3396 if (AllocSize.getValueType() != IntPtr) 3397 AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr); 3398 3399 AllocSize = DAG.getNode(ISD::MUL, getCurSDLoc(), IntPtr, 3400 AllocSize, 3401 DAG.getConstant(TySize, IntPtr)); 3402 3403 // Handle alignment. If the requested alignment is less than or equal to 3404 // the stack alignment, ignore it. 
If the size is greater than or equal to 3405 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node. 3406 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); 3407 if (Align <= StackAlign) 3408 Align = 0; 3409 3410 // Round the size of the allocation up to the stack alignment size 3411 // by add SA-1 to the size. 3412 AllocSize = DAG.getNode(ISD::ADD, getCurSDLoc(), 3413 AllocSize.getValueType(), AllocSize, 3414 DAG.getIntPtrConstant(StackAlign-1)); 3415 3416 // Mask out the low bits for alignment purposes. 3417 AllocSize = DAG.getNode(ISD::AND, getCurSDLoc(), 3418 AllocSize.getValueType(), AllocSize, 3419 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1))); 3420 3421 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) }; 3422 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other); 3423 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(), 3424 VTs, Ops, 3); 3425 setValue(&I, DSA); 3426 DAG.setRoot(DSA.getValue(1)); 3427 3428 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects()); 3429} 3430 3431void SelectionDAGBuilder::visitLoad(const LoadInst &I) { 3432 if (I.isAtomic()) 3433 return visitAtomicLoad(I); 3434 3435 const Value *SV = I.getOperand(0); 3436 SDValue Ptr = getValue(SV); 3437 3438 Type *Ty = I.getType(); 3439 3440 bool isVolatile = I.isVolatile(); 3441 bool isNonTemporal = I.getMetadata("nontemporal") != 0; 3442 bool isInvariant = I.getMetadata("invariant.load") != 0; 3443 unsigned Alignment = I.getAlignment(); 3444 const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa); 3445 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 3446 3447 SmallVector<EVT, 4> ValueVTs; 3448 SmallVector<uint64_t, 4> Offsets; 3449 ComputeValueVTs(*TM.getTargetLowering(), Ty, ValueVTs, &Offsets); 3450 unsigned NumValues = ValueVTs.size(); 3451 if (NumValues == 0) 3452 return; 3453 3454 SDValue Root; 3455 bool ConstantMemory = false; 3456 if (isVolatile || NumValues > MaxParallelChains) 3457 // Serialize volatile loads with other side effects. 3458 Root = getRoot(); 3459 else if (AA->pointsToConstantMemory( 3460 AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) { 3461 // Do not serialize (non-volatile) loads of constant memory with anything. 3462 Root = DAG.getEntryNode(); 3463 ConstantMemory = true; 3464 } else { 3465 // Do not serialize non-volatile loads against each other. 3466 Root = DAG.getRoot(); 3467 } 3468 3469 const TargetLowering *TLI = TM.getTargetLowering(); 3470 if (isVolatile) 3471 Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG); 3472 3473 SmallVector<SDValue, 4> Values(NumValues); 3474 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains), 3475 NumValues)); 3476 EVT PtrVT = Ptr.getValueType(); 3477 unsigned ChainI = 0; 3478 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 3479 // Serializing loads here may result in excessive register pressure, and 3480 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling 3481 // could recover a bit by hoisting nodes upward in the chain by recognizing 3482 // they are side-effect free or do not alias. The optimizer should really 3483 // avoid this case by converting large object/array copies to llvm.memcpy 3484 // (MaxParallelChains should always remain as failsafe). 
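// Concretely (numbers illustrative, not the actual default): if
// MaxParallelChains were 64 and this aggregate produced 100 values, the
// first 64 load chains would be folded into a single TokenFactor below,
// which then serves as the chain root for the remaining loads.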
3485 if (ChainI == MaxParallelChains) { 3486 assert(PendingLoads.empty() && "PendingLoads must be serialized first"); 3487 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 3488 MVT::Other, &Chains[0], ChainI); 3489 Root = Chain; 3490 ChainI = 0; 3491 } 3492 SDValue A = DAG.getNode(ISD::ADD, getCurSDLoc(), 3493 PtrVT, Ptr, 3494 DAG.getConstant(Offsets[i], PtrVT)); 3495 SDValue L = DAG.getLoad(ValueVTs[i], getCurSDLoc(), Root, 3496 A, MachinePointerInfo(SV, Offsets[i]), isVolatile, 3497 isNonTemporal, isInvariant, Alignment, TBAAInfo, 3498 Ranges); 3499 3500 Values[i] = L; 3501 Chains[ChainI] = L.getValue(1); 3502 } 3503 3504 if (!ConstantMemory) { 3505 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 3506 MVT::Other, &Chains[0], ChainI); 3507 if (isVolatile) 3508 DAG.setRoot(Chain); 3509 else 3510 PendingLoads.push_back(Chain); 3511 } 3512 3513 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3514 DAG.getVTList(&ValueVTs[0], NumValues), 3515 &Values[0], NumValues)); 3516} 3517 3518void SelectionDAGBuilder::visitStore(const StoreInst &I) { 3519 if (I.isAtomic()) 3520 return visitAtomicStore(I); 3521 3522 const Value *SrcV = I.getOperand(0); 3523 const Value *PtrV = I.getOperand(1); 3524 3525 SmallVector<EVT, 4> ValueVTs; 3526 SmallVector<uint64_t, 4> Offsets; 3527 ComputeValueVTs(*TM.getTargetLowering(), SrcV->getType(), ValueVTs, &Offsets); 3528 unsigned NumValues = ValueVTs.size(); 3529 if (NumValues == 0) 3530 return; 3531 3532 // Get the lowered operands. Note that we do this after 3533 // checking if NumResults is zero, because with zero results 3534 // the operands won't have values in the map. 3535 SDValue Src = getValue(SrcV); 3536 SDValue Ptr = getValue(PtrV); 3537 3538 SDValue Root = getRoot(); 3539 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains), 3540 NumValues)); 3541 EVT PtrVT = Ptr.getValueType(); 3542 bool isVolatile = I.isVolatile(); 3543 bool isNonTemporal = I.getMetadata("nontemporal") != 0; 3544 unsigned Alignment = I.getAlignment(); 3545 const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa); 3546 3547 unsigned ChainI = 0; 3548 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 3549 // See visitLoad comments. 
3550 if (ChainI == MaxParallelChains) { 3551 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 3552 MVT::Other, &Chains[0], ChainI); 3553 Root = Chain; 3554 ChainI = 0; 3555 } 3556 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, Ptr, 3557 DAG.getConstant(Offsets[i], PtrVT)); 3558 SDValue St = DAG.getStore(Root, getCurSDLoc(), 3559 SDValue(Src.getNode(), Src.getResNo() + i), 3560 Add, MachinePointerInfo(PtrV, Offsets[i]), 3561 isVolatile, isNonTemporal, Alignment, TBAAInfo); 3562 Chains[ChainI] = St; 3563 } 3564 3565 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 3566 MVT::Other, &Chains[0], ChainI); 3567 DAG.setRoot(StoreNode); 3568} 3569 3570static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order, 3571 SynchronizationScope Scope, 3572 bool Before, SDLoc dl, 3573 SelectionDAG &DAG, 3574 const TargetLowering &TLI) { 3575 // Fence, if necessary 3576 if (Before) { 3577 if (Order == AcquireRelease || Order == SequentiallyConsistent) 3578 Order = Release; 3579 else if (Order == Acquire || Order == Monotonic) 3580 return Chain; 3581 } else { 3582 if (Order == AcquireRelease) 3583 Order = Acquire; 3584 else if (Order == Release || Order == Monotonic) 3585 return Chain; 3586 } 3587 SDValue Ops[3]; 3588 Ops[0] = Chain; 3589 Ops[1] = DAG.getConstant(Order, TLI.getPointerTy()); 3590 Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy()); 3591 return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3); 3592} 3593 3594void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { 3595 SDLoc dl = getCurSDLoc(); 3596 AtomicOrdering SuccessOrder = I.getSuccessOrdering(); 3597 AtomicOrdering FailureOrder = I.getFailureOrdering(); 3598 SynchronizationScope Scope = I.getSynchScope(); 3599 3600 SDValue InChain = getRoot(); 3601 3602 const TargetLowering *TLI = TM.getTargetLowering(); 3603 if (TLI->getInsertFencesForAtomic()) 3604 InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl, 3605 DAG, *TLI); 3606 3607 SDValue L = 3608 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl, 3609 getValue(I.getCompareOperand()).getSimpleValueType(), 3610 InChain, 3611 getValue(I.getPointerOperand()), 3612 getValue(I.getCompareOperand()), 3613 getValue(I.getNewValOperand()), 3614 MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */, 3615 TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder, 3616 TLI->getInsertFencesForAtomic() ? 
Monotonic : FailureOrder, 3617 Scope); 3618 3619 SDValue OutChain = L.getValue(1); 3620 3621 if (TLI->getInsertFencesForAtomic()) 3622 OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl, 3623 DAG, *TLI); 3624 3625 setValue(&I, L); 3626 DAG.setRoot(OutChain); 3627} 3628 3629void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) { 3630 SDLoc dl = getCurSDLoc(); 3631 ISD::NodeType NT; 3632 switch (I.getOperation()) { 3633 default: llvm_unreachable("Unknown atomicrmw operation"); 3634 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break; 3635 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break; 3636 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break; 3637 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break; 3638 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; 3639 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break; 3640 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break; 3641 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break; 3642 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break; 3643 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break; 3644 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; 3645 } 3646 AtomicOrdering Order = I.getOrdering(); 3647 SynchronizationScope Scope = I.getSynchScope(); 3648 3649 SDValue InChain = getRoot(); 3650 3651 const TargetLowering *TLI = TM.getTargetLowering(); 3652 if (TLI->getInsertFencesForAtomic()) 3653 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl, 3654 DAG, *TLI); 3655 3656 SDValue L = 3657 DAG.getAtomic(NT, dl, 3658 getValue(I.getValOperand()).getSimpleValueType(), 3659 InChain, 3660 getValue(I.getPointerOperand()), 3661 getValue(I.getValOperand()), 3662 I.getPointerOperand(), 0 /* Alignment */, 3663 TLI->getInsertFencesForAtomic() ? Monotonic : Order, 3664 Scope); 3665 3666 SDValue OutChain = L.getValue(1); 3667 3668 if (TLI->getInsertFencesForAtomic()) 3669 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl, 3670 DAG, *TLI); 3671 3672 setValue(&I, L); 3673 DAG.setRoot(OutChain); 3674} 3675 3676void SelectionDAGBuilder::visitFence(const FenceInst &I) { 3677 SDLoc dl = getCurSDLoc(); 3678 const TargetLowering *TLI = TM.getTargetLowering(); 3679 SDValue Ops[3]; 3680 Ops[0] = getRoot(); 3681 Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy()); 3682 Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy()); 3683 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3)); 3684} 3685 3686void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { 3687 SDLoc dl = getCurSDLoc(); 3688 AtomicOrdering Order = I.getOrdering(); 3689 SynchronizationScope Scope = I.getSynchScope(); 3690 3691 SDValue InChain = getRoot(); 3692 3693 const TargetLowering *TLI = TM.getTargetLowering(); 3694 EVT VT = TLI->getValueType(I.getType()); 3695 3696 if (I.getAlignment() < VT.getSizeInBits() / 8) 3697 report_fatal_error("Cannot generate unaligned atomic load"); 3698 3699 InChain = TLI->prepareVolatileOrAtomicLoad(InChain, dl, DAG); 3700 SDValue L = 3701 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain, 3702 getValue(I.getPointerOperand()), 3703 I.getPointerOperand(), I.getAlignment(), 3704 TLI->getInsertFencesForAtomic() ? 
Monotonic : Order, 3705 Scope); 3706 3707 SDValue OutChain = L.getValue(1); 3708 3709 if (TLI->getInsertFencesForAtomic()) 3710 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl, 3711 DAG, *TLI); 3712 3713 setValue(&I, L); 3714 DAG.setRoot(OutChain); 3715} 3716 3717void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { 3718 SDLoc dl = getCurSDLoc(); 3719 3720 AtomicOrdering Order = I.getOrdering(); 3721 SynchronizationScope Scope = I.getSynchScope(); 3722 3723 SDValue InChain = getRoot(); 3724 3725 const TargetLowering *TLI = TM.getTargetLowering(); 3726 EVT VT = TLI->getValueType(I.getValueOperand()->getType()); 3727 3728 if (I.getAlignment() < VT.getSizeInBits() / 8) 3729 report_fatal_error("Cannot generate unaligned atomic store"); 3730 3731 if (TLI->getInsertFencesForAtomic()) 3732 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl, 3733 DAG, *TLI); 3734 3735 SDValue OutChain = 3736 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT, 3737 InChain, 3738 getValue(I.getPointerOperand()), 3739 getValue(I.getValueOperand()), 3740 I.getPointerOperand(), I.getAlignment(), 3741 TLI->getInsertFencesForAtomic() ? Monotonic : Order, 3742 Scope); 3743 3744 if (TLI->getInsertFencesForAtomic()) 3745 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl, 3746 DAG, *TLI); 3747 3748 DAG.setRoot(OutChain); 3749} 3750 3751/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 3752/// node. 3753void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I, 3754 unsigned Intrinsic) { 3755 bool HasChain = !I.doesNotAccessMemory(); 3756 bool OnlyLoad = HasChain && I.onlyReadsMemory(); 3757 3758 // Build the operand list. 3759 SmallVector<SDValue, 8> Ops; 3760 if (HasChain) { // If this intrinsic has side-effects, chainify it. 3761 if (OnlyLoad) { 3762 // We don't need to serialize loads against other loads. 3763 Ops.push_back(DAG.getRoot()); 3764 } else { 3765 Ops.push_back(getRoot()); 3766 } 3767 } 3768 3769 // Info is set by getTgtMemInstrinsic 3770 TargetLowering::IntrinsicInfo Info; 3771 const TargetLowering *TLI = TM.getTargetLowering(); 3772 bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic); 3773 3774 // Add the intrinsic ID as an integer operand if it's not a target intrinsic. 3775 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID || 3776 Info.opc == ISD::INTRINSIC_W_CHAIN) 3777 Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI->getPointerTy())); 3778 3779 // Add all operands of the call to the operand list. 3780 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) { 3781 SDValue Op = getValue(I.getArgOperand(i)); 3782 Ops.push_back(Op); 3783 } 3784 3785 SmallVector<EVT, 4> ValueVTs; 3786 ComputeValueVTs(*TLI, I.getType(), ValueVTs); 3787 3788 if (HasChain) 3789 ValueVTs.push_back(MVT::Other); 3790 3791 SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size()); 3792 3793 // Create the node. 
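// Depending on the information gathered above this becomes a memory
// intrinsic node (when the target reported one via getTgtMemIntrinsic), an
// INTRINSIC_WO_CHAIN when there is no chain, an INTRINSIC_W_CHAIN when the
// chained call produces a value, or an INTRINSIC_VOID otherwise.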
3794 SDValue Result; 3795 if (IsTgtIntrinsic) { 3796 // This is target intrinsic that touches memory 3797 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), 3798 VTs, &Ops[0], Ops.size(), 3799 Info.memVT, 3800 MachinePointerInfo(Info.ptrVal, Info.offset), 3801 Info.align, Info.vol, 3802 Info.readMem, Info.writeMem); 3803 } else if (!HasChain) { 3804 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), 3805 VTs, &Ops[0], Ops.size()); 3806 } else if (!I.getType()->isVoidTy()) { 3807 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), 3808 VTs, &Ops[0], Ops.size()); 3809 } else { 3810 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), 3811 VTs, &Ops[0], Ops.size()); 3812 } 3813 3814 if (HasChain) { 3815 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); 3816 if (OnlyLoad) 3817 PendingLoads.push_back(Chain); 3818 else 3819 DAG.setRoot(Chain); 3820 } 3821 3822 if (!I.getType()->isVoidTy()) { 3823 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) { 3824 EVT VT = TLI->getValueType(PTy); 3825 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result); 3826 } 3827 3828 setValue(&I, Result); 3829 } 3830} 3831 3832/// GetSignificand - Get the significand and build it into a floating-point 3833/// number with exponent of 1: 3834/// 3835/// Op = (Op & 0x007fffff) | 0x3f800000; 3836/// 3837/// where Op is the hexadecimal representation of floating point value. 3838static SDValue 3839GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) { 3840 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 3841 DAG.getConstant(0x007fffff, MVT::i32)); 3842 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, 3843 DAG.getConstant(0x3f800000, MVT::i32)); 3844 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2); 3845} 3846 3847/// GetExponent - Get the exponent: 3848/// 3849/// (float)(int)(((Op & 0x7f800000) >> 23) - 127); 3850/// 3851/// where Op is the hexadecimal representation of floating point value. 3852static SDValue 3853GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, 3854 SDLoc dl) { 3855 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 3856 DAG.getConstant(0x7f800000, MVT::i32)); 3857 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0, 3858 DAG.getConstant(23, TLI.getPointerTy())); 3859 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1, 3860 DAG.getConstant(127, MVT::i32)); 3861 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); 3862} 3863 3864/// getF32Constant - Get 32-bit floating point constant. 3865static SDValue 3866getF32Constant(SelectionDAG &DAG, unsigned Flt) { 3867 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)), 3868 MVT::f32); 3869} 3870 3871/// expandExp - Lower an exp intrinsic. Handles the special sequences for 3872/// limited-precision mode. 
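/// The limited-precision path computes exp(x) as 2^(x * log2(e)) =
/// 2^IntegerPart * 2^FractionalPart: the integer part is shifted directly
/// into the exponent bits of the result, and 2^FractionalPart is
/// approximated by a small minimax polynomial.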
3873static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3874 const TargetLowering &TLI) { 3875 if (Op.getValueType() == MVT::f32 && 3876 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3877 3878 // Put the exponent in the right bit position for later addition to the 3879 // final result: 3880 // 3881 // #define LOG2OFe 1.4426950f 3882 // IntegerPartOfX = ((int32_t)(X * LOG2OFe)); 3883 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op, 3884 getF32Constant(DAG, 0x3fb8aa3b)); 3885 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0); 3886 3887 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX; 3888 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 3889 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); 3890 3891 // IntegerPartOfX <<= 23; 3892 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, 3893 DAG.getConstant(23, TLI.getPointerTy())); 3894 3895 SDValue TwoToFracPartOfX; 3896 if (LimitFloatPrecision <= 6) { 3897 // For floating-point precision of 6: 3898 // 3899 // TwoToFractionalPartOfX = 3900 // 0.997535578f + 3901 // (0.735607626f + 0.252464424f * x) * x; 3902 // 3903 // error 0.0144103317, which is 6 bits 3904 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3905 getF32Constant(DAG, 0x3e814304)); 3906 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3907 getF32Constant(DAG, 0x3f3c50c8)); 3908 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3909 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3910 getF32Constant(DAG, 0x3f7f5e7e)); 3911 } else if (LimitFloatPrecision <= 12) { 3912 // For floating-point precision of 12: 3913 // 3914 // TwoToFractionalPartOfX = 3915 // 0.999892986f + 3916 // (0.696457318f + 3917 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 3918 // 3919 // 0.000107046256 error, which is 13 to 14 bits 3920 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3921 getF32Constant(DAG, 0x3da235e3)); 3922 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3923 getF32Constant(DAG, 0x3e65b8f3)); 3924 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3925 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3926 getF32Constant(DAG, 0x3f324b07)); 3927 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3928 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 3929 getF32Constant(DAG, 0x3f7ff8fd)); 3930 } else { // LimitFloatPrecision <= 18 3931 // For floating-point precision of 18: 3932 // 3933 // TwoToFractionalPartOfX = 3934 // 0.999999982f + 3935 // (0.693148872f + 3936 // (0.240227044f + 3937 // (0.554906021e-1f + 3938 // (0.961591928e-2f + 3939 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 3940 // 3941 // error 2.47208000*10^(-7), which is better than 18 bits 3942 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3943 getF32Constant(DAG, 0x3924b03e)); 3944 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3945 getF32Constant(DAG, 0x3ab24b87)); 3946 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3947 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3948 getF32Constant(DAG, 0x3c1d8c17)); 3949 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3950 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 3951 getF32Constant(DAG, 0x3d634a1d)); 3952 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 3953 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 3954 getF32Constant(DAG, 0x3e75fe14)); 3955 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 3956 SDValue t11 = 
DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 3957 getF32Constant(DAG, 0x3f317234)); 3958 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 3959 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 3960 getF32Constant(DAG, 0x3f800000)); 3961 } 3962 3963 // Add the exponent into the result in integer domain. 3964 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX); 3965 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3966 DAG.getNode(ISD::ADD, dl, MVT::i32, 3967 t13, IntegerPartOfX)); 3968 } 3969 3970 // No special expansion. 3971 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op); 3972} 3973 3974/// expandLog - Lower a log intrinsic. Handles the special sequences for 3975/// limited-precision mode. 3976static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3977 const TargetLowering &TLI) { 3978 if (Op.getValueType() == MVT::f32 && 3979 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3980 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3981 3982 // Scale the exponent by log(2) [0.69314718f]. 3983 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 3984 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 3985 getF32Constant(DAG, 0x3f317218)); 3986 3987 // Get the significand and build it into a floating-point number with 3988 // exponent of 1. 3989 SDValue X = GetSignificand(DAG, Op1, dl); 3990 3991 SDValue LogOfMantissa; 3992 if (LimitFloatPrecision <= 6) { 3993 // For floating-point precision of 6: 3994 // 3995 // LogofMantissa = 3996 // -1.1609546f + 3997 // (1.4034025f - 0.23903021f * x) * x; 3998 // 3999 // error 0.0034276066, which is better than 8 bits 4000 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4001 getF32Constant(DAG, 0xbe74c456)); 4002 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4003 getF32Constant(DAG, 0x3fb3a2b1)); 4004 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4005 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4006 getF32Constant(DAG, 0x3f949a29)); 4007 } else if (LimitFloatPrecision <= 12) { 4008 // For floating-point precision of 12: 4009 // 4010 // LogOfMantissa = 4011 // -1.7417939f + 4012 // (2.8212026f + 4013 // (-1.4699568f + 4014 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; 4015 // 4016 // error 0.000061011436, which is 14 bits 4017 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4018 getF32Constant(DAG, 0xbd67b6d6)); 4019 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4020 getF32Constant(DAG, 0x3ee4f4b8)); 4021 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4022 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4023 getF32Constant(DAG, 0x3fbc278b)); 4024 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4025 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4026 getF32Constant(DAG, 0x40348e95)); 4027 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4028 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 4029 getF32Constant(DAG, 0x3fdef31a)); 4030 } else { // LimitFloatPrecision <= 18 4031 // For floating-point precision of 18: 4032 // 4033 // LogOfMantissa = 4034 // -2.1072184f + 4035 // (4.2372794f + 4036 // (-3.7029485f + 4037 // (2.2781945f + 4038 // (-0.87823314f + 4039 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; 4040 // 4041 // error 0.0000023660568, which is better than 18 bits 4042 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4043 getF32Constant(DAG, 0xbc91e5ac)); 4044 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4045 getF32Constant(DAG, 
0x3e4350aa)); 4046 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4047 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4048 getF32Constant(DAG, 0x3f60d3e3)); 4049 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4050 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4051 getF32Constant(DAG, 0x4011cdf0)); 4052 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4053 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 4054 getF32Constant(DAG, 0x406cfd1c)); 4055 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4056 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4057 getF32Constant(DAG, 0x408797cb)); 4058 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4059 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 4060 getF32Constant(DAG, 0x4006dcab)); 4061 } 4062 4063 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); 4064 } 4065 4066 // No special expansion. 4067 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op); 4068} 4069 4070/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for 4071/// limited-precision mode. 4072static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG, 4073 const TargetLowering &TLI) { 4074 if (Op.getValueType() == MVT::f32 && 4075 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4076 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 4077 4078 // Get the exponent. 4079 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); 4080 4081 // Get the significand and build it into a floating-point number with 4082 // exponent of 1. 4083 SDValue X = GetSignificand(DAG, Op1, dl); 4084 4085 // Different possible minimax approximations of significand in 4086 // floating-point for various degrees of accuracy over [1,2]. 
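// The identity used here is log2(x) = Exponent + log2(Mantissa), with the
// mantissa normalized into [1,2) by GetSignificand, so only log2 of the
// mantissa needs a polynomial approximation.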
4087 SDValue Log2ofMantissa; 4088 if (LimitFloatPrecision <= 6) { 4089 // For floating-point precision of 6: 4090 // 4091 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; 4092 // 4093 // error 0.0049451742, which is more than 7 bits 4094 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4095 getF32Constant(DAG, 0xbeb08fe0)); 4096 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4097 getF32Constant(DAG, 0x40019463)); 4098 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4099 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4100 getF32Constant(DAG, 0x3fd6633d)); 4101 } else if (LimitFloatPrecision <= 12) { 4102 // For floating-point precision of 12: 4103 // 4104 // Log2ofMantissa = 4105 // -2.51285454f + 4106 // (4.07009056f + 4107 // (-2.12067489f + 4108 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; 4109 // 4110 // error 0.0000876136000, which is better than 13 bits 4111 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4112 getF32Constant(DAG, 0xbda7262e)); 4113 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4114 getF32Constant(DAG, 0x3f25280b)); 4115 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4116 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4117 getF32Constant(DAG, 0x4007b923)); 4118 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4119 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4120 getF32Constant(DAG, 0x40823e2f)); 4121 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4122 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 4123 getF32Constant(DAG, 0x4020d29c)); 4124 } else { // LimitFloatPrecision <= 18 4125 // For floating-point precision of 18: 4126 // 4127 // Log2ofMantissa = 4128 // -3.0400495f + 4129 // (6.1129976f + 4130 // (-5.3420409f + 4131 // (3.2865683f + 4132 // (-1.2669343f + 4133 // (0.27515199f - 4134 // 0.25691327e-1f * x) * x) * x) * x) * x) * x; 4135 // 4136 // error 0.0000018516, which is better than 18 bits 4137 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4138 getF32Constant(DAG, 0xbcd2769e)); 4139 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4140 getF32Constant(DAG, 0x3e8ce0b9)); 4141 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4142 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4143 getF32Constant(DAG, 0x3fa22ae7)); 4144 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4145 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4146 getF32Constant(DAG, 0x40525723)); 4147 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4148 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 4149 getF32Constant(DAG, 0x40aaf200)); 4150 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4151 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4152 getF32Constant(DAG, 0x40c39dad)); 4153 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4154 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 4155 getF32Constant(DAG, 0x4042902c)); 4156 } 4157 4158 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); 4159 } 4160 4161 // No special expansion. 4162 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op); 4163} 4164 4165/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for 4166/// limited-precision mode. 
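/// Uses log10(x) = Exponent * log10(2) + log10(Mantissa), with the mantissa
/// scaled into [1,2) and log10 of it approximated by a short polynomial.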
4167static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG, 4168 const TargetLowering &TLI) { 4169 if (Op.getValueType() == MVT::f32 && 4170 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4171 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 4172 4173 // Scale the exponent by log10(2) [0.30102999f]. 4174 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 4175 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 4176 getF32Constant(DAG, 0x3e9a209a)); 4177 4178 // Get the significand and build it into a floating-point number with 4179 // exponent of 1. 4180 SDValue X = GetSignificand(DAG, Op1, dl); 4181 4182 SDValue Log10ofMantissa; 4183 if (LimitFloatPrecision <= 6) { 4184 // For floating-point precision of 6: 4185 // 4186 // Log10ofMantissa = 4187 // -0.50419619f + 4188 // (0.60948995f - 0.10380950f * x) * x; 4189 // 4190 // error 0.0014886165, which is 6 bits 4191 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4192 getF32Constant(DAG, 0xbdd49a13)); 4193 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4194 getF32Constant(DAG, 0x3f1c0789)); 4195 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4196 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4197 getF32Constant(DAG, 0x3f011300)); 4198 } else if (LimitFloatPrecision <= 12) { 4199 // For floating-point precision of 12: 4200 // 4201 // Log10ofMantissa = 4202 // -0.64831180f + 4203 // (0.91751397f + 4204 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; 4205 // 4206 // error 0.00019228036, which is better than 12 bits 4207 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4208 getF32Constant(DAG, 0x3d431f31)); 4209 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 4210 getF32Constant(DAG, 0x3ea21fb2)); 4211 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4212 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4213 getF32Constant(DAG, 0x3f6ae232)); 4214 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4215 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 4216 getF32Constant(DAG, 0x3f25f7c3)); 4217 } else { // LimitFloatPrecision <= 18 4218 // For floating-point precision of 18: 4219 // 4220 // Log10ofMantissa = 4221 // -0.84299375f + 4222 // (1.5327582f + 4223 // (-1.0688956f + 4224 // (0.49102474f + 4225 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; 4226 // 4227 // error 0.0000037995730, which is better than 18 bits 4228 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4229 getF32Constant(DAG, 0x3c5d51ce)); 4230 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 4231 getF32Constant(DAG, 0x3e00685a)); 4232 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4233 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4234 getF32Constant(DAG, 0x3efb6798)); 4235 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4236 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 4237 getF32Constant(DAG, 0x3f88d192)); 4238 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4239 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4240 getF32Constant(DAG, 0x3fc4316c)); 4241 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4242 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8, 4243 getF32Constant(DAG, 0x3f57ce70)); 4244 } 4245 4246 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); 4247 } 4248 4249 // No special expansion. 4250 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op); 4251} 4252 4253/// expandExp2 - Lower an exp2 intrinsic. 
Handles the special sequences for 4254/// limited-precision mode. 4255static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG, 4256 const TargetLowering &TLI) { 4257 if (Op.getValueType() == MVT::f32 && 4258 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4259 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op); 4260 4261 // FractionalPartOfX = x - (float)IntegerPartOfX; 4262 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 4263 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1); 4264 4265 // IntegerPartOfX <<= 23; 4266 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, 4267 DAG.getConstant(23, TLI.getPointerTy())); 4268 4269 SDValue TwoToFractionalPartOfX; 4270 if (LimitFloatPrecision <= 6) { 4271 // For floating-point precision of 6: 4272 // 4273 // TwoToFractionalPartOfX = 4274 // 0.997535578f + 4275 // (0.735607626f + 0.252464424f * x) * x; 4276 // 4277 // error 0.0144103317, which is 6 bits 4278 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4279 getF32Constant(DAG, 0x3e814304)); 4280 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4281 getF32Constant(DAG, 0x3f3c50c8)); 4282 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4283 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4284 getF32Constant(DAG, 0x3f7f5e7e)); 4285 } else if (LimitFloatPrecision <= 12) { 4286 // For floating-point precision of 12: 4287 // 4288 // TwoToFractionalPartOfX = 4289 // 0.999892986f + 4290 // (0.696457318f + 4291 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 4292 // 4293 // error 0.000107046256, which is 13 to 14 bits 4294 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4295 getF32Constant(DAG, 0x3da235e3)); 4296 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4297 getF32Constant(DAG, 0x3e65b8f3)); 4298 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4299 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4300 getF32Constant(DAG, 0x3f324b07)); 4301 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4302 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4303 getF32Constant(DAG, 0x3f7ff8fd)); 4304 } else { // LimitFloatPrecision <= 18 4305 // For floating-point precision of 18: 4306 // 4307 // TwoToFractionalPartOfX = 4308 // 0.999999982f + 4309 // (0.693148872f + 4310 // (0.240227044f + 4311 // (0.554906021e-1f + 4312 // (0.961591928e-2f + 4313 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 4314 // error 2.47208000*10^(-7), which is better than 18 bits 4315 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4316 getF32Constant(DAG, 0x3924b03e)); 4317 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4318 getF32Constant(DAG, 0x3ab24b87)); 4319 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4320 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4321 getF32Constant(DAG, 0x3c1d8c17)); 4322 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4323 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4324 getF32Constant(DAG, 0x3d634a1d)); 4325 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4326 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4327 getF32Constant(DAG, 0x3e75fe14)); 4328 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4329 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 4330 getF32Constant(DAG, 0x3f317234)); 4331 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 4332 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 4333 
getF32Constant(DAG, 0x3f800000)); 4334 } 4335 4336 // Add the exponent into the result in integer domain. 4337 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4338 TwoToFractionalPartOfX); 4339 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 4340 DAG.getNode(ISD::ADD, dl, MVT::i32, 4341 t13, IntegerPartOfX)); 4342 } 4343 4344 // No special expansion. 4345 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op); 4346} 4347 4348/// visitPow - Lower a pow intrinsic. Handles the special sequences for 4349/// limited-precision mode with x == 10.0f. 4350static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS, 4351 SelectionDAG &DAG, const TargetLowering &TLI) { 4352 bool IsExp10 = false; 4353 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 && 4354 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4355 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) { 4356 APFloat Ten(10.0f); 4357 IsExp10 = LHSC->isExactlyValue(Ten); 4358 } 4359 } 4360 4361 if (IsExp10) { 4362 // Put the exponent in the right bit position for later addition to the 4363 // final result: 4364 // 4365 // #define LOG2OF10 3.3219281f 4366 // IntegerPartOfX = (int32_t)(x * LOG2OF10); 4367 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS, 4368 getF32Constant(DAG, 0x40549a78)); 4369 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0); 4370 4371 // FractionalPartOfX = x - (float)IntegerPartOfX; 4372 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 4373 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); 4374 4375 // IntegerPartOfX <<= 23; 4376 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, 4377 DAG.getConstant(23, TLI.getPointerTy())); 4378 4379 SDValue TwoToFractionalPartOfX; 4380 if (LimitFloatPrecision <= 6) { 4381 // For floating-point precision of 6: 4382 // 4383 // twoToFractionalPartOfX = 4384 // 0.997535578f + 4385 // (0.735607626f + 0.252464424f * x) * x; 4386 // 4387 // error 0.0144103317, which is 6 bits 4388 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4389 getF32Constant(DAG, 0x3e814304)); 4390 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4391 getF32Constant(DAG, 0x3f3c50c8)); 4392 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4393 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4394 getF32Constant(DAG, 0x3f7f5e7e)); 4395 } else if (LimitFloatPrecision <= 12) { 4396 // For floating-point precision of 12: 4397 // 4398 // TwoToFractionalPartOfX = 4399 // 0.999892986f + 4400 // (0.696457318f + 4401 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 4402 // 4403 // error 0.000107046256, which is 13 to 14 bits 4404 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4405 getF32Constant(DAG, 0x3da235e3)); 4406 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4407 getF32Constant(DAG, 0x3e65b8f3)); 4408 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4409 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4410 getF32Constant(DAG, 0x3f324b07)); 4411 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4412 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4413 getF32Constant(DAG, 0x3f7ff8fd)); 4414 } else { // LimitFloatPrecision <= 18 4415 // For floating-point precision of 18: 4416 // 4417 // TwoToFractionalPartOfX = 4418 // 0.999999982f + 4419 // (0.693148872f + 4420 // (0.240227044f + 4421 // (0.554906021e-1f + 4422 // (0.961591928e-2f + 4423 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 4424 // error 
2.47208000*10^(-7), which is better than 18 bits 4425 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4426 getF32Constant(DAG, 0x3924b03e)); 4427 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4428 getF32Constant(DAG, 0x3ab24b87)); 4429 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4430 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 4431 getF32Constant(DAG, 0x3c1d8c17)); 4432 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 4433 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 4434 getF32Constant(DAG, 0x3d634a1d)); 4435 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 4436 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4437 getF32Constant(DAG, 0x3e75fe14)); 4438 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4439 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 4440 getF32Constant(DAG, 0x3f317234)); 4441 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 4442 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 4443 getF32Constant(DAG, 0x3f800000)); 4444 } 4445 4446 SDValue t13 = DAG.getNode(ISD::BITCAST, dl,MVT::i32,TwoToFractionalPartOfX); 4447 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 4448 DAG.getNode(ISD::ADD, dl, MVT::i32, 4449 t13, IntegerPartOfX)); 4450 } 4451 4452 // No special expansion. 4453 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS); 4454} 4455 4456 4457/// ExpandPowI - Expand a llvm.powi intrinsic. 4458static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS, 4459 SelectionDAG &DAG) { 4460 // If RHS is a constant, we can expand this out to a multiplication tree, 4461 // otherwise we end up lowering to a call to __powidf2 (for example). When 4462 // optimizing for size, we only want to do this if the expansion would produce 4463 // a small number of multiplies, otherwise we do the full expansion. 4464 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 4465 // Get the exponent as a positive value. 4466 unsigned Val = RHSC->getSExtValue(); 4467 if ((int)Val < 0) Val = -Val; 4468 4469 // powi(x, 0) -> 1.0 4470 if (Val == 0) 4471 return DAG.getConstantFP(1.0, LHS.getValueType()); 4472 4473 const Function *F = DAG.getMachineFunction().getFunction(); 4474 if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, 4475 Attribute::OptimizeForSize) || 4476 // If optimizing for size, don't insert too many multiplies. This 4477 // inserts up to 5 multiplies. 4478 CountPopulation_32(Val)+Log2_32(Val) < 7) { 4479 // We use the simple binary decomposition method to generate the multiply 4480 // sequence. There are more optimal ways to do this (for example, 4481 // powi(x,15) generates one more multiply than it should), but this has 4482 // the benefit of being both really simple and much better than a libcall. 4483 SDValue Res; // Logically starts equal to 1.0 4484 SDValue CurSquare = LHS; 4485 while (Val) { 4486 if (Val & 1) { 4487 if (Res.getNode()) 4488 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare); 4489 else 4490 Res = CurSquare; // 1.0*CurSquare. 4491 } 4492 4493 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(), 4494 CurSquare, CurSquare); 4495 Val >>= 1; 4496 } 4497 4498 // If the original was negative, invert the result, producing 1/(x*x*x). 4499 if (RHSC->getSExtValue() < 0) 4500 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(), 4501 DAG.getConstantFP(1.0, LHS.getValueType()), Res); 4502 return Res; 4503 } 4504 } 4505 4506 // Otherwise, expand to a libcall. 
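// (The resulting FPOWI node is normally legalized by the target to the
// actual runtime call, e.g. __powidf2 as noted above; the exact libcall is
// target-dependent.)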
4507 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS); 4508} 4509 4510// getTruncatedArgReg - Find underlying register used for an truncated 4511// argument. 4512static unsigned getTruncatedArgReg(const SDValue &N) { 4513 if (N.getOpcode() != ISD::TRUNCATE) 4514 return 0; 4515 4516 const SDValue &Ext = N.getOperand(0); 4517 if (Ext.getOpcode() == ISD::AssertZext || 4518 Ext.getOpcode() == ISD::AssertSext) { 4519 const SDValue &CFR = Ext.getOperand(0); 4520 if (CFR.getOpcode() == ISD::CopyFromReg) 4521 return cast<RegisterSDNode>(CFR.getOperand(1))->getReg(); 4522 if (CFR.getOpcode() == ISD::TRUNCATE) 4523 return getTruncatedArgReg(CFR); 4524 } 4525 return 0; 4526} 4527 4528/// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function 4529/// argument, create the corresponding DBG_VALUE machine instruction for it now. 4530/// At the end of instruction selection, they will be inserted to the entry BB. 4531bool 4532SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable, 4533 int64_t Offset, 4534 const SDValue &N) { 4535 const Argument *Arg = dyn_cast<Argument>(V); 4536 if (!Arg) 4537 return false; 4538 4539 MachineFunction &MF = DAG.getMachineFunction(); 4540 const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo(); 4541 4542 // Ignore inlined function arguments here. 4543 DIVariable DV(Variable); 4544 if (DV.isInlinedFnArgument(MF.getFunction())) 4545 return false; 4546 4547 Optional<MachineOperand> Op; 4548 // Some arguments' frame index is recorded during argument lowering. 4549 if (int FI = FuncInfo.getArgumentFrameIndex(Arg)) 4550 Op = MachineOperand::CreateFI(FI); 4551 4552 if (!Op && N.getNode()) { 4553 unsigned Reg; 4554 if (N.getOpcode() == ISD::CopyFromReg) 4555 Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg(); 4556 else 4557 Reg = getTruncatedArgReg(N); 4558 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) { 4559 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4560 unsigned PR = RegInfo.getLiveInPhysReg(Reg); 4561 if (PR) 4562 Reg = PR; 4563 } 4564 if (Reg) 4565 Op = MachineOperand::CreateReg(Reg, false); 4566 } 4567 4568 if (!Op) { 4569 // Check if ValueMap has reg number. 4570 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); 4571 if (VMI != FuncInfo.ValueMap.end()) 4572 Op = MachineOperand::CreateReg(VMI->second, false); 4573 } 4574 4575 if (!Op && N.getNode()) 4576 // Check if frame index is available. 4577 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode())) 4578 if (FrameIndexSDNode *FINode = 4579 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 4580 Op = MachineOperand::CreateFI(FINode->getIndex()); 4581 4582 if (!Op) 4583 return false; 4584 4585 // FIXME: This does not handle register-indirect values at offset 0. 4586 bool IsIndirect = Offset != 0; 4587 if (Op->isReg()) 4588 FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(), 4589 TII->get(TargetOpcode::DBG_VALUE), 4590 IsIndirect, 4591 Op->getReg(), Offset, Variable)); 4592 else 4593 FuncInfo.ArgDbgValues.push_back( 4594 BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE)) 4595 .addOperand(*Op).addImm(Offset).addMetadata(Variable)); 4596 4597 return true; 4598} 4599 4600// VisualStudio defines setjmp as _setjmp 4601#if defined(_MSC_VER) && defined(setjmp) && \ 4602 !defined(setjmp_undefined_for_msvc) 4603# pragma push_macro("setjmp") 4604# undef setjmp 4605# define setjmp_undefined_for_msvc 4606#endif 4607 4608/// visitIntrinsicCall - Lower the call to the specified intrinsic function. 
If 4609/// we want to emit this as a call to a named external function, return the name 4610/// otherwise lower it and return null. 4611const char * 4612SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { 4613 const TargetLowering *TLI = TM.getTargetLowering(); 4614 SDLoc sdl = getCurSDLoc(); 4615 DebugLoc dl = getCurDebugLoc(); 4616 SDValue Res; 4617 4618 switch (Intrinsic) { 4619 default: 4620 // By default, turn this into a target intrinsic node. 4621 visitTargetIntrinsic(I, Intrinsic); 4622 return 0; 4623 case Intrinsic::vastart: visitVAStart(I); return 0; 4624 case Intrinsic::vaend: visitVAEnd(I); return 0; 4625 case Intrinsic::vacopy: visitVACopy(I); return 0; 4626 case Intrinsic::returnaddress: 4627 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(), 4628 getValue(I.getArgOperand(0)))); 4629 return 0; 4630 case Intrinsic::frameaddress: 4631 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(), 4632 getValue(I.getArgOperand(0)))); 4633 return 0; 4634 case Intrinsic::setjmp: 4635 return &"_setjmp"[!TLI->usesUnderscoreSetJmp()]; 4636 case Intrinsic::longjmp: 4637 return &"_longjmp"[!TLI->usesUnderscoreLongJmp()]; 4638 case Intrinsic::memcpy: { 4639 // Assert for address < 256 since we support only user defined address 4640 // spaces. 4641 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4642 < 256 && 4643 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace() 4644 < 256 && 4645 "Unknown address space"); 4646 SDValue Op1 = getValue(I.getArgOperand(0)); 4647 SDValue Op2 = getValue(I.getArgOperand(1)); 4648 SDValue Op3 = getValue(I.getArgOperand(2)); 4649 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4650 if (!Align) 4651 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment. 4652 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4653 DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false, 4654 MachinePointerInfo(I.getArgOperand(0)), 4655 MachinePointerInfo(I.getArgOperand(1)))); 4656 return 0; 4657 } 4658 case Intrinsic::memset: { 4659 // Assert for address < 256 since we support only user defined address 4660 // spaces. 4661 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4662 < 256 && 4663 "Unknown address space"); 4664 SDValue Op1 = getValue(I.getArgOperand(0)); 4665 SDValue Op2 = getValue(I.getArgOperand(1)); 4666 SDValue Op3 = getValue(I.getArgOperand(2)); 4667 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4668 if (!Align) 4669 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment. 4670 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4671 DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4672 MachinePointerInfo(I.getArgOperand(0)))); 4673 return 0; 4674 } 4675 case Intrinsic::memmove: { 4676 // Assert for address < 256 since we support only user defined address 4677 // spaces. 4678 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4679 < 256 && 4680 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace() 4681 < 256 && 4682 "Unknown address space"); 4683 SDValue Op1 = getValue(I.getArgOperand(0)); 4684 SDValue Op2 = getValue(I.getArgOperand(1)); 4685 SDValue Op3 = getValue(I.getArgOperand(2)); 4686 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4687 if (!Align) 4688 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment. 
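    // Unlike @llvm.memcpy, the source and destination given to @llvm.memmove
    // may overlap, so the lowering chosen by getMemmove below must either load
    // all of the data before storing any of it or fall back to the memmove
    // libcall; it cannot simply stream bytes from source to destination.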
4689 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4690 DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4691 MachinePointerInfo(I.getArgOperand(0)), 4692 MachinePointerInfo(I.getArgOperand(1)))); 4693 return 0; 4694 } 4695 case Intrinsic::dbg_declare: { 4696 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 4697 MDNode *Variable = DI.getVariable(); 4698 const Value *Address = DI.getAddress(); 4699 DIVariable DIVar(Variable); 4700 assert((!DIVar || DIVar.isVariable()) && 4701 "Variable in DbgDeclareInst should be either null or a DIVariable."); 4702 if (!Address || !DIVar) { 4703 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4704 return 0; 4705 } 4706 4707 // Check if address has undef value. 4708 if (isa<UndefValue>(Address) || 4709 (Address->use_empty() && !isa<Argument>(Address))) { 4710 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4711 return 0; 4712 } 4713 4714 SDValue &N = NodeMap[Address]; 4715 if (!N.getNode() && isa<Argument>(Address)) 4716 // Check unused arguments map. 4717 N = UnusedArgNodeMap[Address]; 4718 SDDbgValue *SDV; 4719 if (N.getNode()) { 4720 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address)) 4721 Address = BCI->getOperand(0); 4722 // Parameters are handled specially. 4723 bool isParameter = 4724 (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable || 4725 isa<Argument>(Address)); 4726 4727 const AllocaInst *AI = dyn_cast<AllocaInst>(Address); 4728 4729 if (isParameter && !AI) { 4730 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode()); 4731 if (FINode) 4732 // Byval parameter. We have a frame index at this point. 4733 SDV = DAG.getDbgValue(Variable, FINode->getIndex(), 4734 0, dl, SDNodeOrder); 4735 else { 4736 // Address is an argument, so try to emit its dbg value using 4737 // virtual register info from the FuncInfo.ValueMap. 4738 EmitFuncArgumentDbgValue(Address, Variable, 0, N); 4739 return 0; 4740 } 4741 } else if (AI) 4742 SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(), 4743 0, dl, SDNodeOrder); 4744 else { 4745 // Can't do anything with other non-AI cases yet. 4746 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4747 DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t"); 4748 DEBUG(Address->dump()); 4749 return 0; 4750 } 4751 DAG.AddDbgValue(SDV, N.getNode(), isParameter); 4752 } else { 4753 // If Address is an argument then try to emit its dbg value using 4754 // virtual register info from the FuncInfo.ValueMap. 4755 if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) { 4756 // If variable is pinned by a alloca in dominating bb then 4757 // use StaticAllocaMap. 
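        // A typical case looks like this in IR (names are illustrative):
        //
        //   entry:
        //     %x.addr = alloca i32
        //     ...
        //   other.bb:
        //     call void @llvm.dbg.declare(metadata !{i32* %x.addr}, metadata !10)
        //
        // No SDValue exists for %x.addr in this block, but the alloca already
        // has a frame index that describes the variable's location.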
4758 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) { 4759 if (AI->getParent() != DI.getParent()) { 4760 DenseMap<const AllocaInst*, int>::iterator SI = 4761 FuncInfo.StaticAllocaMap.find(AI); 4762 if (SI != FuncInfo.StaticAllocaMap.end()) { 4763 SDV = DAG.getDbgValue(Variable, SI->second, 4764 0, dl, SDNodeOrder); 4765 DAG.AddDbgValue(SDV, 0, false); 4766 return 0; 4767 } 4768 } 4769 } 4770 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4771 } 4772 } 4773 return 0; 4774 } 4775 case Intrinsic::dbg_value: { 4776 const DbgValueInst &DI = cast<DbgValueInst>(I); 4777 DIVariable DIVar(DI.getVariable()); 4778 assert((!DIVar || DIVar.isVariable()) && 4779 "Variable in DbgValueInst should be either null or a DIVariable."); 4780 if (!DIVar) 4781 return 0; 4782 4783 MDNode *Variable = DI.getVariable(); 4784 uint64_t Offset = DI.getOffset(); 4785 const Value *V = DI.getValue(); 4786 if (!V) 4787 return 0; 4788 4789 SDDbgValue *SDV; 4790 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) { 4791 SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder); 4792 DAG.AddDbgValue(SDV, 0, false); 4793 } else { 4794 // Do not use getValue() in here; we don't want to generate code at 4795 // this point if it hasn't been done yet. 4796 SDValue N = NodeMap[V]; 4797 if (!N.getNode() && isa<Argument>(V)) 4798 // Check unused arguments map. 4799 N = UnusedArgNodeMap[V]; 4800 if (N.getNode()) { 4801 if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) { 4802 SDV = DAG.getDbgValue(Variable, N.getNode(), 4803 N.getResNo(), Offset, dl, SDNodeOrder); 4804 DAG.AddDbgValue(SDV, N.getNode(), false); 4805 } 4806 } else if (!V->use_empty() ) { 4807 // Do not call getValue(V) yet, as we don't want to generate code. 4808 // Remember it for later. 4809 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder); 4810 DanglingDebugInfoMap[V] = DDI; 4811 } else { 4812 // We may expand this to cover more cases. One case where we have no 4813 // data available is an unreferenced parameter. 4814 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4815 } 4816 } 4817 4818 // Build a debug info table entry. 4819 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V)) 4820 V = BCI->getOperand(0); 4821 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 4822 // Don't handle byval struct arguments or VLAs, for example. 4823 if (!AI) { 4824 DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n"); 4825 DEBUG(dbgs() << " Last seen at:\n " << *V << "\n"); 4826 return 0; 4827 } 4828 DenseMap<const AllocaInst*, int>::iterator SI = 4829 FuncInfo.StaticAllocaMap.find(AI); 4830 if (SI == FuncInfo.StaticAllocaMap.end()) 4831 return 0; // VLAs. 4832 int FI = SI->second; 4833 4834 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 4835 if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo()) 4836 MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc()); 4837 return 0; 4838 } 4839 4840 case Intrinsic::eh_typeid_for: { 4841 // Find the type id for the given typeinfo. 
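    // The operand is a typeinfo global (for Itanium-style C++ EH, e.g. @_ZTIi
    // for 'catch (int)'). MachineModuleInfo assigns each such global a small
    // integer, and the same numbering is used when the exception selector is
    // produced, so landing-pad code can match a clause with a simple integer
    // compare against this constant.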
4842 GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0)); 4843 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV); 4844 Res = DAG.getConstant(TypeID, MVT::i32); 4845 setValue(&I, Res); 4846 return 0; 4847 } 4848 4849 case Intrinsic::eh_return_i32: 4850 case Intrinsic::eh_return_i64: 4851 DAG.getMachineFunction().getMMI().setCallsEHReturn(true); 4852 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl, 4853 MVT::Other, 4854 getControlRoot(), 4855 getValue(I.getArgOperand(0)), 4856 getValue(I.getArgOperand(1)))); 4857 return 0; 4858 case Intrinsic::eh_unwind_init: 4859 DAG.getMachineFunction().getMMI().setCallsUnwindInit(true); 4860 return 0; 4861 case Intrinsic::eh_dwarf_cfa: { 4862 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl, 4863 TLI->getPointerTy()); 4864 SDValue Offset = DAG.getNode(ISD::ADD, sdl, 4865 CfaArg.getValueType(), 4866 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl, 4867 CfaArg.getValueType()), 4868 CfaArg); 4869 SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl, 4870 TLI->getPointerTy(), 4871 DAG.getConstant(0, TLI->getPointerTy())); 4872 setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(), 4873 FA, Offset)); 4874 return 0; 4875 } 4876 case Intrinsic::eh_sjlj_callsite: { 4877 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 4878 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0)); 4879 assert(CI && "Non-constant call site value in eh.sjlj.callsite!"); 4880 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!"); 4881 4882 MMI.setCurrentCallSite(CI->getZExtValue()); 4883 return 0; 4884 } 4885 case Intrinsic::eh_sjlj_functioncontext: { 4886 // Get and store the index of the function context. 4887 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4888 AllocaInst *FnCtx = 4889 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); 4890 int FI = FuncInfo.StaticAllocaMap[FnCtx]; 4891 MFI->setFunctionContextIndex(FI); 4892 return 0; 4893 } 4894 case Intrinsic::eh_sjlj_setjmp: { 4895 SDValue Ops[2]; 4896 Ops[0] = getRoot(); 4897 Ops[1] = getValue(I.getArgOperand(0)); 4898 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl, 4899 DAG.getVTList(MVT::i32, MVT::Other), 4900 Ops, 2); 4901 setValue(&I, Op.getValue(0)); 4902 DAG.setRoot(Op.getValue(1)); 4903 return 0; 4904 } 4905 case Intrinsic::eh_sjlj_longjmp: { 4906 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other, 4907 getRoot(), getValue(I.getArgOperand(0)))); 4908 return 0; 4909 } 4910 4911 case Intrinsic::x86_mmx_pslli_w: 4912 case Intrinsic::x86_mmx_pslli_d: 4913 case Intrinsic::x86_mmx_pslli_q: 4914 case Intrinsic::x86_mmx_psrli_w: 4915 case Intrinsic::x86_mmx_psrli_d: 4916 case Intrinsic::x86_mmx_psrli_q: 4917 case Intrinsic::x86_mmx_psrai_w: 4918 case Intrinsic::x86_mmx_psrai_d: { 4919 SDValue ShAmt = getValue(I.getArgOperand(1)); 4920 if (isa<ConstantSDNode>(ShAmt)) { 4921 visitTargetIntrinsic(I, Intrinsic); 4922 return 0; 4923 } 4924 unsigned NewIntrinsic = 0; 4925 EVT ShAmtVT = MVT::v2i32; 4926 switch (Intrinsic) { 4927 case Intrinsic::x86_mmx_pslli_w: 4928 NewIntrinsic = Intrinsic::x86_mmx_psll_w; 4929 break; 4930 case Intrinsic::x86_mmx_pslli_d: 4931 NewIntrinsic = Intrinsic::x86_mmx_psll_d; 4932 break; 4933 case Intrinsic::x86_mmx_pslli_q: 4934 NewIntrinsic = Intrinsic::x86_mmx_psll_q; 4935 break; 4936 case Intrinsic::x86_mmx_psrli_w: 4937 NewIntrinsic = Intrinsic::x86_mmx_psrl_w; 4938 break; 4939 case Intrinsic::x86_mmx_psrli_d: 4940 NewIntrinsic = Intrinsic::x86_mmx_psrl_d; 4941 break; 4942 case Intrinsic::x86_mmx_psrli_q: 
4943 NewIntrinsic = Intrinsic::x86_mmx_psrl_q; 4944 break; 4945 case Intrinsic::x86_mmx_psrai_w: 4946 NewIntrinsic = Intrinsic::x86_mmx_psra_w; 4947 break; 4948 case Intrinsic::x86_mmx_psrai_d: 4949 NewIntrinsic = Intrinsic::x86_mmx_psra_d; 4950 break; 4951 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 4952 } 4953 4954 // The vector shift intrinsics with scalar shift amounts use 32-bit values, 4955 // but the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits 4956 // to be zero. 4957 // We must do this early because v2i32 is not a legal type. 4958 SDValue ShOps[2]; 4959 ShOps[0] = ShAmt; 4960 ShOps[1] = DAG.getConstant(0, MVT::i32); 4961 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, &ShOps[0], 2); 4962 EVT DestVT = TLI->getValueType(I.getType()); 4963 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt); 4964 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT, 4965 DAG.getConstant(NewIntrinsic, MVT::i32), 4966 getValue(I.getArgOperand(0)), ShAmt); 4967 setValue(&I, Res); 4968 return 0; 4969 } 4970 case Intrinsic::x86_avx_vinsertf128_pd_256: 4971 case Intrinsic::x86_avx_vinsertf128_ps_256: 4972 case Intrinsic::x86_avx_vinsertf128_si_256: 4973 case Intrinsic::x86_avx2_vinserti128: { 4974 EVT DestVT = TLI->getValueType(I.getType()); 4975 EVT ElVT = TLI->getValueType(I.getArgOperand(1)->getType()); 4976 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) * 4977 ElVT.getVectorNumElements(); 4978 Res = DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT, 4979 getValue(I.getArgOperand(0)), 4980 getValue(I.getArgOperand(1)), 4981 DAG.getConstant(Idx, TLI->getVectorIdxTy())); 4982 setValue(&I, Res); 4983 return 0; 4984 } 4985 case Intrinsic::x86_avx_vextractf128_pd_256: 4986 case Intrinsic::x86_avx_vextractf128_ps_256: 4987 case Intrinsic::x86_avx_vextractf128_si_256: 4988 case Intrinsic::x86_avx2_vextracti128: { 4989 EVT DestVT = TLI->getValueType(I.getType()); 4990 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) * 4991 DestVT.getVectorNumElements(); 4992 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT, 4993 getValue(I.getArgOperand(0)), 4994 DAG.getConstant(Idx, TLI->getVectorIdxTy())); 4995 setValue(&I, Res); 4996 return 0; 4997 } 4998 case Intrinsic::convertff: 4999 case Intrinsic::convertfsi: 5000 case Intrinsic::convertfui: 5001 case Intrinsic::convertsif: 5002 case Intrinsic::convertuif: 5003 case Intrinsic::convertss: 5004 case Intrinsic::convertsu: 5005 case Intrinsic::convertus: 5006 case Intrinsic::convertuu: { 5007 ISD::CvtCode Code = ISD::CVT_INVALID; 5008 switch (Intrinsic) { 5009 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
5010 case Intrinsic::convertff: Code = ISD::CVT_FF; break; 5011 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break; 5012 case Intrinsic::convertfui: Code = ISD::CVT_FU; break; 5013 case Intrinsic::convertsif: Code = ISD::CVT_SF; break; 5014 case Intrinsic::convertuif: Code = ISD::CVT_UF; break; 5015 case Intrinsic::convertss: Code = ISD::CVT_SS; break; 5016 case Intrinsic::convertsu: Code = ISD::CVT_SU; break; 5017 case Intrinsic::convertus: Code = ISD::CVT_US; break; 5018 case Intrinsic::convertuu: Code = ISD::CVT_UU; break; 5019 } 5020 EVT DestVT = TLI->getValueType(I.getType()); 5021 const Value *Op1 = I.getArgOperand(0); 5022 Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1), 5023 DAG.getValueType(DestVT), 5024 DAG.getValueType(getValue(Op1).getValueType()), 5025 getValue(I.getArgOperand(1)), 5026 getValue(I.getArgOperand(2)), 5027 Code); 5028 setValue(&I, Res); 5029 return 0; 5030 } 5031 case Intrinsic::powi: 5032 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)), 5033 getValue(I.getArgOperand(1)), DAG)); 5034 return 0; 5035 case Intrinsic::log: 5036 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI)); 5037 return 0; 5038 case Intrinsic::log2: 5039 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI)); 5040 return 0; 5041 case Intrinsic::log10: 5042 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI)); 5043 return 0; 5044 case Intrinsic::exp: 5045 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI)); 5046 return 0; 5047 case Intrinsic::exp2: 5048 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI)); 5049 return 0; 5050 case Intrinsic::pow: 5051 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)), 5052 getValue(I.getArgOperand(1)), DAG, *TLI)); 5053 return 0; 5054 case Intrinsic::sqrt: 5055 case Intrinsic::fabs: 5056 case Intrinsic::sin: 5057 case Intrinsic::cos: 5058 case Intrinsic::floor: 5059 case Intrinsic::ceil: 5060 case Intrinsic::trunc: 5061 case Intrinsic::rint: 5062 case Intrinsic::nearbyint: 5063 case Intrinsic::round: { 5064 unsigned Opcode; 5065 switch (Intrinsic) { 5066 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
5067 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; 5068 case Intrinsic::fabs: Opcode = ISD::FABS; break; 5069 case Intrinsic::sin: Opcode = ISD::FSIN; break; 5070 case Intrinsic::cos: Opcode = ISD::FCOS; break; 5071 case Intrinsic::floor: Opcode = ISD::FFLOOR; break; 5072 case Intrinsic::ceil: Opcode = ISD::FCEIL; break; 5073 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; 5074 case Intrinsic::rint: Opcode = ISD::FRINT; break; 5075 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; 5076 case Intrinsic::round: Opcode = ISD::FROUND; break; 5077 } 5078 5079 setValue(&I, DAG.getNode(Opcode, sdl, 5080 getValue(I.getArgOperand(0)).getValueType(), 5081 getValue(I.getArgOperand(0)))); 5082 return 0; 5083 } 5084 case Intrinsic::copysign: 5085 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl, 5086 getValue(I.getArgOperand(0)).getValueType(), 5087 getValue(I.getArgOperand(0)), 5088 getValue(I.getArgOperand(1)))); 5089 return 0; 5090 case Intrinsic::fma: 5091 setValue(&I, DAG.getNode(ISD::FMA, sdl, 5092 getValue(I.getArgOperand(0)).getValueType(), 5093 getValue(I.getArgOperand(0)), 5094 getValue(I.getArgOperand(1)), 5095 getValue(I.getArgOperand(2)))); 5096 return 0; 5097 case Intrinsic::fmuladd: { 5098 EVT VT = TLI->getValueType(I.getType()); 5099 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 5100 TLI->isFMAFasterThanFMulAndFAdd(VT)) { 5101 setValue(&I, DAG.getNode(ISD::FMA, sdl, 5102 getValue(I.getArgOperand(0)).getValueType(), 5103 getValue(I.getArgOperand(0)), 5104 getValue(I.getArgOperand(1)), 5105 getValue(I.getArgOperand(2)))); 5106 } else { 5107 SDValue Mul = DAG.getNode(ISD::FMUL, sdl, 5108 getValue(I.getArgOperand(0)).getValueType(), 5109 getValue(I.getArgOperand(0)), 5110 getValue(I.getArgOperand(1))); 5111 SDValue Add = DAG.getNode(ISD::FADD, sdl, 5112 getValue(I.getArgOperand(0)).getValueType(), 5113 Mul, 5114 getValue(I.getArgOperand(2))); 5115 setValue(&I, Add); 5116 } 5117 return 0; 5118 } 5119 case Intrinsic::convert_to_fp16: 5120 setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, sdl, 5121 MVT::i16, getValue(I.getArgOperand(0)))); 5122 return 0; 5123 case Intrinsic::convert_from_fp16: 5124 setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, sdl, 5125 MVT::f32, getValue(I.getArgOperand(0)))); 5126 return 0; 5127 case Intrinsic::pcmarker: { 5128 SDValue Tmp = getValue(I.getArgOperand(0)); 5129 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp)); 5130 return 0; 5131 } 5132 case Intrinsic::readcyclecounter: { 5133 SDValue Op = getRoot(); 5134 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl, 5135 DAG.getVTList(MVT::i64, MVT::Other), 5136 &Op, 1); 5137 setValue(&I, Res); 5138 DAG.setRoot(Res.getValue(1)); 5139 return 0; 5140 } 5141 case Intrinsic::bswap: 5142 setValue(&I, DAG.getNode(ISD::BSWAP, sdl, 5143 getValue(I.getArgOperand(0)).getValueType(), 5144 getValue(I.getArgOperand(0)))); 5145 return 0; 5146 case Intrinsic::cttz: { 5147 SDValue Arg = getValue(I.getArgOperand(0)); 5148 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 5149 EVT Ty = Arg.getValueType(); 5150 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, 5151 sdl, Ty, Arg)); 5152 return 0; 5153 } 5154 case Intrinsic::ctlz: { 5155 SDValue Arg = getValue(I.getArgOperand(0)); 5156 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 5157 EVT Ty = Arg.getValueType(); 5158 setValue(&I, DAG.getNode(CI->isZero() ? 
ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, 5159 sdl, Ty, Arg)); 5160 return 0; 5161 } 5162 case Intrinsic::ctpop: { 5163 SDValue Arg = getValue(I.getArgOperand(0)); 5164 EVT Ty = Arg.getValueType(); 5165 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg)); 5166 return 0; 5167 } 5168 case Intrinsic::stacksave: { 5169 SDValue Op = getRoot(); 5170 Res = DAG.getNode(ISD::STACKSAVE, sdl, 5171 DAG.getVTList(TLI->getPointerTy(), MVT::Other), &Op, 1); 5172 setValue(&I, Res); 5173 DAG.setRoot(Res.getValue(1)); 5174 return 0; 5175 } 5176 case Intrinsic::stackrestore: { 5177 Res = getValue(I.getArgOperand(0)); 5178 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res)); 5179 return 0; 5180 } 5181 case Intrinsic::stackprotector: { 5182 // Emit code into the DAG to store the stack guard onto the stack. 5183 MachineFunction &MF = DAG.getMachineFunction(); 5184 MachineFrameInfo *MFI = MF.getFrameInfo(); 5185 EVT PtrTy = TLI->getPointerTy(); 5186 5187 SDValue Src = getValue(I.getArgOperand(0)); // The guard's value. 5188 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); 5189 5190 int FI = FuncInfo.StaticAllocaMap[Slot]; 5191 MFI->setStackProtectorIndex(FI); 5192 5193 SDValue FIN = DAG.getFrameIndex(FI, PtrTy); 5194 5195 // Store the stack protector onto the stack. 5196 Res = DAG.getStore(getRoot(), sdl, Src, FIN, 5197 MachinePointerInfo::getFixedStack(FI), 5198 true, false, 0); 5199 setValue(&I, Res); 5200 DAG.setRoot(Res); 5201 return 0; 5202 } 5203 case Intrinsic::objectsize: { 5204 // If we don't know by now, we're never going to know. 5205 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1)); 5206 5207 assert(CI && "Non-constant type in __builtin_object_size?"); 5208 5209 SDValue Arg = getValue(I.getCalledValue()); 5210 EVT Ty = Arg.getValueType(); 5211 5212 if (CI->isZero()) 5213 Res = DAG.getConstant(-1ULL, Ty); 5214 else 5215 Res = DAG.getConstant(0, Ty); 5216 5217 setValue(&I, Res); 5218 return 0; 5219 } 5220 case Intrinsic::annotation: 5221 case Intrinsic::ptr_annotation: 5222 // Drop the intrinsic, but forward the value 5223 setValue(&I, getValue(I.getOperand(0))); 5224 return 0; 5225 case Intrinsic::var_annotation: 5226 // Discard annotate attributes 5227 return 0; 5228 5229 case Intrinsic::init_trampoline: { 5230 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); 5231 5232 SDValue Ops[6]; 5233 Ops[0] = getRoot(); 5234 Ops[1] = getValue(I.getArgOperand(0)); 5235 Ops[2] = getValue(I.getArgOperand(1)); 5236 Ops[3] = getValue(I.getArgOperand(2)); 5237 Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); 5238 Ops[5] = DAG.getSrcValue(F); 5239 5240 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops, 6); 5241 5242 DAG.setRoot(Res); 5243 return 0; 5244 } 5245 case Intrinsic::adjust_trampoline: { 5246 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, 5247 TLI->getPointerTy(), 5248 getValue(I.getArgOperand(0)))); 5249 return 0; 5250 } 5251 case Intrinsic::gcroot: 5252 if (GFI) { 5253 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); 5254 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); 5255 5256 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 5257 GFI->addStackRoot(FI->getIndex(), TypeMap); 5258 } 5259 return 0; 5260 case Intrinsic::gcread: 5261 case Intrinsic::gcwrite: 5262 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); 5263 case Intrinsic::flt_rounds: 5264 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32)); 5265 return 0; 5266 5267 case Intrinsic::expect: { 5268 
// Just replace __builtin_expect(exp, c) with EXP. 5269 setValue(&I, getValue(I.getArgOperand(0))); 5270 return 0; 5271 } 5272 5273 case Intrinsic::debugtrap: 5274 case Intrinsic::trap: { 5275 StringRef TrapFuncName = TM.Options.getTrapFunctionName(); 5276 if (TrapFuncName.empty()) { 5277 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? 5278 ISD::TRAP : ISD::DEBUGTRAP; 5279 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot())); 5280 return 0; 5281 } 5282 TargetLowering::ArgListTy Args; 5283 TargetLowering:: 5284 CallLoweringInfo CLI(getRoot(), I.getType(), 5285 false, false, false, false, 0, CallingConv::C, 5286 /*isTailCall=*/false, 5287 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 5288 DAG.getExternalSymbol(TrapFuncName.data(), 5289 TLI->getPointerTy()), 5290 Args, DAG, sdl); 5291 std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI); 5292 DAG.setRoot(Result.second); 5293 return 0; 5294 } 5295 5296 case Intrinsic::uadd_with_overflow: 5297 case Intrinsic::sadd_with_overflow: 5298 case Intrinsic::usub_with_overflow: 5299 case Intrinsic::ssub_with_overflow: 5300 case Intrinsic::umul_with_overflow: 5301 case Intrinsic::smul_with_overflow: { 5302 ISD::NodeType Op; 5303 switch (Intrinsic) { 5304 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 5305 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; 5306 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; 5307 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; 5308 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; 5309 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; 5310 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; 5311 } 5312 SDValue Op1 = getValue(I.getArgOperand(0)); 5313 SDValue Op2 = getValue(I.getArgOperand(1)); 5314 5315 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1); 5316 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); 5317 return 0; 5318 } 5319 case Intrinsic::prefetch: { 5320 SDValue Ops[5]; 5321 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 5322 Ops[0] = getRoot(); 5323 Ops[1] = getValue(I.getArgOperand(0)); 5324 Ops[2] = getValue(I.getArgOperand(1)); 5325 Ops[3] = getValue(I.getArgOperand(2)); 5326 Ops[4] = getValue(I.getArgOperand(3)); 5327 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl, 5328 DAG.getVTList(MVT::Other), 5329 &Ops[0], 5, 5330 EVT::getIntegerVT(*Context, 8), 5331 MachinePointerInfo(I.getArgOperand(0)), 5332 0, /* align */ 5333 false, /* volatile */ 5334 rw==0, /* read */ 5335 rw==1)); /* write */ 5336 return 0; 5337 } 5338 case Intrinsic::lifetime_start: 5339 case Intrinsic::lifetime_end: { 5340 bool IsStart = (Intrinsic == Intrinsic::lifetime_start); 5341 // Stack coloring is not enabled in O0, discard region information. 5342 if (TM.getOptLevel() == CodeGenOpt::None) 5343 return 0; 5344 5345 SmallVector<Value *, 4> Allocas; 5346 GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL); 5347 5348 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(), 5349 E = Allocas.end(); Object != E; ++Object) { 5350 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object); 5351 5352 // Could not find an Alloca. 5353 if (!LifetimeObject) 5354 continue; 5355 5356 int FI = FuncInfo.StaticAllocaMap[LifetimeObject]; 5357 5358 SDValue Ops[2]; 5359 Ops[0] = getRoot(); 5360 Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true); 5361 unsigned Opcode = (IsStart ? 
ISD::LIFETIME_START : ISD::LIFETIME_END); 5362 5363 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops, 2); 5364 DAG.setRoot(Res); 5365 } 5366 return 0; 5367 } 5368 case Intrinsic::invariant_start: 5369 // Discard region information. 5370 setValue(&I, DAG.getUNDEF(TLI->getPointerTy())); 5371 return 0; 5372 case Intrinsic::invariant_end: 5373 // Discard region information. 5374 return 0; 5375 case Intrinsic::stackprotectorcheck: { 5376 // Do not actually emit anything for this basic block. Instead we initialize 5377 // the stack protector descriptor and export the guard variable so we can 5378 // access it in FinishBasicBlock. 5379 const BasicBlock *BB = I.getParent(); 5380 SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I); 5381 ExportFromCurrentBlock(SPDescriptor.getGuard()); 5382 5383 // Flush our exports since we are going to process a terminator. 5384 (void)getControlRoot(); 5385 return 0; 5386 } 5387 case Intrinsic::clear_cache: 5388 return TLI->getClearCacheBuiltinName(); 5389 case Intrinsic::donothing: 5390 // ignore 5391 return 0; 5392 case Intrinsic::experimental_stackmap: { 5393 visitStackmap(I); 5394 return 0; 5395 } 5396 case Intrinsic::experimental_patchpoint_void: 5397 case Intrinsic::experimental_patchpoint_i64: { 5398 visitPatchpoint(I); 5399 return 0; 5400 } 5401 } 5402} 5403 5404void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, 5405 bool isTailCall, 5406 MachineBasicBlock *LandingPad) { 5407 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 5408 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 5409 Type *RetTy = FTy->getReturnType(); 5410 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 5411 MCSymbol *BeginLabel = 0; 5412 5413 TargetLowering::ArgListTy Args; 5414 TargetLowering::ArgListEntry Entry; 5415 Args.reserve(CS.arg_size()); 5416 5417 // Check whether the function can return without sret-demotion. 
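  // If it cannot, the return value is demoted: a stack slot big enough for the
  // result is created below, a pointer to it is passed as a hidden sret
  // argument, and the value is loaded back out of the slot after the call.
  // Conceptually, 'BigStruct f(...)' is lowered as if it had been declared
  // 'void f(BigStruct *sret, ...)'.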
5418 SmallVector<ISD::OutputArg, 4> Outs; 5419 const TargetLowering *TLI = TM.getTargetLowering(); 5420 GetReturnInfo(RetTy, CS.getAttributes(), Outs, *TLI); 5421 5422 bool CanLowerReturn = TLI->CanLowerReturn(CS.getCallingConv(), 5423 DAG.getMachineFunction(), 5424 FTy->isVarArg(), Outs, 5425 FTy->getContext()); 5426 5427 SDValue DemoteStackSlot; 5428 int DemoteStackIdx = -100; 5429 5430 if (!CanLowerReturn) { 5431 assert(!CS.hasInAllocaArgument() && 5432 "sret demotion is incompatible with inalloca"); 5433 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize( 5434 FTy->getReturnType()); 5435 unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment( 5436 FTy->getReturnType()); 5437 MachineFunction &MF = DAG.getMachineFunction(); 5438 DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); 5439 Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType()); 5440 5441 DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI->getPointerTy()); 5442 Entry.Node = DemoteStackSlot; 5443 Entry.Ty = StackSlotPtrType; 5444 Entry.isSExt = false; 5445 Entry.isZExt = false; 5446 Entry.isInReg = false; 5447 Entry.isSRet = true; 5448 Entry.isNest = false; 5449 Entry.isByVal = false; 5450 Entry.isReturned = false; 5451 Entry.Alignment = Align; 5452 Args.push_back(Entry); 5453 RetTy = Type::getVoidTy(FTy->getContext()); 5454 } 5455 5456 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 5457 i != e; ++i) { 5458 const Value *V = *i; 5459 5460 // Skip empty types 5461 if (V->getType()->isEmptyTy()) 5462 continue; 5463 5464 SDValue ArgNode = getValue(V); 5465 Entry.Node = ArgNode; Entry.Ty = V->getType(); 5466 5467 // Skip the first return-type Attribute to get to params. 5468 Entry.setAttributes(&CS, i - CS.arg_begin() + 1); 5469 Args.push_back(Entry); 5470 } 5471 5472 if (LandingPad) { 5473 // Insert a label before the invoke call to mark the try range. This can be 5474 // used to detect deletion of the invoke via the MachineModuleInfo. 5475 BeginLabel = MMI.getContext().CreateTempSymbol(); 5476 5477 // For SjLj, keep track of which landing pads go with which invokes 5478 // so as to maintain the ordering of pads in the LSDA. 5479 unsigned CallSiteIndex = MMI.getCurrentCallSite(); 5480 if (CallSiteIndex) { 5481 MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); 5482 LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex); 5483 5484 // Now that the call site is handled, stop tracking it. 5485 MMI.setCurrentCallSite(0); 5486 } 5487 5488 // Both PendingLoads and PendingExports must be flushed here; 5489 // this call might not return. 5490 (void)getRoot(); 5491 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); 5492 } 5493 5494 // Check if target-independent constraints permit a tail call here. 5495 // Target-dependent constraints are checked within TLI->LowerCallTo. 
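  // Broadly, isInTailCallPosition requires that the call be followed (ignoring
  // no-op casts and other trivially ignorable instructions) by a return of
  // either void or the call's own result, so that nothing in the caller needs
  // to run after the callee and the caller's frame can be reused.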
5496 if (isTailCall && !isInTailCallPosition(CS, *TLI)) 5497 isTailCall = false; 5498 5499 TargetLowering:: 5500 CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG, 5501 getCurSDLoc(), CS); 5502 std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI); 5503 assert((isTailCall || Result.second.getNode()) && 5504 "Non-null chain expected with non-tail call!"); 5505 assert((Result.second.getNode() || !Result.first.getNode()) && 5506 "Null value expected with tail call!"); 5507 if (Result.first.getNode()) { 5508 setValue(CS.getInstruction(), Result.first); 5509 } else if (!CanLowerReturn && Result.second.getNode()) { 5510 // The instruction result is the result of loading from the 5511 // hidden sret parameter. 5512 SmallVector<EVT, 1> PVTs; 5513 Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType()); 5514 5515 ComputeValueVTs(*TLI, PtrRetTy, PVTs); 5516 assert(PVTs.size() == 1 && "Pointers should fit in one register"); 5517 EVT PtrVT = PVTs[0]; 5518 5519 SmallVector<EVT, 4> RetTys; 5520 SmallVector<uint64_t, 4> Offsets; 5521 RetTy = FTy->getReturnType(); 5522 ComputeValueVTs(*TLI, RetTy, RetTys, &Offsets); 5523 5524 unsigned NumValues = RetTys.size(); 5525 SmallVector<SDValue, 4> Values(NumValues); 5526 SmallVector<SDValue, 4> Chains(NumValues); 5527 5528 for (unsigned i = 0; i < NumValues; ++i) { 5529 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, 5530 DemoteStackSlot, 5531 DAG.getConstant(Offsets[i], PtrVT)); 5532 SDValue L = DAG.getLoad(RetTys[i], getCurSDLoc(), Result.second, Add, 5533 MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), 5534 false, false, false, 1); 5535 Values[i] = L; 5536 Chains[i] = L.getValue(1); 5537 } 5538 5539 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 5540 MVT::Other, &Chains[0], NumValues); 5541 PendingLoads.push_back(Chain); 5542 5543 setValue(CS.getInstruction(), 5544 DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 5545 DAG.getVTList(&RetTys[0], RetTys.size()), 5546 &Values[0], Values.size())); 5547 } 5548 5549 if (!Result.second.getNode()) { 5550 // As a special case, a null chain means that a tail call has been emitted 5551 // and the DAG root is already updated. 5552 HasTailCall = true; 5553 5554 // Since there's no actual continuation from this block, nothing can be 5555 // relying on us setting vregs for them. 5556 PendingExports.clear(); 5557 } else { 5558 DAG.setRoot(Result.second); 5559 } 5560 5561 if (LandingPad) { 5562 // Insert a label at the end of the invoke call to mark the try range. This 5563 // can be used to detect deletion of the invoke via the MachineModuleInfo. 5564 MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol(); 5565 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); 5566 5567 // Inform MachineModuleInfo of range. 5568 MMI.addInvoke(LandingPad, BeginLabel, EndLabel); 5569 } 5570} 5571 5572/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the 5573/// value is equal or not-equal to zero. 5574static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) { 5575 for (const User *U : V->users()) { 5576 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U)) 5577 if (IC->isEquality()) 5578 if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1))) 5579 if (C->isNullValue()) 5580 continue; 5581 // Unknown instruction. 
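    // Any other use defeats the transformation; e.g. 'memcmp(a, b, 4) < 0'
    // depends on the sign of the result, which the word-sized equality compare
    // emitted by visitMemCmpCall cannot provide.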
5582 return false; 5583 } 5584 return true; 5585} 5586 5587static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, 5588 Type *LoadTy, 5589 SelectionDAGBuilder &Builder) { 5590 5591 // Check to see if this load can be trivially constant folded, e.g. if the 5592 // input is from a string literal. 5593 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { 5594 // Cast pointer to the type we really want to load. 5595 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput), 5596 PointerType::getUnqual(LoadTy)); 5597 5598 if (const Constant *LoadCst = 5599 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput), 5600 Builder.DL)) 5601 return Builder.getValue(LoadCst); 5602 } 5603 5604 // Otherwise, we have to emit the load. If the pointer is to unfoldable but 5605 // still constant memory, the input chain can be the entry node. 5606 SDValue Root; 5607 bool ConstantMemory = false; 5608 5609 // Do not serialize (non-volatile) loads of constant memory with anything. 5610 if (Builder.AA->pointsToConstantMemory(PtrVal)) { 5611 Root = Builder.DAG.getEntryNode(); 5612 ConstantMemory = true; 5613 } else { 5614 // Do not serialize non-volatile loads against each other. 5615 Root = Builder.DAG.getRoot(); 5616 } 5617 5618 SDValue Ptr = Builder.getValue(PtrVal); 5619 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, 5620 Ptr, MachinePointerInfo(PtrVal), 5621 false /*volatile*/, 5622 false /*nontemporal*/, 5623 false /*isinvariant*/, 1 /* align=1 */); 5624 5625 if (!ConstantMemory) 5626 Builder.PendingLoads.push_back(LoadVal.getValue(1)); 5627 return LoadVal; 5628} 5629 5630/// processIntegerCallValue - Record the value for an instruction that 5631/// produces an integer result, converting the type where necessary. 5632void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, 5633 SDValue Value, 5634 bool IsSigned) { 5635 EVT VT = TM.getTargetLowering()->getValueType(I.getType(), true); 5636 if (IsSigned) 5637 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT); 5638 else 5639 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT); 5640 setValue(&I, Value); 5641} 5642 5643/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form. 5644/// If so, return true and lower it, otherwise return false and it will be 5645/// lowered like a normal call. 5646bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) { 5647 // Verify that the prototype makes sense. 
int memcmp(void*,void*,size_t) 5648 if (I.getNumArgOperands() != 3) 5649 return false; 5650 5651 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1); 5652 if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() || 5653 !I.getArgOperand(2)->getType()->isIntegerTy() || 5654 !I.getType()->isIntegerTy()) 5655 return false; 5656 5657 const Value *Size = I.getArgOperand(2); 5658 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size); 5659 if (CSize && CSize->getZExtValue() == 0) { 5660 EVT CallVT = TM.getTargetLowering()->getValueType(I.getType(), true); 5661 setValue(&I, DAG.getConstant(0, CallVT)); 5662 return true; 5663 } 5664 5665 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5666 std::pair<SDValue, SDValue> Res = 5667 TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(), 5668 getValue(LHS), getValue(RHS), getValue(Size), 5669 MachinePointerInfo(LHS), 5670 MachinePointerInfo(RHS)); 5671 if (Res.first.getNode()) { 5672 processIntegerCallValue(I, Res.first, true); 5673 PendingLoads.push_back(Res.second); 5674 return true; 5675 } 5676 5677 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 5678 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 5679 if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) { 5680 bool ActuallyDoIt = true; 5681 MVT LoadVT; 5682 Type *LoadTy; 5683 switch (CSize->getZExtValue()) { 5684 default: 5685 LoadVT = MVT::Other; 5686 LoadTy = 0; 5687 ActuallyDoIt = false; 5688 break; 5689 case 2: 5690 LoadVT = MVT::i16; 5691 LoadTy = Type::getInt16Ty(CSize->getContext()); 5692 break; 5693 case 4: 5694 LoadVT = MVT::i32; 5695 LoadTy = Type::getInt32Ty(CSize->getContext()); 5696 break; 5697 case 8: 5698 LoadVT = MVT::i64; 5699 LoadTy = Type::getInt64Ty(CSize->getContext()); 5700 break; 5701 /* 5702 case 16: 5703 LoadVT = MVT::v4i32; 5704 LoadTy = Type::getInt32Ty(CSize->getContext()); 5705 LoadTy = VectorType::get(LoadTy, 4); 5706 break; 5707 */ 5708 } 5709 5710 // This turns into unaligned loads. We only do this if the target natively 5711 // supports the MVT we'll be loading or if it is small enough (<= 4) that 5712 // we'll only produce a small number of byte loads. 5713 5714 // Require that we can find a legal MVT, and only do this if the target 5715 // supports unaligned loads of that type. Expanding into byte loads would 5716 // bloat the code. 5717 const TargetLowering *TLI = TM.getTargetLowering(); 5718 if (ActuallyDoIt && CSize->getZExtValue() > 4) { 5719 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); 5720 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); 5721 // TODO: Handle 5 byte compare as 4-byte + 1 byte. 5722 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. 5723 if (!TLI->isTypeLegal(LoadVT) || 5724 !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) || 5725 !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS)) 5726 ActuallyDoIt = false; 5727 } 5728 5729 if (ActuallyDoIt) { 5730 SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this); 5731 SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this); 5732 5733 SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal, 5734 ISD::SETNE); 5735 processIntegerCallValue(I, Res, false); 5736 return true; 5737 } 5738 } 5739 5740 5741 return false; 5742} 5743 5744/// visitMemChrCall -- See if we can lower a memchr call into an optimized 5745/// form. If so, return true and lower it, otherwise return false and it 5746/// will be lowered like a normal call. 
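/// Whether an optimized form exists is up to the target: if its
/// TargetSelectionDAGInfo hook returns a null SDValue (the default), the
/// memchr call is simply lowered as an ordinary libcall.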
5747bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { 5748 // Verify that the prototype makes sense. void *memchr(void *, int, size_t) 5749 if (I.getNumArgOperands() != 3) 5750 return false; 5751 5752 const Value *Src = I.getArgOperand(0); 5753 const Value *Char = I.getArgOperand(1); 5754 const Value *Length = I.getArgOperand(2); 5755 if (!Src->getType()->isPointerTy() || 5756 !Char->getType()->isIntegerTy() || 5757 !Length->getType()->isIntegerTy() || 5758 !I.getType()->isPointerTy()) 5759 return false; 5760 5761 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5762 std::pair<SDValue, SDValue> Res = 5763 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(), 5764 getValue(Src), getValue(Char), getValue(Length), 5765 MachinePointerInfo(Src)); 5766 if (Res.first.getNode()) { 5767 setValue(&I, Res.first); 5768 PendingLoads.push_back(Res.second); 5769 return true; 5770 } 5771 5772 return false; 5773} 5774 5775/// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an 5776/// optimized form. If so, return true and lower it, otherwise return false 5777/// and it will be lowered like a normal call. 5778bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { 5779 // Verify that the prototype makes sense. char *strcpy(char *, char *) 5780 if (I.getNumArgOperands() != 2) 5781 return false; 5782 5783 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5784 if (!Arg0->getType()->isPointerTy() || 5785 !Arg1->getType()->isPointerTy() || 5786 !I.getType()->isPointerTy()) 5787 return false; 5788 5789 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5790 std::pair<SDValue, SDValue> Res = 5791 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(), 5792 getValue(Arg0), getValue(Arg1), 5793 MachinePointerInfo(Arg0), 5794 MachinePointerInfo(Arg1), isStpcpy); 5795 if (Res.first.getNode()) { 5796 setValue(&I, Res.first); 5797 DAG.setRoot(Res.second); 5798 return true; 5799 } 5800 5801 return false; 5802} 5803 5804/// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form. 5805/// If so, return true and lower it, otherwise return false and it will be 5806/// lowered like a normal call. 5807bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { 5808 // Verify that the prototype makes sense. int strcmp(void*,void*) 5809 if (I.getNumArgOperands() != 2) 5810 return false; 5811 5812 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5813 if (!Arg0->getType()->isPointerTy() || 5814 !Arg1->getType()->isPointerTy() || 5815 !I.getType()->isIntegerTy()) 5816 return false; 5817 5818 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5819 std::pair<SDValue, SDValue> Res = 5820 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(), 5821 getValue(Arg0), getValue(Arg1), 5822 MachinePointerInfo(Arg0), 5823 MachinePointerInfo(Arg1)); 5824 if (Res.first.getNode()) { 5825 processIntegerCallValue(I, Res.first, true); 5826 PendingLoads.push_back(Res.second); 5827 return true; 5828 } 5829 5830 return false; 5831} 5832 5833/// visitStrLenCall -- See if we can lower a strlen call into an optimized 5834/// form. If so, return true and lower it, otherwise return false and it 5835/// will be lowered like a normal call. 5836bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) { 5837 // Verify that the prototype makes sense. 
size_t strlen(char *) 5838 if (I.getNumArgOperands() != 1) 5839 return false; 5840 5841 const Value *Arg0 = I.getArgOperand(0); 5842 if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy()) 5843 return false; 5844 5845 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5846 std::pair<SDValue, SDValue> Res = 5847 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(), 5848 getValue(Arg0), MachinePointerInfo(Arg0)); 5849 if (Res.first.getNode()) { 5850 processIntegerCallValue(I, Res.first, false); 5851 PendingLoads.push_back(Res.second); 5852 return true; 5853 } 5854 5855 return false; 5856} 5857 5858/// visitStrNLenCall -- See if we can lower a strnlen call into an optimized 5859/// form. If so, return true and lower it, otherwise return false and it 5860/// will be lowered like a normal call. 5861bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) { 5862 // Verify that the prototype makes sense. size_t strnlen(char *, size_t) 5863 if (I.getNumArgOperands() != 2) 5864 return false; 5865 5866 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5867 if (!Arg0->getType()->isPointerTy() || 5868 !Arg1->getType()->isIntegerTy() || 5869 !I.getType()->isIntegerTy()) 5870 return false; 5871 5872 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5873 std::pair<SDValue, SDValue> Res = 5874 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(), 5875 getValue(Arg0), getValue(Arg1), 5876 MachinePointerInfo(Arg0)); 5877 if (Res.first.getNode()) { 5878 processIntegerCallValue(I, Res.first, false); 5879 PendingLoads.push_back(Res.second); 5880 return true; 5881 } 5882 5883 return false; 5884} 5885 5886/// visitUnaryFloatCall - If a call instruction is a unary floating-point 5887/// operation (as expected), translate it to an SDNode with the specified opcode 5888/// and return true. 5889bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I, 5890 unsigned Opcode) { 5891 // Sanity check that it really is a unary floating-point call. 5892 if (I.getNumArgOperands() != 1 || 5893 !I.getArgOperand(0)->getType()->isFloatingPointTy() || 5894 I.getType() != I.getArgOperand(0)->getType() || 5895 !I.onlyReadsMemory()) 5896 return false; 5897 5898 SDValue Tmp = getValue(I.getArgOperand(0)); 5899 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp)); 5900 return true; 5901} 5902 5903void SelectionDAGBuilder::visitCall(const CallInst &I) { 5904 // Handle inline assembly differently. 5905 if (isa<InlineAsm>(I.getCalledValue())) { 5906 visitInlineAsm(&I); 5907 return; 5908 } 5909 5910 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 5911 ComputeUsesVAFloatArgument(I, &MMI); 5912 5913 const char *RenameFn = 0; 5914 if (Function *F = I.getCalledFunction()) { 5915 if (F->isDeclaration()) { 5916 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) { 5917 if (unsigned IID = II->getIntrinsicID(F)) { 5918 RenameFn = visitIntrinsicCall(I, IID); 5919 if (!RenameFn) 5920 return; 5921 } 5922 } 5923 if (unsigned IID = F->getIntrinsicID()) { 5924 RenameFn = visitIntrinsicCall(I, IID); 5925 if (!RenameFn) 5926 return; 5927 } 5928 } 5929 5930 // Check for well-known libc/libm calls. If the function is internal, it 5931 // can't be a library call. 
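    // Only external declarations qualify: a function with local linkage that
    // happens to be named 'sin', for example, is the user's own code and must
    // not be mapped onto ISD::FSIN. The per-call checks below also verify the
    // expected signature before any such mapping is made.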
5932 LibFunc::Func Func; 5933 if (!F->hasLocalLinkage() && F->hasName() && 5934 LibInfo->getLibFunc(F->getName(), Func) && 5935 LibInfo->hasOptimizedCodeGen(Func)) { 5936 switch (Func) { 5937 default: break; 5938 case LibFunc::copysign: 5939 case LibFunc::copysignf: 5940 case LibFunc::copysignl: 5941 if (I.getNumArgOperands() == 2 && // Basic sanity checks. 5942 I.getArgOperand(0)->getType()->isFloatingPointTy() && 5943 I.getType() == I.getArgOperand(0)->getType() && 5944 I.getType() == I.getArgOperand(1)->getType() && 5945 I.onlyReadsMemory()) { 5946 SDValue LHS = getValue(I.getArgOperand(0)); 5947 SDValue RHS = getValue(I.getArgOperand(1)); 5948 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(), 5949 LHS.getValueType(), LHS, RHS)); 5950 return; 5951 } 5952 break; 5953 case LibFunc::fabs: 5954 case LibFunc::fabsf: 5955 case LibFunc::fabsl: 5956 if (visitUnaryFloatCall(I, ISD::FABS)) 5957 return; 5958 break; 5959 case LibFunc::sin: 5960 case LibFunc::sinf: 5961 case LibFunc::sinl: 5962 if (visitUnaryFloatCall(I, ISD::FSIN)) 5963 return; 5964 break; 5965 case LibFunc::cos: 5966 case LibFunc::cosf: 5967 case LibFunc::cosl: 5968 if (visitUnaryFloatCall(I, ISD::FCOS)) 5969 return; 5970 break; 5971 case LibFunc::sqrt: 5972 case LibFunc::sqrtf: 5973 case LibFunc::sqrtl: 5974 case LibFunc::sqrt_finite: 5975 case LibFunc::sqrtf_finite: 5976 case LibFunc::sqrtl_finite: 5977 if (visitUnaryFloatCall(I, ISD::FSQRT)) 5978 return; 5979 break; 5980 case LibFunc::floor: 5981 case LibFunc::floorf: 5982 case LibFunc::floorl: 5983 if (visitUnaryFloatCall(I, ISD::FFLOOR)) 5984 return; 5985 break; 5986 case LibFunc::nearbyint: 5987 case LibFunc::nearbyintf: 5988 case LibFunc::nearbyintl: 5989 if (visitUnaryFloatCall(I, ISD::FNEARBYINT)) 5990 return; 5991 break; 5992 case LibFunc::ceil: 5993 case LibFunc::ceilf: 5994 case LibFunc::ceill: 5995 if (visitUnaryFloatCall(I, ISD::FCEIL)) 5996 return; 5997 break; 5998 case LibFunc::rint: 5999 case LibFunc::rintf: 6000 case LibFunc::rintl: 6001 if (visitUnaryFloatCall(I, ISD::FRINT)) 6002 return; 6003 break; 6004 case LibFunc::round: 6005 case LibFunc::roundf: 6006 case LibFunc::roundl: 6007 if (visitUnaryFloatCall(I, ISD::FROUND)) 6008 return; 6009 break; 6010 case LibFunc::trunc: 6011 case LibFunc::truncf: 6012 case LibFunc::truncl: 6013 if (visitUnaryFloatCall(I, ISD::FTRUNC)) 6014 return; 6015 break; 6016 case LibFunc::log2: 6017 case LibFunc::log2f: 6018 case LibFunc::log2l: 6019 if (visitUnaryFloatCall(I, ISD::FLOG2)) 6020 return; 6021 break; 6022 case LibFunc::exp2: 6023 case LibFunc::exp2f: 6024 case LibFunc::exp2l: 6025 if (visitUnaryFloatCall(I, ISD::FEXP2)) 6026 return; 6027 break; 6028 case LibFunc::memcmp: 6029 if (visitMemCmpCall(I)) 6030 return; 6031 break; 6032 case LibFunc::memchr: 6033 if (visitMemChrCall(I)) 6034 return; 6035 break; 6036 case LibFunc::strcpy: 6037 if (visitStrCpyCall(I, false)) 6038 return; 6039 break; 6040 case LibFunc::stpcpy: 6041 if (visitStrCpyCall(I, true)) 6042 return; 6043 break; 6044 case LibFunc::strcmp: 6045 if (visitStrCmpCall(I)) 6046 return; 6047 break; 6048 case LibFunc::strlen: 6049 if (visitStrLenCall(I)) 6050 return; 6051 break; 6052 case LibFunc::strnlen: 6053 if (visitStrNLenCall(I)) 6054 return; 6055 break; 6056 } 6057 } 6058 } 6059 6060 SDValue Callee; 6061 if (!RenameFn) 6062 Callee = getValue(I.getCalledValue()); 6063 else 6064 Callee = DAG.getExternalSymbol(RenameFn, 6065 TM.getTargetLowering()->getPointerTy()); 6066 6067 // Check if we can potentially perform a tail call. 
More detailed checking is 6068 // be done within LowerCallTo, after more information about the call is known. 6069 LowerCallTo(&I, Callee, I.isTailCall()); 6070} 6071 6072namespace { 6073 6074/// AsmOperandInfo - This contains information for each constraint that we are 6075/// lowering. 6076class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { 6077public: 6078 /// CallOperand - If this is the result output operand or a clobber 6079 /// this is null, otherwise it is the incoming operand to the CallInst. 6080 /// This gets modified as the asm is processed. 6081 SDValue CallOperand; 6082 6083 /// AssignedRegs - If this is a register or register class operand, this 6084 /// contains the set of register corresponding to the operand. 6085 RegsForValue AssignedRegs; 6086 6087 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info) 6088 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) { 6089 } 6090 6091 /// getCallOperandValEVT - Return the EVT of the Value* that this operand 6092 /// corresponds to. If there is no Value* for this operand, it returns 6093 /// MVT::Other. 6094 EVT getCallOperandValEVT(LLVMContext &Context, 6095 const TargetLowering &TLI, 6096 const DataLayout *DL) const { 6097 if (CallOperandVal == 0) return MVT::Other; 6098 6099 if (isa<BasicBlock>(CallOperandVal)) 6100 return TLI.getPointerTy(); 6101 6102 llvm::Type *OpTy = CallOperandVal->getType(); 6103 6104 // FIXME: code duplicated from TargetLowering::ParseConstraints(). 6105 // If this is an indirect operand, the operand is a pointer to the 6106 // accessed type. 6107 if (isIndirect) { 6108 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 6109 if (!PtrTy) 6110 report_fatal_error("Indirect operand for inline asm not a pointer!"); 6111 OpTy = PtrTy->getElementType(); 6112 } 6113 6114 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 6115 if (StructType *STy = dyn_cast<StructType>(OpTy)) 6116 if (STy->getNumElements() == 1) 6117 OpTy = STy->getElementType(0); 6118 6119 // If OpTy is not a single value, it may be a struct/union that we 6120 // can tile with integers. 6121 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 6122 unsigned BitSize = DL->getTypeSizeInBits(OpTy); 6123 switch (BitSize) { 6124 default: break; 6125 case 1: 6126 case 8: 6127 case 16: 6128 case 32: 6129 case 64: 6130 case 128: 6131 OpTy = IntegerType::get(Context, BitSize); 6132 break; 6133 } 6134 } 6135 6136 return TLI.getValueType(OpTy, true); 6137 } 6138}; 6139 6140typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector; 6141 6142} // end anonymous namespace 6143 6144/// GetRegistersForValue - Assign registers (virtual or physical) for the 6145/// specified operand. We prefer to assign virtual registers, to allow the 6146/// register allocator to handle the assignment process. However, if the asm 6147/// uses features that we can't model on machineinstrs, we have SDISel do the 6148/// allocation. This produces generally horrible, but correct, code. 6149/// 6150/// OpInfo describes the operand. 6151/// 6152static void GetRegistersForValue(SelectionDAG &DAG, 6153 const TargetLowering &TLI, 6154 SDLoc DL, 6155 SDISelAsmOperandInfo &OpInfo) { 6156 LLVMContext &Context = *DAG.getContext(); 6157 6158 MachineFunction &MF = DAG.getMachineFunction(); 6159 SmallVector<unsigned, 4> Regs; 6160 6161 // If this is a constraint for a single physreg, or a constraint for a 6162 // register class, find it. 
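  // For an explicit register constraint such as "{eax}" on x86 this yields the
  // physical register plus a register class containing it; for a class
  // constraint such as "r" it yields a null register and only the class, and
  // virtual registers are created from that class further down.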
6163 std::pair<unsigned, const TargetRegisterClass*> PhysReg = 6164 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode, 6165 OpInfo.ConstraintVT); 6166 6167 unsigned NumRegs = 1; 6168 if (OpInfo.ConstraintVT != MVT::Other) { 6169 // If this is a FP input in an integer register (or visa versa) insert a bit 6170 // cast of the input value. More generally, handle any case where the input 6171 // value disagrees with the register class we plan to stick this in. 6172 if (OpInfo.Type == InlineAsm::isInput && 6173 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) { 6174 // Try to convert to the first EVT that the reg class contains. If the 6175 // types are identical size, use a bitcast to convert (e.g. two differing 6176 // vector types). 6177 MVT RegVT = *PhysReg.second->vt_begin(); 6178 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) { 6179 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL, 6180 RegVT, OpInfo.CallOperand); 6181 OpInfo.ConstraintVT = RegVT; 6182 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) { 6183 // If the input is a FP value and we want it in FP registers, do a 6184 // bitcast to the corresponding integer type. This turns an f64 value 6185 // into i64, which can be passed with two i32 values on a 32-bit 6186 // machine. 6187 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits()); 6188 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL, 6189 RegVT, OpInfo.CallOperand); 6190 OpInfo.ConstraintVT = RegVT; 6191 } 6192 } 6193 6194 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT); 6195 } 6196 6197 MVT RegVT; 6198 EVT ValueVT = OpInfo.ConstraintVT; 6199 6200 // If this is a constraint for a specific physical register, like {r17}, 6201 // assign it now. 6202 if (unsigned AssignedReg = PhysReg.first) { 6203 const TargetRegisterClass *RC = PhysReg.second; 6204 if (OpInfo.ConstraintVT == MVT::Other) 6205 ValueVT = *RC->vt_begin(); 6206 6207 // Get the actual register value type. This is important, because the user 6208 // may have asked for (e.g.) the AX register in i32 type. We need to 6209 // remember that AX is actually i16 to get the right extension. 6210 RegVT = *RC->vt_begin(); 6211 6212 // This is a explicit reference to a physical register. 6213 Regs.push_back(AssignedReg); 6214 6215 // If this is an expanded reference, add the rest of the regs to Regs. 6216 if (NumRegs != 1) { 6217 TargetRegisterClass::iterator I = RC->begin(); 6218 for (; *I != AssignedReg; ++I) 6219 assert(I != RC->end() && "Didn't find reg!"); 6220 6221 // Already added the first reg. 6222 --NumRegs; ++I; 6223 for (; NumRegs; --NumRegs, ++I) { 6224 assert(I != RC->end() && "Ran out of registers to allocate!"); 6225 Regs.push_back(*I); 6226 } 6227 } 6228 6229 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT); 6230 return; 6231 } 6232 6233 // Otherwise, if this was a reference to an LLVM register class, create vregs 6234 // for this reference. 6235 if (const TargetRegisterClass *RC = PhysReg.second) { 6236 RegVT = *RC->vt_begin(); 6237 if (OpInfo.ConstraintVT == MVT::Other) 6238 ValueVT = RegVT; 6239 6240 // Create the appropriate number of virtual registers. 6241 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 6242 for (; NumRegs; --NumRegs) 6243 Regs.push_back(RegInfo.createVirtualRegister(RC)); 6244 6245 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT); 6246 return; 6247 } 6248 6249 // Otherwise, we couldn't allocate enough registers for this. 
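// (Editorial note: in this fall-through case OpInfo.AssignedRegs is simply
// left empty; visitInlineAsm below checks for an empty register list and
// reports a "couldn't allocate ... register for constraint" error.)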
6250} 6251 6252/// visitInlineAsm - Handle a call to an InlineAsm object. 6253/// 6254void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { 6255 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 6256 6257 /// ConstraintOperands - Information about all of the constraints. 6258 SDISelAsmOperandInfoVector ConstraintOperands; 6259 6260 const TargetLowering *TLI = TM.getTargetLowering(); 6261 TargetLowering::AsmOperandInfoVector 6262 TargetConstraints = TLI->ParseConstraints(CS); 6263 6264 bool hasMemory = false; 6265 6266 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 6267 unsigned ResNo = 0; // ResNo - The result number of the next output. 6268 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 6269 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i])); 6270 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); 6271 6272 MVT OpVT = MVT::Other; 6273 6274 // Compute the value type for each operand. 6275 switch (OpInfo.Type) { 6276 case InlineAsm::isOutput: 6277 // Indirect outputs just consume an argument. 6278 if (OpInfo.isIndirect) { 6279 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 6280 break; 6281 } 6282 6283 // The return value of the call is this value. As such, there is no 6284 // corresponding argument. 6285 assert(!CS.getType()->isVoidTy() && "Bad inline asm!"); 6286 if (StructType *STy = dyn_cast<StructType>(CS.getType())) { 6287 OpVT = TLI->getSimpleValueType(STy->getElementType(ResNo)); 6288 } else { 6289 assert(ResNo == 0 && "Asm only has one result!"); 6290 OpVT = TLI->getSimpleValueType(CS.getType()); 6291 } 6292 ++ResNo; 6293 break; 6294 case InlineAsm::isInput: 6295 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 6296 break; 6297 case InlineAsm::isClobber: 6298 // Nothing to do. 6299 break; 6300 } 6301 6302 // If this is an input or an indirect output, process the call argument. 6303 // BasicBlocks are labels, currently appearing only in asm's. 6304 if (OpInfo.CallOperandVal) { 6305 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) { 6306 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]); 6307 } else { 6308 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal); 6309 } 6310 6311 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL). 6312 getSimpleVT(); 6313 } 6314 6315 OpInfo.ConstraintVT = OpVT; 6316 6317 // Indirect operand accesses access memory. 6318 if (OpInfo.isIndirect) 6319 hasMemory = true; 6320 else { 6321 for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) { 6322 TargetLowering::ConstraintType 6323 CType = TLI->getConstraintType(OpInfo.Codes[j]); 6324 if (CType == TargetLowering::C_Memory) { 6325 hasMemory = true; 6326 break; 6327 } 6328 } 6329 } 6330 } 6331 6332 SDValue Chain, Flag; 6333 6334 // We won't need to flush pending loads if this asm doesn't touch 6335 // memory and is nonvolatile. 6336 if (hasMemory || IA->hasSideEffects()) 6337 Chain = getRoot(); 6338 else 6339 Chain = DAG.getRoot(); 6340 6341 // Second pass over the constraints: compute which constraint option to use 6342 // and assign registers to constraints that want a specific physreg. 6343 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 6344 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 6345 6346 // If this is an output operand with a matching input operand, look up the 6347 // matching input. If their types mismatch, e.g. 
one is an integer, the 6348 // other is floating point, or their sizes are different, flag it as an 6349 // error. 6350 if (OpInfo.hasMatchingInput()) { 6351 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 6352 6353 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 6354 std::pair<unsigned, const TargetRegisterClass*> MatchRC = 6355 TLI->getRegForInlineAsmConstraint(OpInfo.ConstraintCode, 6356 OpInfo.ConstraintVT); 6357 std::pair<unsigned, const TargetRegisterClass*> InputRC = 6358 TLI->getRegForInlineAsmConstraint(Input.ConstraintCode, 6359 Input.ConstraintVT); 6360 if ((OpInfo.ConstraintVT.isInteger() != 6361 Input.ConstraintVT.isInteger()) || 6362 (MatchRC.second != InputRC.second)) { 6363 report_fatal_error("Unsupported asm: input constraint" 6364 " with a matching output constraint of" 6365 " incompatible type!"); 6366 } 6367 Input.ConstraintVT = OpInfo.ConstraintVT; 6368 } 6369 } 6370 6371 // Compute the constraint code and ConstraintType to use. 6372 TLI->ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG); 6373 6374 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 6375 OpInfo.Type == InlineAsm::isClobber) 6376 continue; 6377 6378 // If this is a memory input, and if the operand is not indirect, do what we 6379 // need to to provide an address for the memory input. 6380 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 6381 !OpInfo.isIndirect) { 6382 assert((OpInfo.isMultipleAlternative || 6383 (OpInfo.Type == InlineAsm::isInput)) && 6384 "Can only indirectify direct input operands!"); 6385 6386 // Memory operands really want the address of the value. If we don't have 6387 // an indirect input, put it in the constpool if we can, otherwise spill 6388 // it to a stack slot. 6389 // TODO: This isn't quite right. We need to handle these according to 6390 // the addressing mode that the constraint wants. Also, this may take 6391 // an additional register for the computation and we don't want that 6392 // either. 6393 6394 // If the operand is a float, integer, or vector constant, spill to a 6395 // constant pool entry to get its address. 6396 const Value *OpVal = OpInfo.CallOperandVal; 6397 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) || 6398 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) { 6399 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal), 6400 TLI->getPointerTy()); 6401 } else { 6402 // Otherwise, create a stack slot and emit a store to it before the 6403 // asm. 6404 Type *Ty = OpVal->getType(); 6405 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty); 6406 unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(Ty); 6407 MachineFunction &MF = DAG.getMachineFunction(); 6408 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); 6409 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI->getPointerTy()); 6410 Chain = DAG.getStore(Chain, getCurSDLoc(), 6411 OpInfo.CallOperand, StackSlot, 6412 MachinePointerInfo::getFixedStack(SSFI), 6413 false, false, 0); 6414 OpInfo.CallOperand = StackSlot; 6415 } 6416 6417 // There is no longer a Value* corresponding to this operand. 6418 OpInfo.CallOperandVal = 0; 6419 6420 // It is now an indirect operand. 6421 OpInfo.isIndirect = true; 6422 } 6423 6424 // If this constraint is for a specific register, allocate it before 6425 // anything else. 
6426 if (OpInfo.ConstraintType == TargetLowering::C_Register) 6427 GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo); 6428 } 6429 6430 // Second pass - Loop over all of the operands, assigning virtual or physregs 6431 // to register class operands. 6432 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 6433 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 6434 6435 // C_Register operands have already been allocated, Other/Memory don't need 6436 // to be. 6437 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass) 6438 GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo); 6439 } 6440 6441 // AsmNodeOperands - The operands for the ISD::INLINEASM node. 6442 std::vector<SDValue> AsmNodeOperands; 6443 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain 6444 AsmNodeOperands.push_back( 6445 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), 6446 TLI->getPointerTy())); 6447 6448 // If we have a !srcloc metadata node associated with it, we want to attach 6449 // this to the ultimately generated inline asm machineinstr. To do this, we 6450 // pass in the third operand as this (potentially null) inline asm MDNode. 6451 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc"); 6452 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc)); 6453 6454 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore 6455 // bits as operand 3. 6456 unsigned ExtraInfo = 0; 6457 if (IA->hasSideEffects()) 6458 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 6459 if (IA->isAlignStack()) 6460 ExtraInfo |= InlineAsm::Extra_IsAlignStack; 6461 // Set the asm dialect. 6462 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect; 6463 6464 // Determine if this InlineAsm MayLoad or MayStore based on the constraints. 6465 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 6466 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 6467 6468 // Compute the constraint code and ConstraintType to use. 6469 TLI->ComputeConstraintToUse(OpInfo, SDValue()); 6470 6471 // Ideally, we would only check against memory constraints. However, the 6472 // meaning of an other constraint can be target-specific and we can't easily 6473 // reason about it. Therefore, be conservative and set MayLoad/MayStore 6474 // for other constriants as well. 6475 if (OpInfo.ConstraintType == TargetLowering::C_Memory || 6476 OpInfo.ConstraintType == TargetLowering::C_Other) { 6477 if (OpInfo.Type == InlineAsm::isInput) 6478 ExtraInfo |= InlineAsm::Extra_MayLoad; 6479 else if (OpInfo.Type == InlineAsm::isOutput) 6480 ExtraInfo |= InlineAsm::Extra_MayStore; 6481 else if (OpInfo.Type == InlineAsm::isClobber) 6482 ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); 6483 } 6484 } 6485 6486 AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo, 6487 TLI->getPointerTy())); 6488 6489 // Loop over all of the inputs, copying the operand values into the 6490 // appropriate registers and processing the output regs. 6491 RegsForValue RetValRegs; 6492 6493 // IndirectStoresToEmit - The set of stores to emit after the inline asm node. 
6494 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 6495 6496 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 6497 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 6498 6499 switch (OpInfo.Type) { 6500 case InlineAsm::isOutput: { 6501 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass && 6502 OpInfo.ConstraintType != TargetLowering::C_Register) { 6503 // Memory output, or 'other' output (e.g. 'X' constraint). 6504 assert(OpInfo.isIndirect && "Memory output must be indirect operand"); 6505 6506 // Add information to the INLINEASM node to know about this output. 6507 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 6508 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, 6509 TLI->getPointerTy())); 6510 AsmNodeOperands.push_back(OpInfo.CallOperand); 6511 break; 6512 } 6513 6514 // Otherwise, this is a register or register class output. 6515 6516 // Copy the output from the appropriate register. Find a register that 6517 // we can use. 6518 if (OpInfo.AssignedRegs.Regs.empty()) { 6519 LLVMContext &Ctx = *DAG.getContext(); 6520 Ctx.emitError(CS.getInstruction(), 6521 "couldn't allocate output register for constraint '" + 6522 Twine(OpInfo.ConstraintCode) + "'"); 6523 return; 6524 } 6525 6526 // If this is an indirect operand, store through the pointer after the 6527 // asm. 6528 if (OpInfo.isIndirect) { 6529 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs, 6530 OpInfo.CallOperandVal)); 6531 } else { 6532 // This is the result value of the call. 6533 assert(!CS.getType()->isVoidTy() && "Bad inline asm!"); 6534 // Concatenate this output onto the outputs list. 6535 RetValRegs.append(OpInfo.AssignedRegs); 6536 } 6537 6538 // Add information to the INLINEASM node to know that this register is 6539 // set. 6540 OpInfo.AssignedRegs 6541 .AddInlineAsmOperands(OpInfo.isEarlyClobber 6542 ? InlineAsm::Kind_RegDefEarlyClobber 6543 : InlineAsm::Kind_RegDef, 6544 false, 0, DAG, AsmNodeOperands); 6545 break; 6546 } 6547 case InlineAsm::isInput: { 6548 SDValue InOperandVal = OpInfo.CallOperand; 6549 6550 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint? 6551 // If this is required to match an output register we have already set, 6552 // just use its register. 6553 unsigned OperandNo = OpInfo.getMatchedOperand(); 6554 6555 // Scan until we find the definition we already emitted of this operand. 6556 // When we find it, create a RegsForValue operand. 6557 unsigned CurOp = InlineAsm::Op_FirstOperand; 6558 for (; OperandNo; --OperandNo) { 6559 // Advance to the next operand. 6560 unsigned OpFlag = 6561 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue(); 6562 assert((InlineAsm::isRegDefKind(OpFlag) || 6563 InlineAsm::isRegDefEarlyClobberKind(OpFlag) || 6564 InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?"); 6565 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1; 6566 } 6567 6568 unsigned OpFlag = 6569 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue(); 6570 if (InlineAsm::isRegDefKind(OpFlag) || 6571 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) { 6572 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs. 
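// (Editorial note: an INLINEASM operand flag word stores the operand kind in
// its low 3 bits and the operand's register count in bits 3-15, so
// (OpFlag & 0xffff) >> 3 is what InlineAsm::getNumOperandRegisters() computes
// in the loop below.)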
6573 if (OpInfo.isIndirect) { 6574 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c 6575 LLVMContext &Ctx = *DAG.getContext(); 6576 Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:" 6577 " don't know how to handle tied " 6578 "indirect register inputs"); 6579 return; 6580 } 6581 6582 RegsForValue MatchedRegs; 6583 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType()); 6584 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType(); 6585 MatchedRegs.RegVTs.push_back(RegVT); 6586 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo(); 6587 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag); 6588 i != e; ++i) { 6589 if (const TargetRegisterClass *RC = TLI->getRegClassFor(RegVT)) 6590 MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC)); 6591 else { 6592 LLVMContext &Ctx = *DAG.getContext(); 6593 Ctx.emitError(CS.getInstruction(), 6594 "inline asm error: This value" 6595 " type register class is not natively supported!"); 6596 return; 6597 } 6598 } 6599 // Use the produced MatchedRegs object to 6600 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(), 6601 Chain, &Flag, CS.getInstruction()); 6602 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, 6603 true, OpInfo.getMatchedOperand(), 6604 DAG, AsmNodeOperands); 6605 break; 6606 } 6607 6608 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!"); 6609 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 && 6610 "Unexpected number of operands"); 6611 // Add information to the INLINEASM node to know about this input. 6612 // See InlineAsm.h isUseOperandTiedToDef. 6613 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag, 6614 OpInfo.getMatchedOperand()); 6615 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag, 6616 TLI->getPointerTy())); 6617 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]); 6618 break; 6619 } 6620 6621 // Treat indirect 'X' constraint as memory. 6622 if (OpInfo.ConstraintType == TargetLowering::C_Other && 6623 OpInfo.isIndirect) 6624 OpInfo.ConstraintType = TargetLowering::C_Memory; 6625 6626 if (OpInfo.ConstraintType == TargetLowering::C_Other) { 6627 std::vector<SDValue> Ops; 6628 TLI->LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode, 6629 Ops, DAG); 6630 if (Ops.empty()) { 6631 LLVMContext &Ctx = *DAG.getContext(); 6632 Ctx.emitError(CS.getInstruction(), 6633 "invalid operand for inline asm constraint '" + 6634 Twine(OpInfo.ConstraintCode) + "'"); 6635 return; 6636 } 6637 6638 // Add information to the INLINEASM node to know about this input. 6639 unsigned ResOpType = 6640 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size()); 6641 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, 6642 TLI->getPointerTy())); 6643 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end()); 6644 break; 6645 } 6646 6647 if (OpInfo.ConstraintType == TargetLowering::C_Memory) { 6648 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); 6649 assert(InOperandVal.getValueType() == TLI->getPointerTy() && 6650 "Memory operands expect pointer values"); 6651 6652 // Add information to the INLINEASM node to know about this input. 
6653 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 6654 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, 6655 TLI->getPointerTy())); 6656 AsmNodeOperands.push_back(InOperandVal); 6657 break; 6658 } 6659 6660 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass || 6661 OpInfo.ConstraintType == TargetLowering::C_Register) && 6662 "Unknown constraint type!"); 6663 6664 // TODO: Support this. 6665 if (OpInfo.isIndirect) { 6666 LLVMContext &Ctx = *DAG.getContext(); 6667 Ctx.emitError(CS.getInstruction(), 6668 "Don't know how to handle indirect register inputs yet " 6669 "for constraint '" + 6670 Twine(OpInfo.ConstraintCode) + "'"); 6671 return; 6672 } 6673 6674 // Copy the input into the appropriate registers. 6675 if (OpInfo.AssignedRegs.Regs.empty()) { 6676 LLVMContext &Ctx = *DAG.getContext(); 6677 Ctx.emitError(CS.getInstruction(), 6678 "couldn't allocate input reg for constraint '" + 6679 Twine(OpInfo.ConstraintCode) + "'"); 6680 return; 6681 } 6682 6683 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(), 6684 Chain, &Flag, CS.getInstruction()); 6685 6686 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0, 6687 DAG, AsmNodeOperands); 6688 break; 6689 } 6690 case InlineAsm::isClobber: { 6691 // Add the clobbered value to the operand list, so that the register 6692 // allocator is aware that the physreg got clobbered. 6693 if (!OpInfo.AssignedRegs.Regs.empty()) 6694 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber, 6695 false, 0, DAG, 6696 AsmNodeOperands); 6697 break; 6698 } 6699 } 6700 } 6701 6702 // Finish up input operands. Set the input chain and add the flag last. 6703 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain; 6704 if (Flag.getNode()) AsmNodeOperands.push_back(Flag); 6705 6706 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(), 6707 DAG.getVTList(MVT::Other, MVT::Glue), 6708 &AsmNodeOperands[0], AsmNodeOperands.size()); 6709 Flag = Chain.getValue(1); 6710 6711 // If this asm returns a register value, copy the result from that register 6712 // and set it as the value of the call. 6713 if (!RetValRegs.Regs.empty()) { 6714 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 6715 Chain, &Flag, CS.getInstruction()); 6716 6717 // FIXME: Why don't we do this for inline asms with MRVs? 6718 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) { 6719 EVT ResultType = TLI->getValueType(CS.getType()); 6720 6721 // If any of the results of the inline asm is a vector, it may have the 6722 // wrong width/num elts. This can happen for register classes that can 6723 // contain multiple different value types. The preg or vreg allocated may 6724 // not have the same VT as was expected. Convert it to the right type 6725 // with bit_convert. 6726 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) { 6727 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), 6728 ResultType, Val); 6729 6730 } else if (ResultType != Val.getValueType() && 6731 ResultType.isInteger() && Val.getValueType().isInteger()) { 6732 // If a result value was tied to an input value, the computed result may 6733 // have a wider width than the expected result. Extract the relevant 6734 // portion. 6735 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val); 6736 } 6737 6738 assert(ResultType == Val.getValueType() && "Asm result value mismatch!"); 6739 } 6740 6741 setValue(CS.getInstruction(), Val); 6742 // Don't need to use this as a chain in this case. 
6743 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty()) 6744 return; 6745 } 6746 6747 std::vector<std::pair<SDValue, const Value *> > StoresToEmit; 6748 6749 // Process indirect outputs, first output all of the flagged copies out of 6750 // physregs. 6751 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 6752 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 6753 const Value *Ptr = IndirectStoresToEmit[i].second; 6754 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 6755 Chain, &Flag, IA); 6756 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 6757 } 6758 6759 // Emit the non-flagged stores from the physregs. 6760 SmallVector<SDValue, 8> OutChains; 6761 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) { 6762 SDValue Val = DAG.getStore(Chain, getCurSDLoc(), 6763 StoresToEmit[i].first, 6764 getValue(StoresToEmit[i].second), 6765 MachinePointerInfo(StoresToEmit[i].second), 6766 false, false, 0); 6767 OutChains.push_back(Val); 6768 } 6769 6770 if (!OutChains.empty()) 6771 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, 6772 &OutChains[0], OutChains.size()); 6773 6774 DAG.setRoot(Chain); 6775} 6776 6777void SelectionDAGBuilder::visitVAStart(const CallInst &I) { 6778 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(), 6779 MVT::Other, getRoot(), 6780 getValue(I.getArgOperand(0)), 6781 DAG.getSrcValue(I.getArgOperand(0)))); 6782} 6783 6784void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { 6785 const TargetLowering *TLI = TM.getTargetLowering(); 6786 const DataLayout &DL = *TLI->getDataLayout(); 6787 SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(), 6788 getRoot(), getValue(I.getOperand(0)), 6789 DAG.getSrcValue(I.getOperand(0)), 6790 DL.getABITypeAlignment(I.getType())); 6791 setValue(&I, V); 6792 DAG.setRoot(V.getValue(1)); 6793} 6794 6795void SelectionDAGBuilder::visitVAEnd(const CallInst &I) { 6796 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(), 6797 MVT::Other, getRoot(), 6798 getValue(I.getArgOperand(0)), 6799 DAG.getSrcValue(I.getArgOperand(0)))); 6800} 6801 6802void SelectionDAGBuilder::visitVACopy(const CallInst &I) { 6803 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(), 6804 MVT::Other, getRoot(), 6805 getValue(I.getArgOperand(0)), 6806 getValue(I.getArgOperand(1)), 6807 DAG.getSrcValue(I.getArgOperand(0)), 6808 DAG.getSrcValue(I.getArgOperand(1)))); 6809} 6810 6811/// \brief Lower an argument list according to the target calling convention. 6812/// 6813/// \return A tuple of <return-value, token-chain> 6814/// 6815/// This is a helper for lowering intrinsics that follow a target calling 6816/// convention or require stack pointer adjustment. Only a subset of the 6817/// intrinsic's operands need to participate in the calling convention. 6818std::pair<SDValue, SDValue> 6819SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx, 6820 unsigned NumArgs, SDValue Callee, 6821 bool useVoidTy) { 6822 TargetLowering::ArgListTy Args; 6823 Args.reserve(NumArgs); 6824 6825 // Populate the argument list. 6826 // Attributes for args start at offset 1, after the return attribute. 
6827 ImmutableCallSite CS(&CI);
6828 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
6829 ArgI != ArgE; ++ArgI) {
6830 const Value *V = CI.getOperand(ArgI);
6831
6832 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
6833
6834 TargetLowering::ArgListEntry Entry;
6835 Entry.Node = getValue(V);
6836 Entry.Ty = V->getType();
6837 Entry.setAttributes(&CS, AttrI);
6838 Args.push_back(Entry);
6839 }
6840
6841 Type *retTy = useVoidTy ? Type::getVoidTy(*DAG.getContext()) : CI.getType();
6842 TargetLowering::CallLoweringInfo CLI(getRoot(), retTy, /*retSExt*/ false,
6843 /*retZExt*/ false, /*isVarArg*/ false, /*isInReg*/ false, NumArgs,
6844 CI.getCallingConv(), /*isTailCall*/ false, /*doesNotReturn*/ false,
6845 /*isReturnValueUsed*/ CI.use_empty(), Callee, Args, DAG, getCurSDLoc());
6846
6847 const TargetLowering *TLI = TM.getTargetLowering();
6848 return TLI->LowerCallTo(CLI);
6849}
6850
6851/// \brief Add a stack map intrinsic call's live variable operands to a stackmap
6852/// or patchpoint target node's operand list.
6853///
6854/// Constants are converted to TargetConstants purely as an optimization to
6855/// avoid constant materialization and register allocation.
6856///
6857/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
6858/// generate address computation nodes, and so ExpandISelPseudo can convert the
6859/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
6860/// address materialization and register allocation, but may also be required
6861/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
6862/// alloca in the entry block, then the runtime may assume that the alloca's
6863/// StackMap location can be read immediately after compilation and that the
6864/// location is valid at any point during execution (this is similar to the
6865/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
6866/// only available in a register, then the runtime would need to trap when
6867/// execution reaches the StackMap in order to read the alloca's location.
6868static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
6869 SmallVectorImpl<SDValue> &Ops,
6870 SelectionDAGBuilder &Builder) {
6871 for (unsigned i = StartIdx, e = CI.getNumArgOperands(); i != e; ++i) {
6872 SDValue OpVal = Builder.getValue(CI.getArgOperand(i));
6873 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
6874 Ops.push_back(
6875 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
6876 Ops.push_back(
6877 Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
6878 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
6879 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
6880 Ops.push_back(
6881 Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
6882 } else
6883 Ops.push_back(OpVal);
6884 }
6885}
6886
6887/// \brief Lower llvm.experimental.stackmap directly to its target opcode.
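///
/// (Editorial example, not part of the original source: a call such as
///   call void @llvm.experimental.stackmap(i64 7, i32 4, i32 %x, i64* %p)
/// records the values of %x and %p under stack map ID 7 and requests a
/// 4-byte nop shadow at the call site.)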
6888 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
6889 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
6890 // [live variables...])
6891
6892 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
6893
6894 SDValue Chain, InFlag, Callee, NullPtr;
6895 SmallVector<SDValue, 32> Ops;
6896
6897 SDLoc DL = getCurSDLoc();
6898 Callee = getValue(CI.getCalledValue());
6899 NullPtr = DAG.getIntPtrConstant(0, true);
6900
6901 // The stackmap intrinsic only records the live variables (the arguments
6902 // passed to it) and emits NOPs (if requested). Unlike the patchpoint
6903 // intrinsic, this won't be lowered to a function call. This means we don't
6904 // have to worry about calling conventions and target specific lowering code.
6905 // Instead we perform the call lowering right here.
6906 //
6907 // chain, flag = CALLSEQ_START(chain, 0)
6908 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
6909 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
6910 //
6911 Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
6912 InFlag = Chain.getValue(1);
6913
6914 // Add the <id> and <numBytes> constants.
6915 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
6916 Ops.push_back(DAG.getTargetConstant(
6917 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
6918 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
6919 Ops.push_back(DAG.getTargetConstant(
6920 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
6921
6922 // Push live variables for the stack map.
6923 addStackMapLiveVars(CI, 2, Ops, *this);
6924
6925 // We are not pushing any register mask info here on the operands list,
6926 // because the stackmap doesn't clobber anything.
6927
6928 // Push the chain and the glue flag.
6929 Ops.push_back(Chain);
6930 Ops.push_back(InFlag);
6931
6932 // Create the STACKMAP node.
6933 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6934 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
6935 Chain = SDValue(SM, 0);
6936 InFlag = Chain.getValue(1);
6937
6938 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
6939
6940 // Stackmaps don't generate values, so nothing goes into the NodeMap.
6941
6942 // Set the root to the target-lowered call chain.
6943 DAG.setRoot(Chain);
6944
6945 // Inform the Frame Information that we have a stackmap in this function.
6946 FuncInfo.MF->getFrameInfo()->setHasStackMap();
6947}
6948
6949/// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
6950void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
6951 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
6952 // i32 <numBytes>,
6953 // i8* <target>,
6954 // i32 <numArgs>,
6955 // [Args...],
6956 // [live variables...])
6957
6958 CallingConv::ID CC = CI.getCallingConv();
6959 bool isAnyRegCC = CC == CallingConv::AnyReg;
6960 bool hasDef = !CI.getType()->isVoidTy();
6961 SDValue Callee = getValue(CI.getOperand(2)); // <target>
6962
6963 // Get the real number of arguments participating in the call <numArgs>
6964 SDValue NArgVal = getValue(CI.getArgOperand(PatchPointOpers::NArgPos));
6965 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
6966
6967 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
6968 // Intrinsics include all meta-operands up to but not including CC.
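// (Editorial example, not part of the original source: in a call like
//   %r = call i64 @llvm.experimental.patchpoint.i64(i64 3, i32 15, i8* %f,
//                                                    i32 2, i64 %a, i64 %b)
// the first four operands are the meta args and NumArgs is 2, so %a and %b
// participate in the calling convention; any further operands would be stack
// map live variables.)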
6969 unsigned NumMetaOpers = PatchPointOpers::CCPos; 6970 assert(CI.getNumArgOperands() >= NumMetaOpers + NumArgs && 6971 "Not enough arguments provided to the patchpoint intrinsic"); 6972 6973 // For AnyRegCC the arguments are lowered later on manually. 6974 unsigned NumCallArgs = isAnyRegCC ? 0 : NumArgs; 6975 std::pair<SDValue, SDValue> Result = 6976 LowerCallOperands(CI, NumMetaOpers, NumCallArgs, Callee, isAnyRegCC); 6977 6978 // Set the root to the target-lowered call chain. 6979 SDValue Chain = Result.second; 6980 DAG.setRoot(Chain); 6981 6982 SDNode *CallEnd = Chain.getNode(); 6983 if (hasDef && (CallEnd->getOpcode() == ISD::CopyFromReg)) 6984 CallEnd = CallEnd->getOperand(0).getNode(); 6985 6986 /// Get a call instruction from the call sequence chain. 6987 /// Tail calls are not allowed. 6988 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && 6989 "Expected a callseq node."); 6990 SDNode *Call = CallEnd->getOperand(0).getNode(); 6991 bool hasGlue = Call->getGluedNode(); 6992 6993 // Replace the target specific call node with the patchable intrinsic. 6994 SmallVector<SDValue, 8> Ops; 6995 6996 // Add the <id> and <numBytes> constants. 6997 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos)); 6998 Ops.push_back(DAG.getTargetConstant( 6999 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64)); 7000 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos)); 7001 Ops.push_back(DAG.getTargetConstant( 7002 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32)); 7003 7004 // Assume that the Callee is a constant address. 7005 // FIXME: handle function symbols in the future. 7006 Ops.push_back( 7007 DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(), 7008 /*isTarget=*/true)); 7009 7010 // Adjust <numArgs> to account for any arguments that have been passed on the 7011 // stack instead. 7012 // Call Node: Chain, Target, {Args}, RegMask, [Glue] 7013 unsigned NumCallRegArgs = Call->getNumOperands() - (hasGlue ? 4 : 3); 7014 NumCallRegArgs = isAnyRegCC ? NumArgs : NumCallRegArgs; 7015 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32)); 7016 7017 // Add the calling convention 7018 Ops.push_back(DAG.getTargetConstant((unsigned)CC, MVT::i32)); 7019 7020 // Add the arguments we omitted previously. The register allocator should 7021 // place these in any free register. 7022 if (isAnyRegCC) 7023 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) 7024 Ops.push_back(getValue(CI.getArgOperand(i))); 7025 7026 // Push the arguments from the call instruction up to the register mask. 7027 SDNode::op_iterator e = hasGlue ? Call->op_end()-2 : Call->op_end()-1; 7028 for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i) 7029 Ops.push_back(*i); 7030 7031 // Push live variables for the stack map. 7032 addStackMapLiveVars(CI, NumMetaOpers + NumArgs, Ops, *this); 7033 7034 // Push the register mask info. 7035 if (hasGlue) 7036 Ops.push_back(*(Call->op_end()-2)); 7037 else 7038 Ops.push_back(*(Call->op_end()-1)); 7039 7040 // Push the chain (this is originally the first operand of the call, but 7041 // becomes now the last or second to last operand). 7042 Ops.push_back(*(Call->op_begin())); 7043 7044 // Push the glue flag (last operand). 
7045 if (hasGlue) 7046 Ops.push_back(*(Call->op_end()-1)); 7047 7048 SDVTList NodeTys; 7049 if (isAnyRegCC && hasDef) { 7050 // Create the return types based on the intrinsic definition 7051 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7052 SmallVector<EVT, 3> ValueVTs; 7053 ComputeValueVTs(TLI, CI.getType(), ValueVTs); 7054 assert(ValueVTs.size() == 1 && "Expected only one return value type."); 7055 7056 // There is always a chain and a glue type at the end 7057 ValueVTs.push_back(MVT::Other); 7058 ValueVTs.push_back(MVT::Glue); 7059 NodeTys = DAG.getVTList(ValueVTs.data(), ValueVTs.size()); 7060 } else 7061 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7062 7063 // Replace the target specific call node with a PATCHPOINT node. 7064 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT, 7065 getCurSDLoc(), NodeTys, Ops); 7066 7067 // Update the NodeMap. 7068 if (hasDef) { 7069 if (isAnyRegCC) 7070 setValue(&CI, SDValue(MN, 0)); 7071 else 7072 setValue(&CI, Result.first); 7073 } 7074 7075 // Fixup the consumers of the intrinsic. The chain and glue may be used in the 7076 // call sequence. Furthermore the location of the chain and glue can change 7077 // when the AnyReg calling convention is used and the intrinsic returns a 7078 // value. 7079 if (isAnyRegCC && hasDef) { 7080 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)}; 7081 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)}; 7082 DAG.ReplaceAllUsesOfValuesWith(From, To, 2); 7083 } else 7084 DAG.ReplaceAllUsesWith(Call, MN); 7085 DAG.DeleteNode(Call); 7086 7087 // Inform the Frame Information that we have a patchpoint in this function. 7088 FuncInfo.MF->getFrameInfo()->setHasPatchPoint(); 7089} 7090 7091/// TargetLowering::LowerCallTo - This is the default LowerCallTo 7092/// implementation, which just calls LowerCall. 7093/// FIXME: When all targets are 7094/// migrated to using LowerCall, this hook should be integrated into SDISel. 7095std::pair<SDValue, SDValue> 7096TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { 7097 // Handle the incoming return values from the call. 7098 CLI.Ins.clear(); 7099 SmallVector<EVT, 4> RetTys; 7100 ComputeValueVTs(*this, CLI.RetTy, RetTys); 7101 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 7102 EVT VT = RetTys[I]; 7103 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT); 7104 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); 7105 for (unsigned i = 0; i != NumRegs; ++i) { 7106 ISD::InputArg MyFlags; 7107 MyFlags.VT = RegisterVT; 7108 MyFlags.ArgVT = VT; 7109 MyFlags.Used = CLI.IsReturnValueUsed; 7110 if (CLI.RetSExt) 7111 MyFlags.Flags.setSExt(); 7112 if (CLI.RetZExt) 7113 MyFlags.Flags.setZExt(); 7114 if (CLI.IsInReg) 7115 MyFlags.Flags.setInReg(); 7116 CLI.Ins.push_back(MyFlags); 7117 } 7118 } 7119 7120 // Handle all of the outgoing arguments. 
7121 CLI.Outs.clear(); 7122 CLI.OutVals.clear(); 7123 ArgListTy &Args = CLI.Args; 7124 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 7125 SmallVector<EVT, 4> ValueVTs; 7126 ComputeValueVTs(*this, Args[i].Ty, ValueVTs); 7127 for (unsigned Value = 0, NumValues = ValueVTs.size(); 7128 Value != NumValues; ++Value) { 7129 EVT VT = ValueVTs[Value]; 7130 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext()); 7131 SDValue Op = SDValue(Args[i].Node.getNode(), 7132 Args[i].Node.getResNo() + Value); 7133 ISD::ArgFlagsTy Flags; 7134 unsigned OriginalAlignment = 7135 getDataLayout()->getABITypeAlignment(ArgTy); 7136 7137 if (Args[i].isZExt) 7138 Flags.setZExt(); 7139 if (Args[i].isSExt) 7140 Flags.setSExt(); 7141 if (Args[i].isInReg) 7142 Flags.setInReg(); 7143 if (Args[i].isSRet) 7144 Flags.setSRet(); 7145 if (Args[i].isByVal) 7146 Flags.setByVal(); 7147 if (Args[i].isInAlloca) { 7148 Flags.setInAlloca(); 7149 // Set the byval flag for CCAssignFn callbacks that don't know about 7150 // inalloca. This way we can know how many bytes we should've allocated 7151 // and how many bytes a callee cleanup function will pop. If we port 7152 // inalloca to more targets, we'll have to add custom inalloca handling 7153 // in the various CC lowering callbacks. 7154 Flags.setByVal(); 7155 } 7156 if (Args[i].isByVal || Args[i].isInAlloca) { 7157 PointerType *Ty = cast<PointerType>(Args[i].Ty); 7158 Type *ElementTy = Ty->getElementType(); 7159 Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy)); 7160 // For ByVal, alignment should come from FE. BE will guess if this 7161 // info is not there but there are cases it cannot get right. 7162 unsigned FrameAlign; 7163 if (Args[i].Alignment) 7164 FrameAlign = Args[i].Alignment; 7165 else 7166 FrameAlign = getByValTypeAlignment(ElementTy); 7167 Flags.setByValAlign(FrameAlign); 7168 } 7169 if (Args[i].isNest) 7170 Flags.setNest(); 7171 Flags.setOrigAlign(OriginalAlignment); 7172 7173 MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT); 7174 unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT); 7175 SmallVector<SDValue, 4> Parts(NumParts); 7176 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 7177 7178 if (Args[i].isSExt) 7179 ExtendKind = ISD::SIGN_EXTEND; 7180 else if (Args[i].isZExt) 7181 ExtendKind = ISD::ZERO_EXTEND; 7182 7183 // Conservatively only handle 'returned' on non-vectors for now 7184 if (Args[i].isReturned && !Op.getValueType().isVector()) { 7185 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues && 7186 "unexpected use of 'returned'"); 7187 // Before passing 'returned' to the target lowering code, ensure that 7188 // either the register MVT and the actual EVT are the same size or that 7189 // the return value and argument are extended in the same way; in these 7190 // cases it's safe to pass the argument register value unchanged as the 7191 // return register value (although it's at the target's option whether 7192 // to do so) 7193 // TODO: allow code generation to take advantage of partially preserved 7194 // registers rather than clobbering the entire register when the 7195 // parameter extension method is not compatible with the return 7196 // extension method 7197 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || 7198 (ExtendKind != ISD::ANY_EXTEND && 7199 CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt)) 7200 Flags.setReturned(); 7201 } 7202 7203 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, 7204 PartVT, CLI.CS ? 
CLI.CS->getInstruction() : 0, ExtendKind); 7205 7206 for (unsigned j = 0; j != NumParts; ++j) { 7207 // if it isn't first piece, alignment must be 1 7208 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT, 7209 i < CLI.NumFixedArgs, 7210 i, j*Parts[j].getValueType().getStoreSize()); 7211 if (NumParts > 1 && j == 0) 7212 MyFlags.Flags.setSplit(); 7213 else if (j != 0) 7214 MyFlags.Flags.setOrigAlign(1); 7215 7216 CLI.Outs.push_back(MyFlags); 7217 CLI.OutVals.push_back(Parts[j]); 7218 } 7219 } 7220 } 7221 7222 SmallVector<SDValue, 4> InVals; 7223 CLI.Chain = LowerCall(CLI, InVals); 7224 7225 // Verify that the target's LowerCall behaved as expected. 7226 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other && 7227 "LowerCall didn't return a valid chain!"); 7228 assert((!CLI.IsTailCall || InVals.empty()) && 7229 "LowerCall emitted a return value for a tail call!"); 7230 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) && 7231 "LowerCall didn't emit the correct number of values!"); 7232 7233 // For a tail call, the return value is merely live-out and there aren't 7234 // any nodes in the DAG representing it. Return a special value to 7235 // indicate that a tail call has been emitted and no more Instructions 7236 // should be processed in the current block. 7237 if (CLI.IsTailCall) { 7238 CLI.DAG.setRoot(CLI.Chain); 7239 return std::make_pair(SDValue(), SDValue()); 7240 } 7241 7242 DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) { 7243 assert(InVals[i].getNode() && 7244 "LowerCall emitted a null value!"); 7245 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() && 7246 "LowerCall emitted a value with the wrong type!"); 7247 }); 7248 7249 // Collect the legal value parts into potentially illegal values 7250 // that correspond to the original function's return values. 7251 ISD::NodeType AssertOp = ISD::DELETED_NODE; 7252 if (CLI.RetSExt) 7253 AssertOp = ISD::AssertSext; 7254 else if (CLI.RetZExt) 7255 AssertOp = ISD::AssertZext; 7256 SmallVector<SDValue, 4> ReturnValues; 7257 unsigned CurReg = 0; 7258 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 7259 EVT VT = RetTys[I]; 7260 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT); 7261 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); 7262 7263 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg], 7264 NumRegs, RegisterVT, VT, NULL, 7265 AssertOp)); 7266 CurReg += NumRegs; 7267 } 7268 7269 // For a function returning void, there is no return value. We can't create 7270 // such a node, so we just return a null return value in that case. In 7271 // that case, nothing will actually look at the value. 
7272 if (ReturnValues.empty()) 7273 return std::make_pair(SDValue(), CLI.Chain); 7274 7275 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL, 7276 CLI.DAG.getVTList(&RetTys[0], RetTys.size()), 7277 &ReturnValues[0], ReturnValues.size()); 7278 return std::make_pair(Res, CLI.Chain); 7279} 7280 7281void TargetLowering::LowerOperationWrapper(SDNode *N, 7282 SmallVectorImpl<SDValue> &Results, 7283 SelectionDAG &DAG) const { 7284 SDValue Res = LowerOperation(SDValue(N, 0), DAG); 7285 if (Res.getNode()) 7286 Results.push_back(Res); 7287} 7288 7289SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 7290 llvm_unreachable("LowerOperation not implemented for this target!"); 7291} 7292 7293void 7294SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { 7295 SDValue Op = getNonRegisterValue(V); 7296 assert((Op.getOpcode() != ISD::CopyFromReg || 7297 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 7298 "Copy from a reg to the same reg!"); 7299 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg"); 7300 7301 const TargetLowering *TLI = TM.getTargetLowering(); 7302 RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType()); 7303 SDValue Chain = DAG.getEntryNode(); 7304 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, 0, V); 7305 PendingExports.push_back(Chain); 7306} 7307 7308#include "llvm/CodeGen/SelectionDAGISel.h" 7309 7310/// isOnlyUsedInEntryBlock - If the specified argument is only used in the 7311/// entry block, return true. This includes arguments used by switches, since 7312/// the switch may expand into multiple basic blocks. 7313static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { 7314 // With FastISel active, we may be splitting blocks, so force creation 7315 // of virtual registers for all non-dead arguments. 7316 if (FastISel) 7317 return A->use_empty(); 7318 7319 const BasicBlock *Entry = A->getParent()->begin(); 7320 for (const User *U : A->users()) 7321 if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U)) 7322 return false; // Use not in entry block. 7323 7324 return true; 7325} 7326 7327void SelectionDAGISel::LowerArguments(const Function &F) { 7328 SelectionDAG &DAG = SDB->DAG; 7329 SDLoc dl = SDB->getCurSDLoc(); 7330 const TargetLowering *TLI = getTargetLowering(); 7331 const DataLayout *DL = TLI->getDataLayout(); 7332 SmallVector<ISD::InputArg, 16> Ins; 7333 7334 if (!FuncInfo->CanLowerReturn) { 7335 // Put in an sret pointer parameter before all the other parameters. 7336 SmallVector<EVT, 1> ValueVTs; 7337 ComputeValueVTs(*getTargetLowering(), 7338 PointerType::getUnqual(F.getReturnType()), ValueVTs); 7339 7340 // NOTE: Assuming that a pointer will never break down to more than one VT 7341 // or one register. 7342 ISD::ArgFlagsTy Flags; 7343 Flags.setSRet(); 7344 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]); 7345 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0); 7346 Ins.push_back(RetArg); 7347 } 7348 7349 // Set up the incoming argument description vector. 
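// (Editorial note: Idx below also serves as the argument's attribute index;
// parameter attributes are 1-based, with index 0 reserved for the return
// value, which is why it starts at 1.)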
7350 unsigned Idx = 1; 7351 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); 7352 I != E; ++I, ++Idx) { 7353 SmallVector<EVT, 4> ValueVTs; 7354 ComputeValueVTs(*TLI, I->getType(), ValueVTs); 7355 bool isArgValueUsed = !I->use_empty(); 7356 unsigned PartBase = 0; 7357 for (unsigned Value = 0, NumValues = ValueVTs.size(); 7358 Value != NumValues; ++Value) { 7359 EVT VT = ValueVTs[Value]; 7360 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); 7361 ISD::ArgFlagsTy Flags; 7362 unsigned OriginalAlignment = 7363 DL->getABITypeAlignment(ArgTy); 7364 7365 if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt)) 7366 Flags.setZExt(); 7367 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) 7368 Flags.setSExt(); 7369 if (F.getAttributes().hasAttribute(Idx, Attribute::InReg)) 7370 Flags.setInReg(); 7371 if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet)) 7372 Flags.setSRet(); 7373 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) 7374 Flags.setByVal(); 7375 if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) { 7376 Flags.setInAlloca(); 7377 // Set the byval flag for CCAssignFn callbacks that don't know about 7378 // inalloca. This way we can know how many bytes we should've allocated 7379 // and how many bytes a callee cleanup function will pop. If we port 7380 // inalloca to more targets, we'll have to add custom inalloca handling 7381 // in the various CC lowering callbacks. 7382 Flags.setByVal(); 7383 } 7384 if (Flags.isByVal() || Flags.isInAlloca()) { 7385 PointerType *Ty = cast<PointerType>(I->getType()); 7386 Type *ElementTy = Ty->getElementType(); 7387 Flags.setByValSize(DL->getTypeAllocSize(ElementTy)); 7388 // For ByVal, alignment should be passed from FE. BE will guess if 7389 // this info is not there but there are cases it cannot get right. 7390 unsigned FrameAlign; 7391 if (F.getParamAlignment(Idx)) 7392 FrameAlign = F.getParamAlignment(Idx); 7393 else 7394 FrameAlign = TLI->getByValTypeAlignment(ElementTy); 7395 Flags.setByValAlign(FrameAlign); 7396 } 7397 if (F.getAttributes().hasAttribute(Idx, Attribute::Nest)) 7398 Flags.setNest(); 7399 Flags.setOrigAlign(OriginalAlignment); 7400 7401 MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7402 unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT); 7403 for (unsigned i = 0; i != NumRegs; ++i) { 7404 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed, 7405 Idx-1, PartBase+i*RegisterVT.getStoreSize()); 7406 if (NumRegs > 1 && i == 0) 7407 MyFlags.Flags.setSplit(); 7408 // if it isn't first piece, alignment must be 1 7409 else if (i > 0) 7410 MyFlags.Flags.setOrigAlign(1); 7411 Ins.push_back(MyFlags); 7412 } 7413 PartBase += VT.getStoreSize(); 7414 } 7415 } 7416 7417 // Call the target to set up the argument values. 7418 SmallVector<SDValue, 8> InVals; 7419 SDValue NewRoot = TLI->LowerFormalArguments(DAG.getRoot(), F.getCallingConv(), 7420 F.isVarArg(), Ins, 7421 dl, DAG, InVals); 7422 7423 // Verify that the target's LowerFormalArguments behaved as expected. 
7424 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other && 7425 "LowerFormalArguments didn't return a valid chain!"); 7426 assert(InVals.size() == Ins.size() && 7427 "LowerFormalArguments didn't emit the correct number of values!"); 7428 DEBUG({ 7429 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 7430 assert(InVals[i].getNode() && 7431 "LowerFormalArguments emitted a null value!"); 7432 assert(EVT(Ins[i].VT) == InVals[i].getValueType() && 7433 "LowerFormalArguments emitted a value with the wrong type!"); 7434 } 7435 }); 7436 7437 // Update the DAG with the new chain value resulting from argument lowering. 7438 DAG.setRoot(NewRoot); 7439 7440 // Set up the argument values. 7441 unsigned i = 0; 7442 Idx = 1; 7443 if (!FuncInfo->CanLowerReturn) { 7444 // Create a virtual register for the sret pointer, and put in a copy 7445 // from the sret argument into it. 7446 SmallVector<EVT, 1> ValueVTs; 7447 ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs); 7448 MVT VT = ValueVTs[0].getSimpleVT(); 7449 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7450 ISD::NodeType AssertOp = ISD::DELETED_NODE; 7451 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, 7452 RegVT, VT, NULL, AssertOp); 7453 7454 MachineFunction& MF = SDB->DAG.getMachineFunction(); 7455 MachineRegisterInfo& RegInfo = MF.getRegInfo(); 7456 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); 7457 FuncInfo->DemoteRegister = SRetReg; 7458 NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), 7459 SRetReg, ArgValue); 7460 DAG.setRoot(NewRoot); 7461 7462 // i indexes lowered arguments. Bump it past the hidden sret argument. 7463 // Idx indexes LLVM arguments. Don't touch it. 7464 ++i; 7465 } 7466 7467 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; 7468 ++I, ++Idx) { 7469 SmallVector<SDValue, 4> ArgValues; 7470 SmallVector<EVT, 4> ValueVTs; 7471 ComputeValueVTs(*TLI, I->getType(), ValueVTs); 7472 unsigned NumValues = ValueVTs.size(); 7473 7474 // If this argument is unused then remember its value. It is used to generate 7475 // debugging information. 7476 if (I->use_empty() && NumValues) { 7477 SDB->setUnusedArgValue(I, InVals[i]); 7478 7479 // Also remember any frame index for use in FastISel. 7480 if (FrameIndexSDNode *FI = 7481 dyn_cast<FrameIndexSDNode>(InVals[i].getNode())) 7482 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7483 } 7484 7485 for (unsigned Val = 0; Val != NumValues; ++Val) { 7486 EVT VT = ValueVTs[Val]; 7487 MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7488 unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT); 7489 7490 if (!I->use_empty()) { 7491 ISD::NodeType AssertOp = ISD::DELETED_NODE; 7492 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) 7493 AssertOp = ISD::AssertSext; 7494 else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt)) 7495 AssertOp = ISD::AssertZext; 7496 7497 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], 7498 NumParts, PartVT, VT, 7499 NULL, AssertOp)); 7500 } 7501 7502 i += NumParts; 7503 } 7504 7505 // We don't need to do anything else for unused arguments. 7506 if (ArgValues.empty()) 7507 continue; 7508 7509 // Note down frame index. 
7510 if (FrameIndexSDNode *FI = 7511 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode())) 7512 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7513 7514 SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues, 7515 SDB->getCurSDLoc()); 7516 7517 SDB->setValue(I, Res); 7518 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { 7519 if (LoadSDNode *LNode = 7520 dyn_cast<LoadSDNode>(Res.getOperand(0).getNode())) 7521 if (FrameIndexSDNode *FI = 7522 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 7523 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7524 } 7525 7526 // If this argument is live outside of the entry block, insert a copy from 7527 // wherever we got it to the vreg that other BB's will reference it as. 7528 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) { 7529 // If we can, though, try to skip creating an unnecessary vreg. 7530 // FIXME: This isn't very clean... it would be nice to make this more 7531 // general. It's also subtly incompatible with the hacks FastISel 7532 // uses with vregs. 7533 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 7534 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 7535 FuncInfo->ValueMap[I] = Reg; 7536 continue; 7537 } 7538 } 7539 if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) { 7540 FuncInfo->InitializeRegForValue(I); 7541 SDB->CopyToExportRegsIfNeeded(I); 7542 } 7543 } 7544 7545 assert(i == InVals.size() && "Argument register count mismatch!"); 7546 7547 // Finally, if the target has anything special to do, allow it to do so. 7548 // FIXME: this should insert code into the DAG! 7549 EmitFunctionEntryCode(); 7550} 7551 7552/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 7553/// ensure constants are generated when needed. Remember the virtual registers 7554/// that need to be added to the Machine PHI nodes as input. We cannot just 7555/// directly add them, because expansion might result in multiple MBB's for one 7556/// BB. As such, the start of the BB might correspond to a different MBB than 7557/// the end. 7558/// 7559void 7560SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 7561 const TerminatorInst *TI = LLVMBB->getTerminator(); 7562 7563 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 7564 7565 // Check successor nodes' PHI nodes that expect a constant to be available 7566 // from this block. 7567 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 7568 const BasicBlock *SuccBB = TI->getSuccessor(succ); 7569 if (!isa<PHINode>(SuccBB->begin())) continue; 7570 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 7571 7572 // If this terminator has multiple identical successors (common for 7573 // switches), only handle each succ once. 7574 if (!SuccsHandled.insert(SuccMBB)) continue; 7575 7576 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 7577 7578 // At this point we know that there is a 1-1 correspondence between LLVM PHI 7579 // nodes and Machine PHI nodes, but the incoming operands have not been 7580 // emitted yet. 7581 for (BasicBlock::const_iterator I = SuccBB->begin(); 7582 const PHINode *PN = dyn_cast<PHINode>(I); ++I) { 7583 // Ignore dead phi's. 
7584 if (PN->use_empty()) continue;
7585
7586 // Skip empty types
7587 if (PN->getType()->isEmptyTy())
7588 continue;
7589
7590 unsigned Reg;
7591 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
7592
7593 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
7594 unsigned &RegOut = ConstantsOut[C];
7595 if (RegOut == 0) {
7596 RegOut = FuncInfo.CreateRegs(C->getType());
7597 CopyValueToVirtualRegister(C, RegOut);
7598 }
7599 Reg = RegOut;
7600 } else {
7601 DenseMap<const Value *, unsigned>::iterator I =
7602 FuncInfo.ValueMap.find(PHIOp);
7603 if (I != FuncInfo.ValueMap.end())
7604 Reg = I->second;
7605 else {
7606 assert(isa<AllocaInst>(PHIOp) &&
7607 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
7608 "Didn't codegen value into a register!??");
7609 Reg = FuncInfo.CreateRegs(PHIOp->getType());
7610 CopyValueToVirtualRegister(PHIOp, Reg);
7611 }
7612 }
7613
7614 // Remember that this register needs to be added to the machine PHI node as
7615 // the input for this MBB.
7616 SmallVector<EVT, 4> ValueVTs;
7617 const TargetLowering *TLI = TM.getTargetLowering();
7618 ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
7619 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
7620 EVT VT = ValueVTs[vti];
7621 unsigned NumRegisters = TLI->getNumRegisters(*DAG.getContext(), VT);
7622 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
7623 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
7624 Reg += NumRegisters;
7625 }
7626 }
7627 }
7628
7629 ConstantsOut.clear();
7630}
7631
7632/// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
7633/// is 0.
7634MachineBasicBlock *
7635SelectionDAGBuilder::StackProtectorDescriptor::
7636AddSuccessorMBB(const BasicBlock *BB,
7637 MachineBasicBlock *ParentMBB,
7638 MachineBasicBlock *SuccMBB) {
7639 // If SuccBB has not been created yet, create it.
7640 if (!SuccMBB) {
7641 MachineFunction *MF = ParentMBB->getParent();
7642 MachineFunction::iterator BBI = ParentMBB;
7643 SuccMBB = MF->CreateMachineBasicBlock(BB);
7644 MF->insert(++BBI, SuccMBB);
7645 }
7646 // Add it as a successor of ParentMBB.
7647 ParentMBB->addSuccessor(SuccMBB);
7648 return SuccMBB;
7649}
7650