TargetLowering.h revision 7042aa598dde44ef74eb3ba1ae729729e64c46cf
1//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9///
10/// \file
11/// This file describes how to lower LLVM code to machine code. This has three
12/// main components:
13///
14/// 1. Which ValueTypes are natively supported by the target.
15/// 2. Which operations are supported for supported ValueTypes.
16/// 3. Cost thresholds for alternative implementations of certain operations.
17///
18/// In addition it has a few other components, like information about FP
19/// immediates.
20///
21//===----------------------------------------------------------------------===//
22
23#ifndef LLVM_TARGET_TARGETLOWERING_H
24#define LLVM_TARGET_TARGETLOWERING_H
25
26#include "llvm/ADT/DenseMap.h"
27#include "llvm/CodeGen/DAGCombine.h"
28#include "llvm/CodeGen/RuntimeLibcalls.h"
29#include "llvm/CodeGen/SelectionDAGNodes.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/CallingConv.h"
32#include "llvm/IR/InlineAsm.h"
33#include "llvm/Support/CallSite.h"
34#include "llvm/Target/TargetCallingConv.h"
35#include "llvm/Target/TargetMachine.h"
36#include <climits>
37#include <map>
38#include <vector>
39
40namespace llvm {
41 class CallInst;
42 class CCState;
43 class FastISel;
44 class FunctionLoweringInfo;
45 class ImmutableCallSite;
46 class IntrinsicInst;
47 class MachineBasicBlock;
48 class MachineFunction;
49 class MachineInstr;
50 class MachineJumpTableInfo;
51 class MCContext;
52 class MCExpr;
53 template<typename T> class SmallVectorImpl;
54 class DataLayout;
55 class TargetRegisterClass;
56 class TargetLibraryInfo;
57 class TargetLoweringObjectFile;
58 class Value;
59
60 namespace Sched {
61 enum Preference {
62 None, // No preference
63 Source, // Follow source order.
64 RegPressure, // Scheduling for lowest register pressure.
65 Hybrid, // Scheduling for both latency and register pressure.
66 ILP, // Scheduling for ILP in low register pressure mode.
67 VLIW // Scheduling for VLIW targets.
68 };
69 }
70
71/// This base class for TargetLowering contains the SelectionDAG-independent
72/// parts that can be used from the rest of CodeGen.
73class TargetLoweringBase {
74 TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
75 void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
76
77public:
78 /// This enum indicates whether operations are valid for a target, and if not,
79 /// what action should be used to make them valid.
80 enum LegalizeAction {
81 Legal, // The target natively supports this operation.
82 Promote, // This operation should be executed in a larger type.
83 Expand, // Try to expand this to other ops, otherwise use a libcall.
84 Custom // Use the LowerOperation hook to implement custom lowering.
85 };
86
87 /// This enum indicates whether types are legal for a target, and if not,
88 /// what action should be used to make them valid.
89 enum LegalizeTypeAction {
90 TypeLegal, // The target natively supports this type.
91 TypePromoteInteger, // Replace this integer with a larger one.
92 TypeExpandInteger, // Split this integer into two of half the size.
93 TypeSoftenFloat, // Convert this float to a same size integer type.
94 TypeExpandFloat, // Split this float into two of half the size.
95 TypeScalarizeVector, // Replace this one-element vector with its element.
96 TypeSplitVector, // Split this vector into two of half the size. 97 TypeWidenVector // This vector should be widened into a larger vector. 98 }; 99 100 /// LegalizeKind holds the legalization kind that needs to happen to EVT 101 /// in order to type-legalize it. 102 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind; 103 104 /// Enum that describes how the target represents true/false values. 105 enum BooleanContent { 106 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. 107 ZeroOrOneBooleanContent, // All bits zero except for bit 0. 108 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. 109 }; 110 111 /// Enum that describes what type of support for selects the target has. 112 enum SelectSupportKind { 113 ScalarValSelect, // The target supports scalar selects (ex: cmov). 114 ScalarCondVectorVal, // The target supports selects with a scalar condition 115 // and vector values (ex: cmov). 116 VectorMaskSelect // The target supports vector selects with a vector 117 // mask (ex: x86 blends). 118 }; 119 120 static ISD::NodeType getExtendForContent(BooleanContent Content) { 121 switch (Content) { 122 case UndefinedBooleanContent: 123 // Extend by adding rubbish bits. 124 return ISD::ANY_EXTEND; 125 case ZeroOrOneBooleanContent: 126 // Extend by adding zero bits. 127 return ISD::ZERO_EXTEND; 128 case ZeroOrNegativeOneBooleanContent: 129 // Extend by copying the sign bit. 130 return ISD::SIGN_EXTEND; 131 } 132 llvm_unreachable("Invalid content kind"); 133 } 134 135 /// NOTE: The constructor takes ownership of TLOF. 136 explicit TargetLoweringBase(const TargetMachine &TM, 137 const TargetLoweringObjectFile *TLOF); 138 virtual ~TargetLoweringBase(); 139 140protected: 141 /// \brief Initialize all of the actions to default values. 142 void initActions(); 143 144public: 145 const TargetMachine &getTargetMachine() const { return TM; } 146 const DataLayout *getDataLayout() const { return TD; } 147 const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } 148 149 bool isBigEndian() const { return !IsLittleEndian; } 150 bool isLittleEndian() const { return IsLittleEndian; } 151 // Return the pointer type for the given address space, defaults to 152 // the pointer type from the data layout. 153 // FIXME: The default needs to be removed once all the code is updated. 154 virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const { return PointerTy; } 155 virtual MVT getScalarShiftAmountTy(EVT LHSTy) const; 156 157 EVT getShiftAmountTy(EVT LHSTy) const; 158 159 /// Return true if the select operation is expensive for this target. 160 bool isSelectExpensive() const { return SelectIsExpensive; } 161 162 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const { 163 return true; 164 } 165 166 /// Return true if a vector of the given type should be split 167 /// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type 168 /// legalization. 169 virtual bool shouldSplitVectorElementType(EVT /*VT*/) const { return false; } 170 171 /// Return true if integer divide is usually cheaper than a sequence of 172 /// several shifts, adds, and multiplies for this target. 173 bool isIntDivCheap() const { return IntDivIsCheap; } 174 175 /// Returns true if target has indicated at least one type should be bypassed. 
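///
/// For example (an illustrative sketch, not mandated by this interface), a
/// target whose 64-bit divider is slow might call addBypassSlowDiv(64, 32)
/// from its constructor; this predicate then returns true and
/// getBypassSlowDivWidths() maps 64 to 32.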
176 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
177
178 /// Returns the map of slow types for division or remainder with the
179 /// corresponding fast types.
180 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
181 return BypassSlowDivWidths;
182 }
183
184 /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
185 bool isPow2DivCheap() const { return Pow2DivIsCheap; }
186
187 /// Return true if Flow Control is an expensive operation that should be
188 /// avoided.
189 bool isJumpExpensive() const { return JumpIsExpensive; }
190
191 /// Return true if selects are only cheaper than branches if the branch is
192 /// unlikely to be predicted right.
193 bool isPredictableSelectExpensive() const {
194 return PredictableSelectIsExpensive;
195 }
196
197 /// Return the ValueType of the result of SETCC operations. Also used to
198 /// obtain the target's preferred type for the condition operand of SELECT and
199 /// BRCOND nodes. In the case of BRCOND the argument passed is MVT::Other
200 /// since there are no other operands to get a type hint from.
201 virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
202
203 /// Return the ValueType for comparison libcalls. Comparison libcalls include
204 /// floating point comparison calls, and Ordered/Unordered check calls on
205 /// floating point numbers.
206 virtual
207 MVT::SimpleValueType getCmpLibcallReturnType() const;
208
209 /// For targets without i1 registers, this gives the nature of the high-bits
210 /// of boolean values held in types wider than i1.
211 ///
212 /// "Boolean values" are special true/false values produced by nodes like
213 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
214 /// Not to be confused with general values promoted from i1. Some cpus
215 /// distinguish between vectors of boolean and scalars; the isVec parameter
216 /// selects between the two kinds. For example on X86 a scalar boolean should
217 /// be zero extended from i1, while the elements of a vector of booleans
218 /// should be sign extended from i1.
219 BooleanContent getBooleanContents(bool isVec) const {
220 return isVec ? BooleanVectorContents : BooleanContents;
221 }
222
223 /// Return target scheduling preference.
224 Sched::Preference getSchedulingPreference() const {
225 return SchedPreferenceInfo;
226 }
227
228 /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
229 /// for different nodes. This function returns the preference (or none) for
230 /// the given node.
231 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
232 return Sched::None;
233 }
234
235 /// Return the register class that should be used for the specified value
236 /// type.
237 virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
238 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
239 assert(RC && "This value type is not natively supported!");
240 return RC;
241 }
242
243 /// Return the 'representative' register class for the specified value
244 /// type.
245 ///
246 /// The 'representative' register class is the largest legal super-reg
247 /// register class for the register class of the value type. For example, on
248 /// i386 the rep register class for i8, i16, and i32 is GR32; while the rep
249 /// register class is GR64 on x86_64.
250 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const { 251 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy]; 252 return RC; 253 } 254 255 /// Return the cost of the 'representative' register class for the specified 256 /// value type. 257 virtual uint8_t getRepRegClassCostFor(MVT VT) const { 258 return RepRegClassCostForVT[VT.SimpleTy]; 259 } 260 261 /// Return true if the target has native support for the specified value type. 262 /// This means that it has a register that directly holds it without 263 /// promotions or expansions. 264 bool isTypeLegal(EVT VT) const { 265 assert(!VT.isSimple() || 266 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); 267 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0; 268 } 269 270 class ValueTypeActionImpl { 271 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum 272 /// that indicates how instruction selection should deal with the type. 273 uint8_t ValueTypeActions[MVT::LAST_VALUETYPE]; 274 275 public: 276 ValueTypeActionImpl() { 277 std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0); 278 } 279 280 LegalizeTypeAction getTypeAction(MVT VT) const { 281 return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy]; 282 } 283 284 void setTypeAction(MVT VT, LegalizeTypeAction Action) { 285 unsigned I = VT.SimpleTy; 286 ValueTypeActions[I] = Action; 287 } 288 }; 289 290 const ValueTypeActionImpl &getValueTypeActions() const { 291 return ValueTypeActions; 292 } 293 294 /// Return how we should legalize values of this type, either it is already 295 /// legal (return 'Legal') or we need to promote it to a larger type (return 296 /// 'Promote'), or we need to expand it into multiple registers of smaller 297 /// integer type (return 'Expand'). 'Custom' is not an option. 298 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const { 299 return getTypeConversion(Context, VT).first; 300 } 301 LegalizeTypeAction getTypeAction(MVT VT) const { 302 return ValueTypeActions.getTypeAction(VT); 303 } 304 305 /// For types supported by the target, this is an identity function. For 306 /// types that must be promoted to larger types, this returns the larger type 307 /// to promote to. For integer types that are larger than the largest integer 308 /// register, this contains one step in the expansion to get to the smaller 309 /// register. For illegal floating point types, this returns the integer type 310 /// to transform to. 311 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { 312 return getTypeConversion(Context, VT).second; 313 } 314 315 /// For types supported by the target, this is an identity function. For 316 /// types that must be expanded (i.e. integer types that are larger than the 317 /// largest integer register or illegal floating point types), this returns 318 /// the largest legal type it will be expanded to. 319 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { 320 assert(!VT.isVector()); 321 while (true) { 322 switch (getTypeAction(Context, VT)) { 323 case TypeLegal: 324 return VT; 325 case TypeExpandInteger: 326 VT = getTypeToTransformTo(Context, VT); 327 break; 328 default: 329 llvm_unreachable("Type is not legal nor is it to be expanded!"); 330 } 331 } 332 } 333 334 /// Vector types are broken down into some number of legal first class types. 335 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8 336 /// promoted EVT::f64 values with the X86 FP stack. 
Similarly, EVT::v2i64
337 /// turns into 4 EVT::i32 values with both PPC and X86.
338 ///
339 /// This method returns the number of registers needed, and the VT for each
340 /// register. It also returns the VT and quantity of the intermediate values
341 /// before they are promoted/expanded.
342 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
343 EVT &IntermediateVT,
344 unsigned &NumIntermediates,
345 MVT &RegisterVT) const;
346
347 struct IntrinsicInfo {
348 unsigned opc; // target opcode
349 EVT memVT; // memory VT
350 const Value* ptrVal; // value representing memory location
351 int offset; // offset off of ptrVal
352 unsigned align; // alignment
353 bool vol; // is volatile?
354 bool readMem; // reads memory?
355 bool writeMem; // writes memory?
356 };
357
358 /// Given an intrinsic, checks if on the target the intrinsic will need to map
359 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
360 /// true and stores the intrinsic information into the IntrinsicInfo that was
361 /// passed to the function.
362 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
363 unsigned /*Intrinsic*/) const {
364 return false;
365 }
366
367 /// Returns true if the target can instruction select the specified FP
368 /// immediate natively. If false, the legalizer will materialize the FP
369 /// immediate as a load from a constant pool.
370 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
371 return false;
372 }
373
374 /// Targets can use this to indicate that they only support *some*
375 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
376 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
377 /// legal.
378 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
379 EVT /*VT*/) const {
380 return true;
381 }
382
383 /// Returns true if the operation can trap for the value type.
384 ///
385 /// VT must be a legal type. By default, we optimistically assume most
386 /// operations don't trap except for divide and remainder.
387 virtual bool canOpTrap(unsigned Op, EVT VT) const;
388
389 /// Similar to isShuffleMaskLegal. Targets can use this to
390 /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
391 /// a VAND with a constant pool entry.
392 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
393 EVT /*VT*/) const {
394 return false;
395 }
396
397 /// Return how this operation should be treated: either it is legal, needs to
398 /// be promoted to a larger size, needs to be expanded to some other code
399 /// sequence, or the target has a custom expander for it.
400 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
401 if (VT.isExtended()) return Expand;
402 // If a target-specific SDNode requires legalization, require the target
403 // to provide custom legalization for it.
404 if (Op > array_lengthof(OpActions[0])) return Custom;
405 unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
406 return (LegalizeAction)OpActions[I][Op];
407 }
408
409 /// Return true if the specified operation is legal on this target or can be
410 /// made legal with custom lowering. This is used to help guide high-level
411 /// lowering decisions.
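///
/// For example (illustrative only), a DAG combine that wants to form an
/// ISD::FMA node might guard the transformation with:
///   if (isOperationLegalOrCustom(ISD::FMA, VT))
///     ... form the ISD::FMA node ...
/// so that it is skipped on targets that would have to expand the node again.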
412 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const { 413 return (VT == MVT::Other || isTypeLegal(VT)) && 414 (getOperationAction(Op, VT) == Legal || 415 getOperationAction(Op, VT) == Custom); 416 } 417 418 /// Return true if the specified operation is legal on this target or can be 419 /// made legal using promotion. This is used to help guide high-level lowering 420 /// decisions. 421 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const { 422 return (VT == MVT::Other || isTypeLegal(VT)) && 423 (getOperationAction(Op, VT) == Legal || 424 getOperationAction(Op, VT) == Promote); 425 } 426 427 /// Return true if the specified operation is illegal on this target or 428 /// unlikely to be made legal with custom lowering. This is used to help guide 429 /// high-level lowering decisions. 430 bool isOperationExpand(unsigned Op, EVT VT) const { 431 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand); 432 } 433 434 /// Return true if the specified operation is legal on this target. 435 bool isOperationLegal(unsigned Op, EVT VT) const { 436 return (VT == MVT::Other || isTypeLegal(VT)) && 437 getOperationAction(Op, VT) == Legal; 438 } 439 440 /// Return how this load with extension should be treated: either it is legal, 441 /// needs to be promoted to a larger size, needs to be expanded to some other 442 /// code sequence, or the target has a custom expander for it. 443 LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const { 444 assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE && 445 "Table isn't big enough!"); 446 return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType]; 447 } 448 449 /// Return true if the specified load with extension is legal on this target. 450 bool isLoadExtLegal(unsigned ExtType, EVT VT) const { 451 return VT.isSimple() && 452 getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal; 453 } 454 455 /// Return how this store with truncation should be treated: either it is 456 /// legal, needs to be promoted to a larger size, needs to be expanded to some 457 /// other code sequence, or the target has a custom expander for it. 458 LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const { 459 assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE && 460 "Table isn't big enough!"); 461 return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy] 462 [MemVT.SimpleTy]; 463 } 464 465 /// Return true if the specified store with truncation is legal on this 466 /// target. 467 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const { 468 return isTypeLegal(ValVT) && MemVT.isSimple() && 469 getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal; 470 } 471 472 /// Return how the indexed load should be treated: either it is legal, needs 473 /// to be promoted to a larger size, needs to be expanded to some other code 474 /// sequence, or the target has a custom expander for it. 475 LegalizeAction 476 getIndexedLoadAction(unsigned IdxMode, MVT VT) const { 477 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE && 478 "Table isn't big enough!"); 479 unsigned Ty = (unsigned)VT.SimpleTy; 480 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4); 481 } 482 483 /// Return true if the specified indexed load is legal on this target. 
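///
/// For example (an illustrative sketch), a target with post-incrementing
/// loads might mark them in its constructor with:
///   setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
/// after which the DAG combiner can query this hook before forming a
/// post-indexed load.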
484 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const { 485 return VT.isSimple() && 486 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal || 487 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom); 488 } 489 490 /// Return how the indexed store should be treated: either it is legal, needs 491 /// to be promoted to a larger size, needs to be expanded to some other code 492 /// sequence, or the target has a custom expander for it. 493 LegalizeAction 494 getIndexedStoreAction(unsigned IdxMode, MVT VT) const { 495 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE && 496 "Table isn't big enough!"); 497 unsigned Ty = (unsigned)VT.SimpleTy; 498 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f); 499 } 500 501 /// Return true if the specified indexed load is legal on this target. 502 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const { 503 return VT.isSimple() && 504 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || 505 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); 506 } 507 508 /// Return how the condition code should be treated: either it is legal, needs 509 /// to be expanded to some other code sequence, or the target has a custom 510 /// expander for it. 511 LegalizeAction 512 getCondCodeAction(ISD::CondCode CC, MVT VT) const { 513 assert((unsigned)CC < array_lengthof(CondCodeActions) && 514 (unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 && 515 "Table isn't big enough!"); 516 /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit 517 /// value and the upper 27 bits index into the second dimension of the 518 /// array to select what 64bit value to use. 519 LegalizeAction Action = (LegalizeAction) 520 ((CondCodeActions[CC][VT.SimpleTy >> 5] >> (2*(VT.SimpleTy & 0x1F))) & 3); 521 assert(Action != Promote && "Can't promote condition code!"); 522 return Action; 523 } 524 525 /// Return true if the specified condition code is legal on this target. 526 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { 527 return 528 getCondCodeAction(CC, VT) == Legal || 529 getCondCodeAction(CC, VT) == Custom; 530 } 531 532 533 /// If the action for this operation is to promote, this method returns the 534 /// ValueType to promote to. 535 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { 536 assert(getOperationAction(Op, VT) == Promote && 537 "This operation isn't promoted!"); 538 539 // See if this has an explicit type specified. 540 std::map<std::pair<unsigned, MVT::SimpleValueType>, 541 MVT::SimpleValueType>::const_iterator PTTI = 542 PromoteToType.find(std::make_pair(Op, VT.SimpleTy)); 543 if (PTTI != PromoteToType.end()) return PTTI->second; 544 545 assert((VT.isInteger() || VT.isFloatingPoint()) && 546 "Cannot autopromote this type, add it with AddPromotedToType."); 547 548 MVT NVT = VT; 549 do { 550 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1); 551 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && 552 "Didn't find type to promote to!"); 553 } while (!isTypeLegal(NVT) || 554 getOperationAction(Op, NVT) == Promote); 555 return NVT; 556 } 557 558 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM 559 /// operations except for the pointer size. If AllowUnknown is true, this 560 /// will return MVT::Other for types with no EVT counterpart (e.g. structs), 561 /// otherwise it will assert. 562 EVT getValueType(Type *Ty, bool AllowUnknown = false) const { 563 // Lower scalar pointers to native pointer types. 
564 if (Ty->isPointerTy()) return PointerTy; 565 566 if (Ty->isVectorTy()) { 567 VectorType *VTy = cast<VectorType>(Ty); 568 Type *Elm = VTy->getElementType(); 569 // Lower vectors of pointers to native pointer types. 570 if (Elm->isPointerTy()) 571 Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext()); 572 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), 573 VTy->getNumElements()); 574 } 575 return EVT::getEVT(Ty, AllowUnknown); 576 } 577 578 /// Return the MVT corresponding to this LLVM type. See getValueType. 579 MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const { 580 return getValueType(Ty, AllowUnknown).getSimpleVT(); 581 } 582 583 /// Return the desired alignment for ByVal aggregate function arguments in the 584 /// caller parameter area. This is the actual alignment, not its logarithm. 585 virtual unsigned getByValTypeAlignment(Type *Ty) const; 586 587 /// Return the type of registers that this ValueType will eventually require. 588 MVT getRegisterType(MVT VT) const { 589 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); 590 return RegisterTypeForVT[VT.SimpleTy]; 591 } 592 593 /// Return the type of registers that this ValueType will eventually require. 594 MVT getRegisterType(LLVMContext &Context, EVT VT) const { 595 if (VT.isSimple()) { 596 assert((unsigned)VT.getSimpleVT().SimpleTy < 597 array_lengthof(RegisterTypeForVT)); 598 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; 599 } 600 if (VT.isVector()) { 601 EVT VT1; 602 MVT RegisterVT; 603 unsigned NumIntermediates; 604 (void)getVectorTypeBreakdown(Context, VT, VT1, 605 NumIntermediates, RegisterVT); 606 return RegisterVT; 607 } 608 if (VT.isInteger()) { 609 return getRegisterType(Context, getTypeToTransformTo(Context, VT)); 610 } 611 llvm_unreachable("Unsupported extended type!"); 612 } 613 614 /// Return the number of registers that this ValueType will eventually 615 /// require. 616 /// 617 /// This is one for any types promoted to live in larger registers, but may be 618 /// more than one for types (like i64) that are split into pieces. For types 619 /// like i140, which are first promoted then expanded, it is the number of 620 /// registers needed to hold all the bits of the original type. For an i140 621 /// on a 32 bit machine this means 5 registers. 622 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const { 623 if (VT.isSimple()) { 624 assert((unsigned)VT.getSimpleVT().SimpleTy < 625 array_lengthof(NumRegistersForVT)); 626 return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; 627 } 628 if (VT.isVector()) { 629 EVT VT1; 630 MVT VT2; 631 unsigned NumIntermediates; 632 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); 633 } 634 if (VT.isInteger()) { 635 unsigned BitWidth = VT.getSizeInBits(); 636 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); 637 return (BitWidth + RegWidth - 1) / RegWidth; 638 } 639 llvm_unreachable("Unsupported extended type!"); 640 } 641 642 /// If true, then instruction selection should seek to shrink the FP constant 643 /// of the specified type to a smaller type in order to save space and / or 644 /// reduce runtime. 645 virtual bool ShouldShrinkFPConstant(EVT) const { return true; } 646 647 /// If true, the target has custom DAG combine transformations that it can 648 /// perform for the specified node. 
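///
/// For example (an illustrative sketch, not required by this interface), a
/// target that wants to combine patterns rooted at ISD::ADD would typically
/// call, in its constructor:
///   setTargetDAGCombine(ISD::ADD);
/// and then handle the node in its PerformDAGCombine override; this predicate
/// is what routes such nodes back to the target.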
649 bool hasTargetDAGCombine(ISD::NodeType NT) const {
650 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
651 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
652 }
653
654 /// \brief Get maximum # of store operations permitted for llvm.memset
655 ///
656 /// This function returns the maximum number of store operations permitted
657 /// to replace a call to llvm.memset. The value is set by the target at the
658 /// performance threshold for such a replacement. If OptSize is true,
659 /// return the limit for functions that have OptSize attribute.
660 unsigned getMaxStoresPerMemset(bool OptSize) const {
661 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
662 }
663
664 /// \brief Get maximum # of store operations permitted for llvm.memcpy
665 ///
666 /// This function returns the maximum number of store operations permitted
667 /// to replace a call to llvm.memcpy. The value is set by the target at the
668 /// performance threshold for such a replacement. If OptSize is true,
669 /// return the limit for functions that have OptSize attribute.
670 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
671 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
672 }
673
674 /// \brief Get maximum # of store operations permitted for llvm.memmove
675 ///
676 /// This function returns the maximum number of store operations permitted
677 /// to replace a call to llvm.memmove. The value is set by the target at the
678 /// performance threshold for such a replacement. If OptSize is true,
679 /// return the limit for functions that have OptSize attribute.
680 unsigned getMaxStoresPerMemmove(bool OptSize) const {
681 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
682 }
683
684 /// \brief Determine if the target supports unaligned memory accesses.
685 ///
686 /// This function returns true if the target allows unaligned memory accesses
687 /// of the specified type. If true, it also returns whether the unaligned
688 /// memory access is "fast" in the second argument by reference. This is used,
689 /// for example, in situations where an array copy/move/set is converted to a
690 /// sequence of store operations. Its use helps to ensure that such
691 /// replacements don't generate code that causes an alignment error (trap) on
692 /// the target machine.
693 virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
694 return false;
695 }
696
697 /// Returns the target specific optimal type for load and store operations as
698 /// a result of memset, memcpy, and memmove lowering.
699 ///
700 /// If DstAlign is zero, the destination alignment can satisfy any
701 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
702 /// against the alignment requirement, probably because the
703 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
704 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
705 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
706 /// does not need to be loaded. It returns MVT::Other if the type should be
707 /// determined using generic target-independent logic.
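///
/// A minimal sketch of an override for a hypothetical MyTargetLowering (the
/// 16-byte vector heuristic is made up purely for illustration) might be:
///   EVT MyTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
///                                             unsigned SrcAlign, bool IsMemset,
///                                             bool ZeroMemset, bool MemcpyStrSrc,
///                                             MachineFunction &MF) const {
///     if (Size >= 16 && (DstAlign == 0 || DstAlign >= 16))
///       return MVT::v4i32;
///     return MVT::Other;
///   }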
708 virtual EVT getOptimalMemOpType(uint64_t /*Size*/, 709 unsigned /*DstAlign*/, unsigned /*SrcAlign*/, 710 bool /*IsMemset*/, 711 bool /*ZeroMemset*/, 712 bool /*MemcpyStrSrc*/, 713 MachineFunction &/*MF*/) const { 714 return MVT::Other; 715 } 716 717 /// Returns true if it's safe to use load / store of the specified type to 718 /// expand memcpy / memset inline. 719 /// 720 /// This is mostly true for all types except for some special cases. For 721 /// example, on X86 targets without SSE2 f64 load / store are done with fldl / 722 /// fstpl which also does type conversion. Note the specified type doesn't 723 /// have to be legal as the hook is used before type legalization. 724 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; } 725 726 /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp. 727 bool usesUnderscoreSetJmp() const { 728 return UseUnderscoreSetJmp; 729 } 730 731 /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp. 732 bool usesUnderscoreLongJmp() const { 733 return UseUnderscoreLongJmp; 734 } 735 736 /// Return whether the target can generate code for jump tables. 737 bool supportJumpTables() const { 738 return SupportJumpTables; 739 } 740 741 /// Return integer threshold on number of blocks to use jump tables rather 742 /// than if sequence. 743 int getMinimumJumpTableEntries() const { 744 return MinimumJumpTableEntries; 745 } 746 747 /// If a physical register, this specifies the register that 748 /// llvm.savestack/llvm.restorestack should save and restore. 749 unsigned getStackPointerRegisterToSaveRestore() const { 750 return StackPointerRegisterToSaveRestore; 751 } 752 753 /// If a physical register, this returns the register that receives the 754 /// exception address on entry to a landing pad. 755 unsigned getExceptionPointerRegister() const { 756 return ExceptionPointerRegister; 757 } 758 759 /// If a physical register, this returns the register that receives the 760 /// exception typeid on entry to a landing pad. 761 unsigned getExceptionSelectorRegister() const { 762 return ExceptionSelectorRegister; 763 } 764 765 /// Returns the target's jmp_buf size in bytes (if never set, the default is 766 /// 200) 767 unsigned getJumpBufSize() const { 768 return JumpBufSize; 769 } 770 771 /// Returns the target's jmp_buf alignment in bytes (if never set, the default 772 /// is 0) 773 unsigned getJumpBufAlignment() const { 774 return JumpBufAlignment; 775 } 776 777 /// Return the minimum stack alignment of an argument. 778 unsigned getMinStackArgumentAlignment() const { 779 return MinStackArgumentAlignment; 780 } 781 782 /// Return the minimum function alignment. 783 unsigned getMinFunctionAlignment() const { 784 return MinFunctionAlignment; 785 } 786 787 /// Return the preferred function alignment. 788 unsigned getPrefFunctionAlignment() const { 789 return PrefFunctionAlignment; 790 } 791 792 /// Return the preferred loop alignment. 793 unsigned getPrefLoopAlignment() const { 794 return PrefLoopAlignment; 795 } 796 797 /// Return whether the DAG builder should automatically insert fences and 798 /// reduce ordering for atomics. 799 bool getInsertFencesForAtomic() const { 800 return InsertFencesForAtomic; 801 } 802 803 /// Return true if the target stores stack protector cookies at a fixed offset 804 /// in some non-standard address space, and populates the address space and 805 /// offset as appropriate. 
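///
/// A minimal sketch of an override for a hypothetical MyTargetLowering, with
/// made-up address-space and offset values, might be:
///   bool MyTargetLowering::getStackCookieLocation(unsigned &AddressSpace,
///                                                 unsigned &Offset) const {
///     AddressSpace = 257; // hypothetical segment/TLS address space
///     Offset = 0x28;      // hypothetical fixed offset of the guard slot
///     return true;
///   }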
806 virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/, 807 unsigned &/*Offset*/) const { 808 return false; 809 } 810 811 /// Returns the maximal possible offset which can be used for loads / stores 812 /// from the global. 813 virtual unsigned getMaximalGlobalOffset() const { 814 return 0; 815 } 816 817 //===--------------------------------------------------------------------===// 818 /// \name Helpers for TargetTransformInfo implementations 819 /// @{ 820 821 /// Get the ISD node that corresponds to the Instruction class opcode. 822 int InstructionOpcodeToISD(unsigned Opcode) const; 823 824 /// Estimate the cost of type-legalization and the legalized type. 825 std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const; 826 827 /// @} 828 829 //===--------------------------------------------------------------------===// 830 // TargetLowering Configuration Methods - These methods should be invoked by 831 // the derived class constructor to configure this object for the target. 832 // 833 834 /// \brief Reset the operation actions based on target options. 835 virtual void resetOperationActions() {} 836 837protected: 838 /// Specify how the target extends the result of a boolean value from i1 to a 839 /// wider type. See getBooleanContents. 840 void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; } 841 842 /// Specify how the target extends the result of a vector boolean value from a 843 /// vector of i1 to a wider type. See getBooleanContents. 844 void setBooleanVectorContents(BooleanContent Ty) { 845 BooleanVectorContents = Ty; 846 } 847 848 /// Specify the target scheduling preference. 849 void setSchedulingPreference(Sched::Preference Pref) { 850 SchedPreferenceInfo = Pref; 851 } 852 853 /// Indicate whether this target prefers to use _setjmp to implement 854 /// llvm.setjmp or the non _ version. Defaults to false. 855 void setUseUnderscoreSetJmp(bool Val) { 856 UseUnderscoreSetJmp = Val; 857 } 858 859 /// Indicate whether this target prefers to use _longjmp to implement 860 /// llvm.longjmp or the non _ version. Defaults to false. 861 void setUseUnderscoreLongJmp(bool Val) { 862 UseUnderscoreLongJmp = Val; 863 } 864 865 /// Indicate whether the target can generate code for jump tables. 866 void setSupportJumpTables(bool Val) { 867 SupportJumpTables = Val; 868 } 869 870 /// Indicate the number of blocks to generate jump tables rather than if 871 /// sequence. 872 void setMinimumJumpTableEntries(int Val) { 873 MinimumJumpTableEntries = Val; 874 } 875 876 /// If set to a physical register, this specifies the register that 877 /// llvm.savestack/llvm.restorestack should save and restore. 878 void setStackPointerRegisterToSaveRestore(unsigned R) { 879 StackPointerRegisterToSaveRestore = R; 880 } 881 882 /// If set to a physical register, this sets the register that receives the 883 /// exception address on entry to a landing pad. 884 void setExceptionPointerRegister(unsigned R) { 885 ExceptionPointerRegister = R; 886 } 887 888 /// If set to a physical register, this sets the register that receives the 889 /// exception typeid on entry to a landing pad. 890 void setExceptionSelectorRegister(unsigned R) { 891 ExceptionSelectorRegister = R; 892 } 893 894 /// Tells the code generator not to expand operations into sequences that use 895 /// the select operations if possible. 
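///
/// For example (illustrative only), a target without a native conditional
/// move might call, from its constructor:
///   setSelectIsExpensive();
/// steering the code generator toward branch-based lowerings of select where
/// that is likely to be profitable.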
896 void setSelectIsExpensive(bool isExpensive = true) { 897 SelectIsExpensive = isExpensive; 898 } 899 900 /// Tells the code generator not to expand sequence of operations into a 901 /// separate sequences that increases the amount of flow control. 902 void setJumpIsExpensive(bool isExpensive = true) { 903 JumpIsExpensive = isExpensive; 904 } 905 906 /// Tells the code generator that integer divide is expensive, and if 907 /// possible, should be replaced by an alternate sequence of instructions not 908 /// containing an integer divide. 909 void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; } 910 911 /// Tells the code generator which bitwidths to bypass. 912 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) { 913 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth; 914 } 915 916 /// Tells the code generator that it shouldn't generate srl/add/sra for a 917 /// signed divide by power of two, and let the target handle it. 918 void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; } 919 920 /// Add the specified register class as an available regclass for the 921 /// specified value type. This indicates the selector can handle values of 922 /// that class natively. 923 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) { 924 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT)); 925 AvailableRegClasses.push_back(std::make_pair(VT, RC)); 926 RegClassForVT[VT.SimpleTy] = RC; 927 } 928 929 /// Remove all register classes. 930 void clearRegisterClasses() { 931 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*)); 932 933 AvailableRegClasses.clear(); 934 } 935 936 /// \brief Remove all operation actions. 937 void clearOperationActions() { 938 } 939 940 /// Return the largest legal super-reg register class of the register class 941 /// for the specified type and its associated "cost". 942 virtual std::pair<const TargetRegisterClass*, uint8_t> 943 findRepresentativeClass(MVT VT) const; 944 945 /// Once all of the register classes are added, this allows us to compute 946 /// derived properties we expose. 947 void computeRegisterProperties(); 948 949 /// Indicate that the specified operation does not work with the specified 950 /// type and indicate what to do about it. 951 void setOperationAction(unsigned Op, MVT VT, 952 LegalizeAction Action) { 953 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!"); 954 OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action; 955 } 956 957 /// Indicate that the specified load with extension does not work with the 958 /// specified type and indicate what to do about it. 959 void setLoadExtAction(unsigned ExtType, MVT VT, 960 LegalizeAction Action) { 961 assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE && 962 "Table isn't big enough!"); 963 LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action; 964 } 965 966 /// Indicate that the specified truncating store does not work with the 967 /// specified type and indicate what to do about it. 968 void setTruncStoreAction(MVT ValVT, MVT MemVT, 969 LegalizeAction Action) { 970 assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE && 971 "Table isn't big enough!"); 972 TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action; 973 } 974 975 /// Indicate that the specified indexed load does or does not work with the 976 /// specified type and indicate what to do abort it. 
977 /// 978 /// NOTE: All indexed mode loads are initialized to Expand in 979 /// TargetLowering.cpp 980 void setIndexedLoadAction(unsigned IdxMode, MVT VT, 981 LegalizeAction Action) { 982 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE && 983 (unsigned)Action < 0xf && "Table isn't big enough!"); 984 // Load action are kept in the upper half. 985 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0; 986 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4; 987 } 988 989 /// Indicate that the specified indexed store does or does not work with the 990 /// specified type and indicate what to do about it. 991 /// 992 /// NOTE: All indexed mode stores are initialized to Expand in 993 /// TargetLowering.cpp 994 void setIndexedStoreAction(unsigned IdxMode, MVT VT, 995 LegalizeAction Action) { 996 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE && 997 (unsigned)Action < 0xf && "Table isn't big enough!"); 998 // Store action are kept in the lower half. 999 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f; 1000 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action); 1001 } 1002 1003 /// Indicate that the specified condition code is or isn't supported on the 1004 /// target and indicate what to do about it. 1005 void setCondCodeAction(ISD::CondCode CC, MVT VT, 1006 LegalizeAction Action) { 1007 assert(VT < MVT::LAST_VALUETYPE && 1008 (unsigned)CC < array_lengthof(CondCodeActions) && 1009 "Table isn't big enough!"); 1010 /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit 1011 /// value and the upper 27 bits index into the second dimension of the 1012 /// array to select what 64bit value to use. 1013 CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5] 1014 &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2); 1015 CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5] 1016 |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2; 1017 } 1018 1019 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults 1020 /// to trying a larger integer/fp until it can find one that works. If that 1021 /// default is insufficient, this method can be used by the target to override 1022 /// the default. 1023 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 1024 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy; 1025 } 1026 1027 /// Targets should invoke this method for each target independent node that 1028 /// they want to provide a custom DAG combiner for by implementing the 1029 /// PerformDAGCombine virtual method. 1030 void setTargetDAGCombine(ISD::NodeType NT) { 1031 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 1032 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); 1033 } 1034 1035 /// Set the target's required jmp_buf buffer size (in bytes); default is 200 1036 void setJumpBufSize(unsigned Size) { 1037 JumpBufSize = Size; 1038 } 1039 1040 /// Set the target's required jmp_buf buffer alignment (in bytes); default is 1041 /// 0 1042 void setJumpBufAlignment(unsigned Align) { 1043 JumpBufAlignment = Align; 1044 } 1045 1046 /// Set the target's minimum function alignment (in log2(bytes)) 1047 void setMinFunctionAlignment(unsigned Align) { 1048 MinFunctionAlignment = Align; 1049 } 1050 1051 /// Set the target's preferred function alignment. 
This should be set if 1052 /// there is a performance benefit to higher-than-minimum alignment (in 1053 /// log2(bytes)) 1054 void setPrefFunctionAlignment(unsigned Align) { 1055 PrefFunctionAlignment = Align; 1056 } 1057 1058 /// Set the target's preferred loop alignment. Default alignment is zero, it 1059 /// means the target does not care about loop alignment. The alignment is 1060 /// specified in log2(bytes). 1061 void setPrefLoopAlignment(unsigned Align) { 1062 PrefLoopAlignment = Align; 1063 } 1064 1065 /// Set the minimum stack alignment of an argument (in log2(bytes)). 1066 void setMinStackArgumentAlignment(unsigned Align) { 1067 MinStackArgumentAlignment = Align; 1068 } 1069 1070 /// Set if the DAG builder should automatically insert fences and reduce the 1071 /// order of atomic memory operations to Monotonic. 1072 void setInsertFencesForAtomic(bool fence) { 1073 InsertFencesForAtomic = fence; 1074 } 1075 1076public: 1077 //===--------------------------------------------------------------------===// 1078 // Addressing mode description hooks (used by LSR etc). 1079 // 1080 1081 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store 1082 /// instructions reading the address. This allows as much computation as 1083 /// possible to be done in the address mode for that operand. This hook lets 1084 /// targets also pass back when this should be done on intrinsics which 1085 /// load/store. 1086 virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/, 1087 SmallVectorImpl<Value*> &/*Ops*/, 1088 Type *&/*AccessTy*/) const { 1089 return false; 1090 } 1091 1092 /// This represents an addressing mode of: 1093 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg 1094 /// If BaseGV is null, there is no BaseGV. 1095 /// If BaseOffs is zero, there is no base offset. 1096 /// If HasBaseReg is false, there is no base register. 1097 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with 1098 /// no scale. 1099 struct AddrMode { 1100 GlobalValue *BaseGV; 1101 int64_t BaseOffs; 1102 bool HasBaseReg; 1103 int64_t Scale; 1104 AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {} 1105 }; 1106 1107 /// Return true if the addressing mode represented by AM is legal for this 1108 /// target, for a load/store of the specified type. 1109 /// 1110 /// The type may be VoidTy, in which case only return true if the addressing 1111 /// mode is legal for a load/store of any legal type. TODO: Handle 1112 /// pre/postinc as well. 1113 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const; 1114 1115 /// \brief Return the cost of the scaling factor used in the addressing mode 1116 /// represented by AM for this target, for a load/store of the specified type. 1117 /// 1118 /// If the AM is supported, the return value must be >= 0. 1119 /// If the AM is not supported, it returns a negative value. 1120 /// TODO: Handle pre/postinc as well. 1121 virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const { 1122 // Default: assume that any scaling factor used in a legal AM is free. 1123 if (isLegalAddressingMode(AM, Ty)) return 0; 1124 return -1; 1125 } 1126 1127 /// Return true if the specified immediate is legal icmp immediate, that is 1128 /// the target has icmp instructions which can compare a register against the 1129 /// immediate without having to materialize the immediate into a register. 
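///
/// For example (an illustrative sketch), a RISC-like target whose compare
/// instructions accept a 12-bit signed immediate might override this as:
///   bool MyTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
///     return isInt<12>(Imm); // isInt<> comes from Support/MathExtras.h
///   }
/// where MyTargetLowering and the 12-bit width are hypothetical.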
1130 virtual bool isLegalICmpImmediate(int64_t) const { 1131 return true; 1132 } 1133 1134 /// Return true if the specified immediate is legal add immediate, that is the 1135 /// target has add instructions which can add a register with the immediate 1136 /// without having to materialize the immediate into a register. 1137 virtual bool isLegalAddImmediate(int64_t) const { 1138 return true; 1139 } 1140 1141 /// Return true if it's free to truncate a value of type Ty1 to type 1142 /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16 1143 /// by referencing its sub-register AX. 1144 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const { 1145 return false; 1146 } 1147 1148 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const { 1149 return false; 1150 } 1151 1152 /// Return true if any actual instruction that defines a value of type Ty1 1153 /// implicitly zero-extends the value to Ty2 in the result register. 1154 /// 1155 /// This does not necessarily include registers defined in unknown ways, such 1156 /// as incoming arguments, or copies from unknown virtual registers. Also, if 1157 /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to 1158 /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit 1159 /// values implicit zero-extend the result out to 64 bits. 1160 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const { 1161 return false; 1162 } 1163 1164 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const { 1165 return false; 1166 } 1167 1168 /// Return true if zero-extending the specific node Val to type VT2 is free 1169 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or 1170 /// because it's folded such as X86 zero-extending loads). 1171 virtual bool isZExtFree(SDValue Val, EVT VT2) const { 1172 return isZExtFree(Val.getValueType(), VT2); 1173 } 1174 1175 /// Return true if an fneg operation is free to the point where it is never 1176 /// worthwhile to replace it with a bitwise operation. 1177 virtual bool isFNegFree(EVT) const { 1178 return false; 1179 } 1180 1181 /// Return true if an fneg operation is free to the point where it is never 1182 /// worthwhile to replace it with a bitwise operation. 1183 virtual bool isFAbsFree(EVT) const { 1184 return false; 1185 } 1186 1187 /// Return true if an FMA operation is faster than a pair of fmul and fadd 1188 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method 1189 /// returns true, otherwise fmuladd is expanded to fmul + fadd. 1190 /// 1191 /// NOTE: This may be called before legalization on types for which FMAs are 1192 /// not legal, but should return true if those types will eventually legalize 1193 /// to types that support FMAs. After legalization, it will only be called on 1194 /// types that support FMAs (via Legal or Custom actions) 1195 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const { 1196 return false; 1197 } 1198 1199 /// Return true if it's profitable to narrow operations of type VT1 to 1200 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from 1201 /// i32 to i16. 1202 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const { 1203 return false; 1204 } 1205 1206 //===--------------------------------------------------------------------===// 1207 // Runtime Library hooks 1208 // 1209 1210 /// Rename the default libcall routine name for the specified libcall. 
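///
/// For example (illustrative only, with a made-up symbol name), a target that
/// ships its own runtime might call, from its constructor:
///   setLibcallName(RTLIB::MEMCPY, "__my_rt_memcpy");
///   setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::C);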
1211 void setLibcallName(RTLIB::Libcall Call, const char *Name) { 1212 LibcallRoutineNames[Call] = Name; 1213 } 1214 1215 /// Get the libcall routine name for the specified libcall. 1216 const char *getLibcallName(RTLIB::Libcall Call) const { 1217 return LibcallRoutineNames[Call]; 1218 } 1219 1220 /// Override the default CondCode to be used to test the result of the 1221 /// comparison libcall against zero. 1222 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { 1223 CmpLibcallCCs[Call] = CC; 1224 } 1225 1226 /// Get the CondCode that's to be used to test the result of the comparison 1227 /// libcall against zero. 1228 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { 1229 return CmpLibcallCCs[Call]; 1230 } 1231 1232 /// Set the CallingConv that should be used for the specified libcall. 1233 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) { 1234 LibcallCallingConvs[Call] = CC; 1235 } 1236 1237 /// Get the CallingConv that should be used for the specified libcall. 1238 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const { 1239 return LibcallCallingConvs[Call]; 1240 } 1241 1242private: 1243 const TargetMachine &TM; 1244 const DataLayout *TD; 1245 const TargetLoweringObjectFile &TLOF; 1246 1247 /// The type to use for pointers for the default address space, usually i32 or 1248 /// i64. 1249 MVT PointerTy; 1250 1251 /// True if this is a little endian target. 1252 bool IsLittleEndian; 1253 1254 /// Tells the code generator not to expand operations into sequences that use 1255 /// the select operations if possible. 1256 bool SelectIsExpensive; 1257 1258 /// Tells the code generator not to expand integer divides by constants into a 1259 /// sequence of muls, adds, and shifts. This is a hack until a real cost 1260 /// model is in place. If we ever optimize for size, this will be set to true 1261 /// unconditionally. 1262 bool IntDivIsCheap; 1263 1264 /// Tells the code generator to bypass slow divide or remainder 1265 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code 1266 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer 1267 /// div/rem when the operands are positive and less than 256. 1268 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths; 1269 1270 /// Tells the code generator that it shouldn't generate srl/add/sra for a 1271 /// signed divide by power of two, and let the target handle it. 1272 bool Pow2DivIsCheap; 1273 1274 /// Tells the code generator that it shouldn't generate extra flow control 1275 /// instructions and should attempt to combine flow control instructions via 1276 /// predication. 1277 bool JumpIsExpensive; 1278 1279 /// This target prefers to use _setjmp to implement llvm.setjmp. 1280 /// 1281 /// Defaults to false. 1282 bool UseUnderscoreSetJmp; 1283 1284 /// This target prefers to use _longjmp to implement llvm.longjmp. 1285 /// 1286 /// Defaults to false. 1287 bool UseUnderscoreLongJmp; 1288 1289 /// Whether the target can generate code for jumptables. If it's not true, 1290 /// then each jumptable must be lowered into if-then-else's. 1291 bool SupportJumpTables; 1292 1293 /// Number of blocks threshold to use jump tables. 1294 int MinimumJumpTableEntries; 1295 1296 /// Information about the contents of the high-bits in boolean values held in 1297 /// a type wider than i1. See getBooleanContents. 
1298 BooleanContent BooleanContents; 1299 1300 /// Information about the contents of the high-bits in boolean vector values 1301 /// when the element type is wider than i1. See getBooleanContents. 1302 BooleanContent BooleanVectorContents; 1303 1304 /// The target scheduling preference: shortest possible total cycles or lowest 1305 /// register usage. 1306 Sched::Preference SchedPreferenceInfo; 1307 1308 /// The size, in bytes, of the target's jmp_buf buffers 1309 unsigned JumpBufSize; 1310 1311 /// The alignment, in bytes, of the target's jmp_buf buffers 1312 unsigned JumpBufAlignment; 1313 1314 /// The minimum alignment that any argument on the stack needs to have. 1315 unsigned MinStackArgumentAlignment; 1316 1317 /// The minimum function alignment (used when optimizing for size, and to 1318 /// prevent explicitly provided alignment from leading to incorrect code). 1319 unsigned MinFunctionAlignment; 1320 1321 /// The preferred function alignment (used when alignment unspecified and 1322 /// optimizing for speed). 1323 unsigned PrefFunctionAlignment; 1324 1325 /// The preferred loop alignment. 1326 unsigned PrefLoopAlignment; 1327 1328 /// Whether the DAG builder should automatically insert fences and reduce 1329 /// ordering for atomics. (This will be set for for most architectures with 1330 /// weak memory ordering.) 1331 bool InsertFencesForAtomic; 1332 1333 /// If set to a physical register, this specifies the register that 1334 /// llvm.savestack/llvm.restorestack should save and restore. 1335 unsigned StackPointerRegisterToSaveRestore; 1336 1337 /// If set to a physical register, this specifies the register that receives 1338 /// the exception address on entry to a landing pad. 1339 unsigned ExceptionPointerRegister; 1340 1341 /// If set to a physical register, this specifies the register that receives 1342 /// the exception typeid on entry to a landing pad. 1343 unsigned ExceptionSelectorRegister; 1344 1345 /// This indicates the default register class to use for each ValueType the 1346 /// target supports natively. 1347 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; 1348 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; 1349 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; 1350 1351 /// This indicates the "representative" register class to use for each 1352 /// ValueType the target supports natively. This information is used by the 1353 /// scheduler to track register pressure. By default, the representative 1354 /// register class is the largest legal super-reg register class of the 1355 /// register class of the specified type. e.g. On x86, i8, i16, and i32's 1356 /// representative class would be GR32. 1357 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE]; 1358 1359 /// This indicates the "cost" of the "representative" register class for each 1360 /// ValueType. The cost is used by the scheduler to approximate register 1361 /// pressure. 1362 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE]; 1363 1364 /// For any value types we are promoting or expanding, this contains the value 1365 /// type that we are changing to. For Expanded types, this contains one step 1366 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required 1367 /// (e.g. i64 -> i16). For types natively supported by the system, this holds 1368 /// the same type (e.g. i32 -> i32). 
1369 MVT TransformToType[MVT::LAST_VALUETYPE]; 1370 1371 /// For each operation and each value type, keep a LegalizeAction that 1372 /// indicates how instruction selection should deal with the operation. Most 1373 /// operations are Legal (aka, supported natively by the target), but 1374 /// operations that are not should be described. Note that operations on 1375 /// non-legal value types are not described here. 1376 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END]; 1377 1378 /// For each load extension type and each value type, keep a LegalizeAction 1379 /// that indicates how instruction selection should deal with a load of a 1380 /// specific value type and extension type. 1381 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE]; 1382 1383 /// For each value type pair keep a LegalizeAction that indicates whether a 1384 /// truncating store of a specific value type and truncating type is legal. 1385 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]; 1386 1387 /// For each indexed mode and each value type, keep a pair of LegalizeAction 1388 /// that indicates how instruction selection should deal with the load / 1389 /// store. 1390 /// 1391 /// The first dimension is the value_type for the reference. The second 1392 /// dimension represents the various modes for load store. 1393 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE]; 1394 1395 /// For each condition code (ISD::CondCode) keep a LegalizeAction that 1396 /// indicates how instruction selection should deal with the condition code. 1397 /// 1398 /// Because each CC action takes up 2 bits, we need to have the array size be 1399 /// large enough to fit all of the value types. This can be done by dividing 1400 /// the MVT::LAST_VALUETYPE by 32 and adding one. 1401 uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1]; 1402 1403 ValueTypeActionImpl ValueTypeActions; 1404 1405public: 1406 LegalizeKind 1407 getTypeConversion(LLVMContext &Context, EVT VT) const { 1408 // If this is a simple type, use the ComputeRegisterProp mechanism. 1409 if (VT.isSimple()) { 1410 MVT SVT = VT.getSimpleVT(); 1411 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType)); 1412 MVT NVT = TransformToType[SVT.SimpleTy]; 1413 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT); 1414 1415 assert( 1416 (LA == TypeLegal || 1417 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) 1418 && "Promote may not follow Expand or Promote"); 1419 1420 if (LA == TypeSplitVector) 1421 return LegalizeKind(LA, EVT::getVectorVT(Context, 1422 SVT.getVectorElementType(), 1423 SVT.getVectorNumElements()/2)); 1424 if (LA == TypeScalarizeVector) 1425 return LegalizeKind(LA, SVT.getVectorElementType()); 1426 return LegalizeKind(LA, NVT); 1427 } 1428 1429 // Handle Extended Scalar Types. 1430 if (!VT.isVector()) { 1431 assert(VT.isInteger() && "Float types must be simple"); 1432 unsigned BitSize = VT.getSizeInBits(); 1433 // First promote to a power-of-two size, then expand if necessary. 1434 if (BitSize < 8 || !isPowerOf2_32(BitSize)) { 1435 EVT NVT = VT.getRoundIntegerType(Context); 1436 assert(NVT != VT && "Unable to round integer VT"); 1437 LegalizeKind NextStep = getTypeConversion(Context, NVT); 1438 // Avoid multi-step promotion. 1439 if (NextStep.first == TypePromoteInteger) return NextStep; 1440 // Return rounded integer type. 
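        // (For example, an i17 request reaches this point and is returned as a
        // promotion to the rounded type i32, assuming i32 itself is legal on
        // the target.)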
1441 return LegalizeKind(TypePromoteInteger, NVT); 1442 } 1443 1444 return LegalizeKind(TypeExpandInteger, 1445 EVT::getIntegerVT(Context, VT.getSizeInBits()/2)); 1446 } 1447 1448 // Handle vector types. 1449 unsigned NumElts = VT.getVectorNumElements(); 1450 EVT EltVT = VT.getVectorElementType(); 1451 1452 // Vectors with only one element are always scalarized. 1453 if (NumElts == 1) 1454 return LegalizeKind(TypeScalarizeVector, EltVT); 1455 1456 // Try to widen vector elements until a legal type is found. 1457 if (EltVT.isInteger()) { 1458 // Vectors with a number of elements that is not a power of two are always 1459 // widened, for example <3 x float> -> <4 x float>. 1460 if (!VT.isPow2VectorType()) { 1461 NumElts = (unsigned)NextPowerOf2(NumElts); 1462 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts); 1463 return LegalizeKind(TypeWidenVector, NVT); 1464 } 1465 1466 // Examine the element type. 1467 LegalizeKind LK = getTypeConversion(Context, EltVT); 1468 1469 // If type is to be expanded, split the vector. 1470 // <4 x i140> -> <2 x i140> 1471 if (LK.first == TypeExpandInteger) 1472 return LegalizeKind(TypeSplitVector, 1473 EVT::getVectorVT(Context, EltVT, NumElts / 2)); 1474 1475 // Promote the integer element types until a legal vector type is found 1476 // or until the element integer type is too big. If a legal type was not 1477 // found, fallback to the usual mechanism of widening/splitting the 1478 // vector. 1479 EVT OldEltVT = EltVT; 1480 while (1) { 1481 // Increase the bitwidth of the element to the next pow-of-two 1482 // (which is greater than 8 bits). 1483 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits() 1484 ).getRoundIntegerType(Context); 1485 1486 // Stop trying when getting a non-simple element type. 1487 // Note that vector elements may be greater than legal vector element 1488 // types. Example: X86 XMM registers hold 64bit element on 32bit systems. 1489 if (!EltVT.isSimple()) break; 1490 1491 // Build a new vector type and check if it is legal. 1492 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 1493 // Found a legal promoted vector type. 1494 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal) 1495 return LegalizeKind(TypePromoteInteger, 1496 EVT::getVectorVT(Context, EltVT, NumElts)); 1497 } 1498 1499 // Reset the type to the unexpanded type if we did not find a legal vector 1500 // type with a promoted vector element type. 1501 EltVT = OldEltVT; 1502 } 1503 1504 // Try to widen the vector until a legal type is found. 1505 // If there is no wider legal type, split the vector. 1506 while (1) { 1507 // Round up to the next power of 2. 1508 NumElts = (unsigned)NextPowerOf2(NumElts); 1509 1510 // If there is no simple vector type with this many elements then there 1511 // cannot be a larger legal vector type. Note that this assumes that 1512 // there are no skipped intermediate vector types in the simple types. 1513 if (!EltVT.isSimple()) break; 1514 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 1515 if (LargerVector == MVT()) break; 1516 1517 // If this type is legal then widen the vector. 1518 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal) 1519 return LegalizeKind(TypeWidenVector, LargerVector); 1520 } 1521 1522 // Widen odd vectors to next power of two. 1523 if (!VT.isPow2VectorType()) { 1524 EVT NVT = VT.getPow2VectorType(Context); 1525 return LegalizeKind(TypeWidenVector, NVT); 1526 } 1527 1528 // Vectors with illegal element types are expanded. 
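    // Split the vector in half; each half is legalized again by a later query
    // if it is still not legal.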
    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
    return LegalizeKind(TypeSplitVector, NVT);
  }

private:
  std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;

  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
    TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type. This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// Stores the name of each libcall.
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// The ISD::CondCode that should be used to test the result of each of the
  /// comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// Stores the CallingConv that should be used for each libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

protected:
  /// \brief Specify maximum number of store instructions per memset call.
  ///
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store. This only applies to setting a constant array of a constant size.
  unsigned MaxStoresPerMemset;

  /// Maximum number of store operations that may be substituted for the call
  /// to memset, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemsetOptSize;

  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;

  /// Maximum number of store operations that may be substituted for a call to
  /// memcpy, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores. This only
  /// applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;

  /// Maximum number of store instructions that may be substituted for a call to
  /// memmove, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;

  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

protected:
  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterClass *RC) const;
};

/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;

public:
  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLowering(const TargetMachine &TM,
                          const TargetLoweringObjectFile *TLOF);

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as a
  /// pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/, SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Return the entry encoding for a jump table in the current function. The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same as
  /// getPICJumpTableRelocBase, but as an MCExpr.
1672 virtual const MCExpr * 1673 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 1674 unsigned JTI, MCContext &Ctx) const; 1675 1676 /// Return true if folding a constant offset with the given GlobalAddress is 1677 /// legal. It is frequently not legal in PIC relocation models. 1678 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 1679 1680 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 1681 SDValue &Chain) const; 1682 1683 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, 1684 SDValue &NewLHS, SDValue &NewRHS, 1685 ISD::CondCode &CCCode, SDLoc DL) const; 1686 1687 SDValue makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, 1688 const SDValue *Ops, unsigned NumOps, 1689 bool isSigned, SDLoc dl) const; 1690 1691 //===--------------------------------------------------------------------===// 1692 // TargetLowering Optimization Methods 1693 // 1694 1695 /// A convenience struct that encapsulates a DAG, and two SDValues for 1696 /// returning information from TargetLowering to its clients that want to 1697 /// combine. 1698 struct TargetLoweringOpt { 1699 SelectionDAG &DAG; 1700 bool LegalTys; 1701 bool LegalOps; 1702 SDValue Old; 1703 SDValue New; 1704 1705 explicit TargetLoweringOpt(SelectionDAG &InDAG, 1706 bool LT, bool LO) : 1707 DAG(InDAG), LegalTys(LT), LegalOps(LO) {} 1708 1709 bool LegalTypes() const { return LegalTys; } 1710 bool LegalOperations() const { return LegalOps; } 1711 1712 bool CombineTo(SDValue O, SDValue N) { 1713 Old = O; 1714 New = N; 1715 return true; 1716 } 1717 1718 /// Check to see if the specified operand of the specified instruction is a 1719 /// constant integer. If so, check to see if there are any bits set in the 1720 /// constant that are not demanded. If so, shrink the constant and return 1721 /// true. 1722 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded); 1723 1724 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This 1725 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be 1726 /// generalized for targets with other types of implicit widening casts. 1727 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, 1728 SDLoc dl); 1729 }; 1730 1731 /// Look at Op. At this point, we know that only the DemandedMask bits of the 1732 /// result of Op are ever used downstream. If we can use this information to 1733 /// simplify Op, create a new simplified DAG node and return true, returning 1734 /// the original and new nodes in Old and New. Otherwise, analyze the 1735 /// expression and return a mask of KnownOne and KnownZero bits for the 1736 /// expression (used to simplify the caller). The KnownZero/One bits may only 1737 /// be accurate for those bits in the DemandedMask. 1738 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 1739 APInt &KnownZero, APInt &KnownOne, 1740 TargetLoweringOpt &TLO, unsigned Depth = 0) const; 1741 1742 /// Determine which of the bits specified in Mask are known to be either zero 1743 /// or one and return them in the KnownZero/KnownOne bitsets. 1744 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 1745 APInt &KnownZero, 1746 APInt &KnownOne, 1747 const SelectionDAG &DAG, 1748 unsigned Depth = 0) const; 1749 1750 /// This method can be implemented by targets that want to expose additional 1751 /// information about sign bits to the DAG Combiner. 
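  ///
  /// For example (an illustrative sketch; MyISD::SEXT8 is a hypothetical
  /// target opcode), a node known to produce a value sign-extended from 8 bits
  /// could report:
  /// \code
  ///   if (Op.getOpcode() == MyISD::SEXT8)
  ///     return Op.getValueType().getSizeInBits() - 7;
  /// \endcode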
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeVectorOps() const {
      return Level == AfterLegalizeDAG;
    }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    void RemoveFromWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                      bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI, SDLoc dl) const;

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered to have this hook
  /// invoked for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for the dag combiner to transform a
  /// floating point op of the specified opcode to an equivalent op of an
  /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for the dag
  /// combiner to promote the specified node. If true, it should return the
  /// desired promotion type by reference.
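  ///
  /// An illustrative sketch of an override for a target that prefers 32-bit
  /// operations over 16-bit ones (the real policy is target-defined):
  /// \code
  ///   if (Op.getValueType() == MVT::i16) {
  ///     PVT = MVT::i32;
  ///     return true;
  ///   }
  ///   return false;
  /// \endcode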
1829 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const { 1830 return false; 1831 } 1832 1833 //===--------------------------------------------------------------------===// 1834 // Lowering methods - These methods must be implemented by targets so that 1835 // the SelectionDAGBuilder code knows how to lower these. 1836 // 1837 1838 /// This hook must be implemented to lower the incoming (formal) arguments, 1839 /// described by the Ins array, into the specified DAG. The implementation 1840 /// should fill in the InVals array with legal-type argument values, and 1841 /// return the resulting token chain value. 1842 /// 1843 virtual SDValue 1844 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, 1845 bool /*isVarArg*/, 1846 const SmallVectorImpl<ISD::InputArg> &/*Ins*/, 1847 SDLoc /*dl*/, SelectionDAG &/*DAG*/, 1848 SmallVectorImpl<SDValue> &/*InVals*/) const { 1849 llvm_unreachable("Not Implemented"); 1850 } 1851 1852 struct ArgListEntry { 1853 SDValue Node; 1854 Type* Ty; 1855 bool isSExt : 1; 1856 bool isZExt : 1; 1857 bool isInReg : 1; 1858 bool isSRet : 1; 1859 bool isNest : 1; 1860 bool isByVal : 1; 1861 bool isReturned : 1; 1862 uint16_t Alignment; 1863 1864 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false), 1865 isSRet(false), isNest(false), isByVal(false), isReturned(false), 1866 Alignment(0) { } 1867 }; 1868 typedef std::vector<ArgListEntry> ArgListTy; 1869 1870 /// This structure contains all information that is necessary for lowering 1871 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder 1872 /// needs to lower a call, and targets will see this struct in their LowerCall 1873 /// implementation. 1874 struct CallLoweringInfo { 1875 SDValue Chain; 1876 Type *RetTy; 1877 bool RetSExt : 1; 1878 bool RetZExt : 1; 1879 bool IsVarArg : 1; 1880 bool IsInReg : 1; 1881 bool DoesNotReturn : 1; 1882 bool IsReturnValueUsed : 1; 1883 1884 // IsTailCall should be modified by implementations of 1885 // TargetLowering::LowerCall that perform tail call conversions. 1886 bool IsTailCall; 1887 1888 unsigned NumFixedArgs; 1889 CallingConv::ID CallConv; 1890 SDValue Callee; 1891 ArgListTy &Args; 1892 SelectionDAG &DAG; 1893 SDLoc DL; 1894 ImmutableCallSite *CS; 1895 SmallVector<ISD::OutputArg, 32> Outs; 1896 SmallVector<SDValue, 32> OutVals; 1897 SmallVector<ISD::InputArg, 32> Ins; 1898 1899 1900 /// Constructs a call lowering context based on the ImmutableCallSite \p cs. 1901 CallLoweringInfo(SDValue chain, Type *retTy, 1902 FunctionType *FTy, bool isTailCall, SDValue callee, 1903 ArgListTy &args, SelectionDAG &dag, SDLoc dl, 1904 ImmutableCallSite &cs) 1905 : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)), 1906 RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()), 1907 IsInReg(cs.paramHasAttr(0, Attribute::InReg)), 1908 DoesNotReturn(cs.doesNotReturn()), 1909 IsReturnValueUsed(!cs.getInstruction()->use_empty()), 1910 IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()), 1911 CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag), 1912 DL(dl), CS(&cs) {} 1913 1914 /// Constructs a call lowering context based on the provided call 1915 /// information. 
    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
                     CallingConv::ID callConv, bool isTailCall,
                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, SDLoc dl)
      : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
        IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
        IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
        NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
        Args(args), DAG(dag), DL(dl), CS(NULL) {}
  };

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// This hook must be implemented to lower calls into the specified DAG. The
  /// outgoing arguments to the call are described by the Outs array, and the
  /// values to be returned by the call are described by the Ins array. The
  /// implementation should fill in the InVals array with legal-type return
  /// values from the call, and return the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue
    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                bool /*isVarArg*/,
                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                const SmallVectorImpl<SDValue> &/*OutVals*/,
                SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if the result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// a tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
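  ///
  /// A minimal illustrative override, where CI is the name given to the
  /// CallInst parameter (real targets typically also check calling-convention
  /// and attribute compatibility):
  /// \code
  ///   return CI->isTailCall();
  /// \endcode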
1984 virtual bool mayBeEmittedAsTailCall(CallInst *) const { 1985 return false; 1986 } 1987 1988 /// Return the type that should be used to zero or sign extend a 1989 /// zeroext/signext integer argument or return value. FIXME: Most C calling 1990 /// convention requires the return type to be promoted, but this is not true 1991 /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C 1992 /// calling conventions. The frontend should handle this and include all of 1993 /// the necessary information. 1994 virtual MVT getTypeForExtArgOrReturn(MVT VT, 1995 ISD::NodeType /*ExtendKind*/) const { 1996 MVT MinVT = getRegisterType(MVT::i32); 1997 return VT.bitsLT(MinVT) ? MinVT : VT; 1998 } 1999 2000 /// This callback is invoked by the type legalizer to legalize nodes with an 2001 /// illegal operand type but legal result types. It replaces the 2002 /// LowerOperation callback in the type Legalizer. The reason we can not do 2003 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to 2004 /// use this callback. 2005 /// 2006 /// TODO: Consider merging with ReplaceNodeResults. 2007 /// 2008 /// The target places new result values for the node in Results (their number 2009 /// and types must exactly match those of the original return values of 2010 /// the node), or leaves Results empty, which indicates that the node is not 2011 /// to be custom lowered after all. 2012 /// The default implementation calls LowerOperation. 2013 virtual void LowerOperationWrapper(SDNode *N, 2014 SmallVectorImpl<SDValue> &Results, 2015 SelectionDAG &DAG) const; 2016 2017 /// This callback is invoked for operations that are unsupported by the 2018 /// target, which are registered to use 'custom' lowering, and whose defined 2019 /// values are all legal. If the target has no operations that require custom 2020 /// lowering, it need not implement this. The default implementation of this 2021 /// aborts. 2022 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 2023 2024 /// This callback is invoked when a node result type is illegal for the 2025 /// target, and the operation was registered to use 'custom' lowering for that 2026 /// result type. The target places new result values for the node in Results 2027 /// (their number and types must exactly match those of the original return 2028 /// values of the node), or leaves Results empty, which indicates that the 2029 /// node is not to be custom lowered after all. 2030 /// 2031 /// If the target has no operations that require custom lowering, it need not 2032 /// implement this. The default implementation aborts. 2033 virtual void ReplaceNodeResults(SDNode * /*N*/, 2034 SmallVectorImpl<SDValue> &/*Results*/, 2035 SelectionDAG &/*DAG*/) const { 2036 llvm_unreachable("ReplaceNodeResults not implemented for this target!"); 2037 } 2038 2039 /// This method returns the name of a target specific DAG node. 2040 virtual const char *getTargetNodeName(unsigned Opcode) const; 2041 2042 /// This method returns a target specific FastISel object, or null if the 2043 /// target does not support "fast" ISel. 2044 virtual FastISel *createFastISel(FunctionLoweringInfo &, 2045 const TargetLibraryInfo *) const { 2046 return 0; 2047 } 2048 2049 //===--------------------------------------------------------------------===// 2050 // Inline Asm Support hooks 2051 // 2052 2053 /// This hook allows the target to expand an inline asm call to be explicit 2054 /// llvm code if it wants to. 
This is useful for turning simple inline asms 2055 /// into LLVM intrinsics, which gives the compiler more information about the 2056 /// behavior of the code. 2057 virtual bool ExpandInlineAsm(CallInst *) const { 2058 return false; 2059 } 2060 2061 enum ConstraintType { 2062 C_Register, // Constraint represents specific register(s). 2063 C_RegisterClass, // Constraint represents any of register(s) in class. 2064 C_Memory, // Memory constraint. 2065 C_Other, // Something else. 2066 C_Unknown // Unsupported constraint. 2067 }; 2068 2069 enum ConstraintWeight { 2070 // Generic weights. 2071 CW_Invalid = -1, // No match. 2072 CW_Okay = 0, // Acceptable. 2073 CW_Good = 1, // Good weight. 2074 CW_Better = 2, // Better weight. 2075 CW_Best = 3, // Best weight. 2076 2077 // Well-known weights. 2078 CW_SpecificReg = CW_Okay, // Specific register operands. 2079 CW_Register = CW_Good, // Register operands. 2080 CW_Memory = CW_Better, // Memory operands. 2081 CW_Constant = CW_Best, // Constant operand. 2082 CW_Default = CW_Okay // Default or don't know type. 2083 }; 2084 2085 /// This contains information for each constraint that we are lowering. 2086 struct AsmOperandInfo : public InlineAsm::ConstraintInfo { 2087 /// This contains the actual string for the code, like "m". TargetLowering 2088 /// picks the 'best' code from ConstraintInfo::Codes that most closely 2089 /// matches the operand. 2090 std::string ConstraintCode; 2091 2092 /// Information about the constraint code, e.g. Register, RegisterClass, 2093 /// Memory, Other, Unknown. 2094 TargetLowering::ConstraintType ConstraintType; 2095 2096 /// If this is the result output operand or a clobber, this is null, 2097 /// otherwise it is the incoming operand to the CallInst. This gets 2098 /// modified as the asm is processed. 2099 Value *CallOperandVal; 2100 2101 /// The ValueType for the operand value. 2102 MVT ConstraintVT; 2103 2104 /// Return true of this is an input operand that is a matching constraint 2105 /// like "4". 2106 bool isMatchingInputConstraint() const; 2107 2108 /// If this is an input matching constraint, this method returns the output 2109 /// operand it matches. 2110 unsigned getMatchedOperand() const; 2111 2112 /// Copy constructor for copying from an AsmOperandInfo. 2113 AsmOperandInfo(const AsmOperandInfo &info) 2114 : InlineAsm::ConstraintInfo(info), 2115 ConstraintCode(info.ConstraintCode), 2116 ConstraintType(info.ConstraintType), 2117 CallOperandVal(info.CallOperandVal), 2118 ConstraintVT(info.ConstraintVT) { 2119 } 2120 2121 /// Copy constructor for copying from a ConstraintInfo. 2122 AsmOperandInfo(const InlineAsm::ConstraintInfo &info) 2123 : InlineAsm::ConstraintInfo(info), 2124 ConstraintType(TargetLowering::C_Unknown), 2125 CallOperandVal(0), ConstraintVT(MVT::Other) { 2126 } 2127 }; 2128 2129 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector; 2130 2131 /// Split up the constraint string from the inline assembly value into the 2132 /// specific constraints and their prefixes, and also tie in the associated 2133 /// operand values. If this returns an empty vector, and if the constraint 2134 /// string itself isn't empty, there was an error parsing. 2135 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const; 2136 2137 /// Examine constraint type and operand type and determine a weight value. 2138 /// The operand object must already have been set up with the operand type. 
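 /// For example, a constant call operand matched against an immediate-style
 /// alternative would typically be given CW_Constant, while one that can only
 /// be placed in a register would be given CW_Register (illustrative; the exact
 /// weights are target-defined).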
2139 virtual ConstraintWeight getMultipleConstraintMatchWeight( 2140 AsmOperandInfo &info, int maIndex) const; 2141 2142 /// Examine constraint string and operand type and determine a weight value. 2143 /// The operand object must already have been set up with the operand type. 2144 virtual ConstraintWeight getSingleConstraintMatchWeight( 2145 AsmOperandInfo &info, const char *constraint) const; 2146 2147 /// Determines the constraint code and constraint type to use for the specific 2148 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 2149 /// If the actual operand being passed in is available, it can be passed in as 2150 /// Op, otherwise an empty SDValue can be passed. 2151 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, 2152 SDValue Op, 2153 SelectionDAG *DAG = 0) const; 2154 2155 /// Given a constraint, return the type of constraint it is for this target. 2156 virtual ConstraintType getConstraintType(const std::string &Constraint) const; 2157 2158 /// Given a physical register constraint (e.g. {edx}), return the register 2159 /// number and the register class for the register. 2160 /// 2161 /// Given a register class constraint, like 'r', if this corresponds directly 2162 /// to an LLVM register class, return a register of 0 and the register class 2163 /// pointer. 2164 /// 2165 /// This should only be used for C_Register constraints. On error, this 2166 /// returns a register number of 0 and a null register class pointer.. 2167 virtual std::pair<unsigned, const TargetRegisterClass*> 2168 getRegForInlineAsmConstraint(const std::string &Constraint, 2169 MVT VT) const; 2170 2171 /// Try to replace an X constraint, which matches anything, with another that 2172 /// has more specific requirements based on the type of the corresponding 2173 /// operand. This returns null if there is no replacement to make. 2174 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 2175 2176 /// Lower the specified operand into the Ops vector. If it is invalid, don't 2177 /// add anything to Ops. 2178 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, 2179 std::vector<SDValue> &Ops, 2180 SelectionDAG &DAG) const; 2181 2182 //===--------------------------------------------------------------------===// 2183 // Div utility functions 2184 // 2185 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl, 2186 SelectionDAG &DAG) const; 2187 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 2188 std::vector<SDNode*> *Created) const; 2189 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 2190 std::vector<SDNode*> *Created) const; 2191 2192 //===--------------------------------------------------------------------===// 2193 // Instruction Emitting Hooks 2194 // 2195 2196 // This method should be implemented by targets that mark instructions with 2197 // the 'usesCustomInserter' flag. These instructions are special in various 2198 // ways, which require special support to insert. The specified MachineInstr 2199 // is created but not inserted into any basic blocks, and this method is 2200 // called to expand it into a sequence of instructions, potentially also 2201 // creating new basic blocks and control flow. 2202 virtual MachineBasicBlock * 2203 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const; 2204 2205 /// This method should be implemented by targets that mark instructions with 2206 /// the 'hasPostISelHook' flag. 
These instructions must be adjusted after 2207 /// instruction selection by target hooks. e.g. To fill in optional defs for 2208 /// ARM 's' setting instructions. 2209 virtual void 2210 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const; 2211}; 2212 2213/// Given an LLVM IR type and return type attributes, compute the return value 2214/// EVTs and flags, and optionally also the offsets, if the return value is 2215/// being lowered to memory. 2216void GetReturnInfo(Type* ReturnType, AttributeSet attr, 2217 SmallVectorImpl<ISD::OutputArg> &Outs, 2218 const TargetLowering &TLI); 2219 2220} // end llvm namespace 2221 2222#endif 2223