TargetLowering.h revision ad6aedc7d980d407da4452ff3ed4592d3df1a3f7
//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes how to lower LLVM code to machine code.  This has three
// main components:
//
//  1. Which ValueTypes are natively supported by the target.
//  2. Which operations are supported for supported ValueTypes.
//  3. Cost thresholds for alternative implementations of certain operations.
//
// In addition it has a few other components, like information about FP
// immediates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/AddressingMode.h"
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class CCState;
  class FastISel;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineJumpTableInfo;
  class MCContext;
  class MCExpr;
  template<typename T> class SmallVectorImpl;
  class DataLayout;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,        // No preference
      Source,      // Follow source order.
      RegPressure, // Scheduling for lowest register pressure.
      Hybrid,      // Scheduling for both latency and register pressure.
      ILP,         // Scheduling for ILP in low register pressure mode.
      VLIW         // Scheduling for VLIW targets.
    };
  }


//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can
/// accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// LegalizeTypeAction - This enum indicates whether types are legal for a
  /// target, and if not, what action should be used to make them legal.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
98 TypeExpandInteger, // Split this integer into two of half the size. 99 TypeSoftenFloat, // Convert this float to a same size integer type. 100 TypeExpandFloat, // Split this float into two of half the size. 101 TypeScalarizeVector, // Replace this one-element vector with its element. 102 TypeSplitVector, // Split this vector into two of half the size. 103 TypeWidenVector // This vector should be widened into a larger vector. 104 }; 105 106 enum BooleanContent { // How the target represents true/false values. 107 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. 108 ZeroOrOneBooleanContent, // All bits zero except for bit 0. 109 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. 110 }; 111 112 enum SelectSupportKind { 113 ScalarValSelect, // The target supports scalar selects (ex: cmov). 114 ScalarCondVectorVal, // The target supports selects with a scalar condition 115 // and vector values (ex: cmov). 116 VectorMaskSelect // The target supports vector selects with a vector 117 // mask (ex: x86 blends). 118 }; 119 120 static ISD::NodeType getExtendForContent(BooleanContent Content) { 121 switch (Content) { 122 case UndefinedBooleanContent: 123 // Extend by adding rubbish bits. 124 return ISD::ANY_EXTEND; 125 case ZeroOrOneBooleanContent: 126 // Extend by adding zero bits. 127 return ISD::ZERO_EXTEND; 128 case ZeroOrNegativeOneBooleanContent: 129 // Extend by copying the sign bit. 130 return ISD::SIGN_EXTEND; 131 } 132 llvm_unreachable("Invalid content kind"); 133 } 134 135 /// NOTE: The constructor takes ownership of TLOF. 136 explicit TargetLowering(const TargetMachine &TM, 137 const TargetLoweringObjectFile *TLOF); 138 virtual ~TargetLowering(); 139 140 const TargetMachine &getTargetMachine() const { return TM; } 141 const DataLayout *getDataLayout() const { return TD; } 142 const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } 143 144 bool isBigEndian() const { return !IsLittleEndian; } 145 bool isLittleEndian() const { return IsLittleEndian; } 146 MVT getPointerTy() const { return PointerTy; } 147 virtual MVT getShiftAmountTy(EVT LHSTy) const; 148 149 /// isSelectExpensive - Return true if the select operation is expensive for 150 /// this target. 151 bool isSelectExpensive() const { return SelectIsExpensive; } 152 153 virtual bool isSelectSupported(SelectSupportKind kind) const { return true; } 154 155 /// isIntDivCheap() - Return true if integer divide is usually cheaper than 156 /// a sequence of several shifts, adds, and multiplies for this target. 157 bool isIntDivCheap() const { return IntDivIsCheap; } 158 159 /// isSlowDivBypassed - Returns true if target has indicated at least one 160 /// type should be bypassed. 161 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); } 162 163 /// getBypassSlowDivTypes - Returns map of slow types for division or 164 /// remainder with corresponding fast types 165 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const { 166 return BypassSlowDivWidths; 167 } 168 169 /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of 170 /// srl/add/sra. 171 bool isPow2DivCheap() const { return Pow2DivIsCheap; } 172 173 /// isJumpExpensive() - Return true if Flow Control is an expensive operation 174 /// that should be avoided. 175 bool isJumpExpensive() const { return JumpIsExpensive; } 176 177 /// isPredictableSelectExpensive - Return true if selects are only cheaper 178 /// than branches if the branch is unlikely to be predicted right. 
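  // Illustrative use of these hints (the caller below is hypothetical, not
  // part of this header): a lowering heuristic choosing between a select and
  // explicit control flow might consult them roughly as follows:
  //
  //   bool preferBranch(const TargetLowering &TLI, bool BranchPredictable) {
  //     if (TLI.isJumpExpensive())
  //       return false;            // flow control should be avoided
  //     if (TLI.isPredictableSelectExpensive() && BranchPredictable)
  //       return true;             // a well-predicted branch beats a select
  //     return !TLI.isSelectSupported(TargetLowering::ScalarValSelect);
  //   }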
  bool isPredictableSelectExpensive() const {
    return predictableSelectIsExpensive;
  }

  /// getSetCCResultType - Return the ValueType of the result of SETCC
  /// operations.  Also used to obtain the target's preferred type for
  /// the condition operand of SELECT and BRCOND nodes.  In the case of
  /// BRCOND the argument passed is MVT::Other since there are no other
  /// operands to get a type hint from.
  virtual EVT getSetCCResultType(EVT VT) const;

  /// getCmpLibcallReturnType - Return the ValueType for comparison
  /// libcalls.  Comparison libcalls include floating point comparison calls,
  /// and Ordered/Unordered check calls on floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// getBooleanContents - For targets without i1 registers, this gives the
  /// nature of the high-bits of boolean values held in types wider than i1.
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.
  /// Some cpus distinguish between vectors of boolean and scalars; the isVec
  /// parameter selects between the two kinds.  For example on X86 a scalar
  /// boolean should be zero extended from i1, while the elements of a vector
  /// of booleans should be sign extended from i1.
  BooleanContent getBooleanContents(bool isVec) const {
    return isVec ? BooleanVectorContents : BooleanContents;
  }

  /// getSchedulingPreference - Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// getSchedulingPreference - Some schedulers, e.g. hybrid, can switch to
  /// different scheduling heuristics for different nodes.  This function
  /// returns the preference (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
    assert(VT.isSimple() && "getRegClassFor called on illegal type!");
    const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// getRepRegClassFor - Return the 'representative' register class for the
  /// specified value type.  The 'representative' register class is the
  /// largest legal super-reg register class for the register class of the
  /// value type.  For example, on i386 the rep register class for i8, i16,
  /// and i32 is GR32; on x86_64 it is GR64.
  virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
    assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
    const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
    return RC;
  }

  /// getRepRegClassCostFor - Return the cost of the 'representative' register
  /// class for the specified value type.
  virtual uint8_t getRepRegClassCostFor(EVT VT) const {
    assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
    return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
  }

  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.
This means that it has a register that directly 250 /// holds it without promotions or expansions. 251 bool isTypeLegal(EVT VT) const { 252 assert(!VT.isSimple() || 253 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT)); 254 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0; 255 } 256 257 class ValueTypeActionImpl { 258 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum 259 /// that indicates how instruction selection should deal with the type. 260 uint8_t ValueTypeActions[MVT::LAST_VALUETYPE]; 261 262 public: 263 ValueTypeActionImpl() { 264 std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0); 265 } 266 267 LegalizeTypeAction getTypeAction(MVT VT) const { 268 return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy]; 269 } 270 271 void setTypeAction(EVT VT, LegalizeTypeAction Action) { 272 unsigned I = VT.getSimpleVT().SimpleTy; 273 ValueTypeActions[I] = Action; 274 } 275 }; 276 277 const ValueTypeActionImpl &getValueTypeActions() const { 278 return ValueTypeActions; 279 } 280 281 /// getTypeAction - Return how we should legalize values of this type, either 282 /// it is already legal (return 'Legal') or we need to promote it to a larger 283 /// type (return 'Promote'), or we need to expand it into multiple registers 284 /// of smaller integer type (return 'Expand'). 'Custom' is not an option. 285 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const { 286 return getTypeConversion(Context, VT).first; 287 } 288 LegalizeTypeAction getTypeAction(MVT VT) const { 289 return ValueTypeActions.getTypeAction(VT); 290 } 291 292 /// getTypeToTransformTo - For types supported by the target, this is an 293 /// identity function. For types that must be promoted to larger types, this 294 /// returns the larger type to promote to. For integer types that are larger 295 /// than the largest integer register, this contains one step in the expansion 296 /// to get to the smaller register. For illegal floating point types, this 297 /// returns the integer type to transform to. 298 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const { 299 return getTypeConversion(Context, VT).second; 300 } 301 302 /// getTypeToExpandTo - For types supported by the target, this is an 303 /// identity function. For types that must be expanded (i.e. integer types 304 /// that are larger than the largest integer register or illegal floating 305 /// point types), this returns the largest legal type it will be expanded to. 306 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const { 307 assert(!VT.isVector()); 308 while (true) { 309 switch (getTypeAction(Context, VT)) { 310 case TypeLegal: 311 return VT; 312 case TypeExpandInteger: 313 VT = getTypeToTransformTo(Context, VT); 314 break; 315 default: 316 llvm_unreachable("Type is not legal nor is it to be expanded!"); 317 } 318 } 319 } 320 321 /// getVectorTypeBreakdown - Vector types are broken down into some number of 322 /// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32 323 /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack. 324 /// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86. 325 /// 326 /// This method returns the number of registers needed, and the VT for each 327 /// register. It also returns the VT and quantity of the intermediate values 328 /// before they are promoted/expanded. 
  ///
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  EVT &RegisterVT) const;

  /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the
  /// intrinsic will need to map to a MemIntrinsicNode (touches memory).  If
  /// this is the case, it returns true and stores the intrinsic
  /// information into the IntrinsicInfo that was passed to the function.
  struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    EVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?
  };

  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively.  If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
  /// values are assumed to be legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// canOpTrap - Returns true if the operation can trap for the value type.
  /// VT must be a legal type.  By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal.  Targets can use
  /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
  /// to replace a VAND with a constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op > array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// isOperationLegalOrCustom - Return true if the specified operation is
  /// legal on this target or can be made legal with custom lowering.  This
  /// is used to help guide high-level lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// isOperationLegal - Return true if the specified operation is legal on
  /// this target.
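  // Illustrative constructor/query usage (the opcode and type choices are
  // placeholders, not taken from a real backend):
  //
  //   // In a derived target's constructor:
  //   setOperationAction(ISD::SDIV,  MVT::i32,   Expand);  // no hw divider
  //   setOperationAction(ISD::FSIN,  MVT::f64,   Expand);  // becomes a libcall
  //   setOperationAction(ISD::BR_JT, MVT::Other, Custom);  // LowerOperation hook
  //
  //   // Generic code consults the table before forming such a node:
  //   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     /* fold into a multiply-add */;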
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// getLoadExtAction - Return how this load with extension should be
  /// treated: either it is legal, needs to be promoted to a larger size,
  /// needs to be expanded to some other code sequence, or the target has a
  /// custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
  }

  /// isLoadExtLegal - Return true if the specified load with extension is
  /// legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() && getLoadExtAction(ExtType, VT) == Legal;
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size,
  /// needs to be expanded to some other code sequence, or the target has a
  /// custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
                                            [MemVT.getSimpleVT().SimpleTy];
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation
  /// is legal on this target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
           getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT) == Legal ||
       getIndexedLoadAction(IdxMode, VT) == Custom);
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is
  /// legal on this target.
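  // Illustrative usage (hypothetical target with post-increment addressing):
  //
  //   // In the target's constructor:
  //   setIndexedLoadAction(ISD::POST_INC,  MVT::i32, Legal);
  //   setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  //
  //   // DAGCombiner will then only form post-indexed memory operations for
  //   // types where isIndexedLoadLegal / isIndexedStoreLegal return true.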
486 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const { 487 return VT.isSimple() && 488 (getIndexedStoreAction(IdxMode, VT) == Legal || 489 getIndexedStoreAction(IdxMode, VT) == Custom); 490 } 491 492 /// getCondCodeAction - Return how the condition code should be treated: 493 /// either it is legal, needs to be expanded to some other code sequence, 494 /// or the target has a custom expander for it. 495 LegalizeAction 496 getCondCodeAction(ISD::CondCode CC, EVT VT) const { 497 assert((unsigned)CC < array_lengthof(CondCodeActions) && 498 (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 && 499 "Table isn't big enough!"); 500 /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit 501 /// value and the upper 27 bits index into the second dimension of the 502 /// array to select what 64bit value to use. 503 LegalizeAction Action = (LegalizeAction) 504 ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5] 505 >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3); 506 assert(Action != Promote && "Can't promote condition code!"); 507 return Action; 508 } 509 510 /// isCondCodeLegal - Return true if the specified condition code is legal 511 /// on this target. 512 bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const { 513 return getCondCodeAction(CC, VT) == Legal || 514 getCondCodeAction(CC, VT) == Custom; 515 } 516 517 518 /// getTypeToPromoteTo - If the action for this operation is to promote, this 519 /// method returns the ValueType to promote to. 520 EVT getTypeToPromoteTo(unsigned Op, EVT VT) const { 521 assert(getOperationAction(Op, VT) == Promote && 522 "This operation isn't promoted!"); 523 524 // See if this has an explicit type specified. 525 std::map<std::pair<unsigned, MVT::SimpleValueType>, 526 MVT::SimpleValueType>::const_iterator PTTI = 527 PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy)); 528 if (PTTI != PromoteToType.end()) return PTTI->second; 529 530 assert((VT.isInteger() || VT.isFloatingPoint()) && 531 "Cannot autopromote this type, add it with AddPromotedToType."); 532 533 EVT NVT = VT; 534 do { 535 NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1); 536 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && 537 "Didn't find type to promote to!"); 538 } while (!isTypeLegal(NVT) || 539 getOperationAction(Op, NVT) == Promote); 540 return NVT; 541 } 542 543 /// getValueType - Return the EVT corresponding to this LLVM type. 544 /// This is fixed by the LLVM operations except for the pointer size. If 545 /// AllowUnknown is true, this will return MVT::Other for types with no EVT 546 /// counterpart (e.g. structs), otherwise it will assert. 547 EVT getValueType(Type *Ty, bool AllowUnknown = false) const { 548 // Lower scalar pointers to native pointer types. 549 if (Ty->isPointerTy()) return PointerTy; 550 551 if (Ty->isVectorTy()) { 552 VectorType *VTy = cast<VectorType>(Ty); 553 Type *Elm = VTy->getElementType(); 554 // Lower vectors of pointers to native pointer types. 555 if (Elm->isPointerTy()) 556 Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext()); 557 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), 558 VTy->getNumElements()); 559 } 560 return EVT::getEVT(Ty, AllowUnknown); 561 } 562 563 564 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 565 /// function arguments in the caller parameter area. This is the actual 566 /// alignment, not its logarithm. 
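  // Illustrative mapping performed by getValueType above (assuming a target
  // whose pointers are 64 bits wide; TLI and Ctx are placeholders):
  //
  //   EVT A = TLI.getValueType(Type::getInt32Ty(Ctx));    // MVT::i32
  //   EVT B = TLI.getValueType(Type::getInt8PtrTy(Ctx));  // pointer VT, here MVT::i64
  //   EVT C = TLI.getValueType(VectorType::get(Type::getInt16Ty(Ctx), 4));
  //                                                       // MVT::v4i16
  //   // Aggregates have no EVT counterpart: getValueType asserts on them
  //   // unless AllowUnknown is true, in which case it returns MVT::Other.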
567 virtual unsigned getByValTypeAlignment(Type *Ty) const; 568 569 /// getRegisterType - Return the type of registers that this ValueType will 570 /// eventually require. 571 EVT getRegisterType(MVT VT) const { 572 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); 573 return RegisterTypeForVT[VT.SimpleTy]; 574 } 575 576 /// getRegisterType - Return the type of registers that this ValueType will 577 /// eventually require. 578 EVT getRegisterType(LLVMContext &Context, EVT VT) const { 579 if (VT.isSimple()) { 580 assert((unsigned)VT.getSimpleVT().SimpleTy < 581 array_lengthof(RegisterTypeForVT)); 582 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; 583 } 584 if (VT.isVector()) { 585 EVT VT1, RegisterVT; 586 unsigned NumIntermediates; 587 (void)getVectorTypeBreakdown(Context, VT, VT1, 588 NumIntermediates, RegisterVT); 589 return RegisterVT; 590 } 591 if (VT.isInteger()) { 592 return getRegisterType(Context, getTypeToTransformTo(Context, VT)); 593 } 594 llvm_unreachable("Unsupported extended type!"); 595 } 596 597 /// getNumRegisters - Return the number of registers that this ValueType will 598 /// eventually require. This is one for any types promoted to live in larger 599 /// registers, but may be more than one for types (like i64) that are split 600 /// into pieces. For types like i140, which are first promoted then expanded, 601 /// it is the number of registers needed to hold all the bits of the original 602 /// type. For an i140 on a 32 bit machine this means 5 registers. 603 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const { 604 if (VT.isSimple()) { 605 assert((unsigned)VT.getSimpleVT().SimpleTy < 606 array_lengthof(NumRegistersForVT)); 607 return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; 608 } 609 if (VT.isVector()) { 610 EVT VT1, VT2; 611 unsigned NumIntermediates; 612 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); 613 } 614 if (VT.isInteger()) { 615 unsigned BitWidth = VT.getSizeInBits(); 616 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); 617 return (BitWidth + RegWidth - 1) / RegWidth; 618 } 619 llvm_unreachable("Unsupported extended type!"); 620 } 621 622 /// ShouldShrinkFPConstant - If true, then instruction selection should 623 /// seek to shrink the FP constant of the specified type to a smaller type 624 /// in order to save space and / or reduce runtime. 625 virtual bool ShouldShrinkFPConstant(EVT) const { return true; } 626 627 /// hasTargetDAGCombine - If true, the target has custom DAG combine 628 /// transformations that it can perform for the specified node. 629 bool hasTargetDAGCombine(ISD::NodeType NT) const { 630 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 631 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); 632 } 633 634 /// This function returns the maximum number of store operations permitted 635 /// to replace a call to llvm.memset. The value is set by the target at the 636 /// performance threshold for such a replacement. If OptSize is true, 637 /// return the limit for functions that have OptSize attribute. 638 /// @brief Get maximum # of store operations permitted for llvm.memset 639 unsigned getMaxStoresPerMemset(bool OptSize) const { 640 return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset; 641 } 642 643 /// This function returns the maximum number of store operations permitted 644 /// to replace a call to llvm.memcpy. The value is set by the target at the 645 /// performance threshold for such a replacement. 
If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove.  The value is set by the target at
  /// the performance threshold for such a replacement.  If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
  }

  /// This function returns true if the target allows unaligned memory
  /// accesses of the specified type.  This is used, for example, in
  /// situations where an array copy/move/set is converted to a sequence of
  /// store operations.  Its use helps to ensure that such replacements don't
  /// generate code that causes an alignment error (trap) on the target
  /// machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  virtual bool allowsUnalignedMemoryAccesses(EVT) const {
    return false;
  }

  /// This function returns true if the target would benefit from code
  /// placement optimization.
  /// @brief Determine if the target should perform code placement
  /// optimization.
  bool shouldOptimizeCodePlacement() const {
    return benefitFromCodePlacementOpt;
  }

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove
  /// lowering.  If DstAlign is zero, the destination alignment can satisfy
  /// any constraint.  Similarly if SrcAlign is zero it means there isn't a
  /// need to check it against the alignment requirement, probably because
  /// the source does not need to be loaded.  If 'IsZeroVal' is true, that
  /// means it's safe to return a non-scalar-integer type, e.g. empty string
  /// source, constant, or loaded from memory.  'MemcpyStrSrc' indicates
  /// whether the memcpy source is constant so it does not need to be loaded.
  /// It returns MVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsZeroVal*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// supportJumpTables - return whether the target can generate code for
  /// jump tables.
  bool supportJumpTables() const {
    return SupportJumpTables;
  }

  /// getMinimumJumpTableEntries - return the integer threshold on the number
  /// of blocks at which jump tables are used rather than an if sequence.
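  // Illustrative check (hypothetical helper): switch lowering would only
  // build a jump table when the target supports them and the number of
  // destination blocks reaches this threshold:
  //
  //   bool useJumpTable(const TargetLowering &TLI, unsigned NumBlocks) {
  //     return TLI.supportJumpTables() &&
  //            NumBlocks >= (unsigned)TLI.getMinimumJumpTableEntries();
  //   }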
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should
  /// save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionPointerRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getMinStackArgumentAlignment - return the minimum stack alignment of an
  /// argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// getMinFunctionAlignment - return the minimum function alignment.
  ///
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// getPrefFunctionAlignment - return the preferred function alignment.
  ///
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// getShouldFoldAtomicFences - return whether the combiner should fold
  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
  ///
  bool getShouldFoldAtomicFences() const {
    return ShouldFoldAtomicFences;
  }

  /// getInsertFencesForAtomic - return whether the DAG builder should
  /// automatically insert fences and reduce ordering for atomics.
  ///
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/, SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
  /// current function.  The returned value is a member of the
  /// MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
  /// MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal.  It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// getStackCookieLocation - Return true if the target stores stack
  /// protector cookies at a fixed offset in some non-standard address
  /// space, and populates the address space and offset as
  /// appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can
  /// be used for loads / stores from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and
  /// two SDValues for returning information from TargetLowering to its
  /// clients that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
    /// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
    /// cast, but it could be generalized for targets with other types of
    /// implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          DebugLoc dl);
  };

  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we
  /// can use this information to simplify Op, create a new simplified DAG
  /// node and return true, returning the original and new nodes in Old and
  /// New.
902 /// Otherwise, analyze the expression and return a mask of KnownOne and 903 /// KnownZero bits for the expression (used to simplify the caller). 904 /// The KnownZero/One bits may only be accurate for those bits in the 905 /// DemandedMask. 906 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 907 APInt &KnownZero, APInt &KnownOne, 908 TargetLoweringOpt &TLO, unsigned Depth = 0) const; 909 910 /// computeMaskedBitsForTargetNode - Determine which of the bits specified in 911 /// Mask are known to be either zero or one and return them in the 912 /// KnownZero/KnownOne bitsets. 913 virtual void computeMaskedBitsForTargetNode(const SDValue Op, 914 APInt &KnownZero, 915 APInt &KnownOne, 916 const SelectionDAG &DAG, 917 unsigned Depth = 0) const; 918 919 /// ComputeNumSignBitsForTargetNode - This method can be implemented by 920 /// targets that want to expose additional information about sign bits to the 921 /// DAG Combiner. 922 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, 923 unsigned Depth = 0) const; 924 925 struct DAGCombinerInfo { 926 void *DC; // The DAG Combiner object. 927 bool BeforeLegalize; 928 bool BeforeLegalizeOps; 929 bool CalledByLegalizer; 930 public: 931 SelectionDAG &DAG; 932 933 DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc) 934 : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo), 935 CalledByLegalizer(cl), DAG(dag) {} 936 937 bool isBeforeLegalize() const { return BeforeLegalize; } 938 bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; } 939 bool isCalledByLegalizer() const { return CalledByLegalizer; } 940 941 void AddToWorklist(SDNode *N); 942 void RemoveFromWorklist(SDNode *N); 943 SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To, 944 bool AddTo = true); 945 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true); 946 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true); 947 948 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO); 949 }; 950 951 /// SimplifySetCC - Try to simplify a setcc built with the specified operands 952 /// and cc. If it is unable to simplify it, return a null SDValue. 953 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 954 ISD::CondCode Cond, bool foldBooleans, 955 DAGCombinerInfo &DCI, DebugLoc dl) const; 956 957 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 958 /// node is a GlobalAddress + offset. 959 virtual bool 960 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const; 961 962 /// PerformDAGCombine - This method will be invoked for all target nodes and 963 /// for any target-independent nodes that the target has registered with 964 /// invoke it for. 965 /// 966 /// The semantics are as follows: 967 /// Return Value: 968 /// SDValue.Val == 0 - No change was made 969 /// SDValue.Val == N - N was replaced, is dead, and is already handled. 970 /// otherwise - N should be replaced by the returned Operand. 971 /// 972 /// In addition, methods provided by DAGCombinerInfo may be used to perform 973 /// more complex transformations. 974 /// 975 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; 976 977 /// isTypeDesirableForOp - Return true if the target has native support for 978 /// the specified value type and it is 'desirable' to use the type for the 979 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16 980 /// instruction encodings are longer and some i16 instructions are slow. 
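  // Illustrative implementation pattern (MyTargetLowering and the helper
  // combineMUL are hypothetical): a target that called
  // setTargetDAGCombine(ISD::MUL) in its constructor would typically
  // implement this hook as a dispatch on the node's opcode:
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     default: break;
  //     case ISD::MUL: {
  //       SDValue Res = combineMUL(N, DCI);
  //       if (Res.getNode())
  //         return Res;                 // N should be replaced by Res
  //       break;
  //     }
  //     }
  //     return SDValue();               // no change was made
  //   }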
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
  /// the dag combiner to transform a floating point op of the specified
  /// opcode to an equivalent op of an integer type.  e.g. f32 load -> i32
  /// load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// IsDesirableToPromoteOp - This method queries the target whether it is
  /// beneficial for dag combiner to promote the specified node.  If true, it
  /// should return the desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

protected:
  /// setBooleanContents - Specify how the target extends the result of a
  /// boolean value from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
  /// setBooleanVectorContents - Specify how the target extends the result
  /// of a vector boolean value from a vector of i1 to a wider type.  See
  /// getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// setSchedulingPreference - Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// setSupportJumpTables - Indicate whether the target can generate code for
  /// jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }

  /// setMinimumJumpTableEntries - Indicate the minimum number of blocks for
  /// which jump tables should be generated rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// setStackPointerRegisterToSaveRestore - If set to a physical register,
  /// this specifies the register that llvm.stacksave/llvm.stackrestore
  /// should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
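  // These protected configuration setters are meant to be called from a
  // derived target's constructor.  A minimal sketch (MyTargetLowering,
  // MyTargetMachine and the chosen values are hypothetical):
  //
  //   MyTargetLowering::MyTargetLowering(MyTargetMachine &TM)
  //     : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  //     setBooleanContents(ZeroOrOneBooleanContent);
  //     setSchedulingPreference(Sched::Source);
  //     setMinimumJumpTableEntries(4);
  //     setUseUnderscoreSetJmp(true);
  //     computeRegisterProperties();   // after all addRegisterClass calls
  //   }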
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// setJumpIsExpensive - Tells the code generator not to expand sequences of
  /// operations into separate sequences that increase the amount of
  /// flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheaper than a sequence of shifts, adds, and multiplies on this target;
  /// if it is not, divides should be replaced by such alternate sequences
  /// where possible.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target
  /// handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type.  This indicates the selector can
  /// handle values of that class natively.
  void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
  }

  /// findRepresentativeClass - Return the largest legal super-reg register
  /// class of the register class for the specified type and its associated
  /// "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(EVT VT) const;

  /// computeRegisterProperties - Once all of the register classes are added,
  /// this allows us to compute derived properties we expose.
  void computeRegisterProperties();

  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }

  /// setLoadExtAction - Indicate that the specified load with extension does
  /// not work with the specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// setTruncStoreAction - Indicate that the specified truncating store does
  /// not work with the specified type and indicate what to do about it.
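  // Illustrative constructor usage for the extension/truncation tables (the
  // particular choices are placeholders, not taken from a real backend):
  //
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i1,  Promote); // no i1 sext loads
  //   setLoadExtAction(ISD::EXTLOAD,  MVT::f32, Expand);  // no f32->f64 extload
  //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);    // no f64->f32 truncstore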
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// setIndexedLoadAction - Indicate that the specified indexed load does or
  /// does not work with the specified type and indicate what to do about
  /// it.  NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
  }

  /// setIndexedStoreAction - Indicate that the specified indexed store does
  /// or does not work with the specified type and indicate what to do about
  /// it.  NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// setCondCodeAction - Indicate that the specified condition code is or
  /// isn't supported on the target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the
    /// 64bit value and the upper 27 bits index into the second dimension of
    /// the array to select what 64bit value to use.
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
  }

  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be
  /// used by the target to override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
1194 void setTargetDAGCombine(ISD::NodeType NT) { 1195 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 1196 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7); 1197 } 1198 1199 /// setJumpBufSize - Set the target's required jmp_buf buffer size (in 1200 /// bytes); default is 200 1201 void setJumpBufSize(unsigned Size) { 1202 JumpBufSize = Size; 1203 } 1204 1205 /// setJumpBufAlignment - Set the target's required jmp_buf buffer 1206 /// alignment (in bytes); default is 0 1207 void setJumpBufAlignment(unsigned Align) { 1208 JumpBufAlignment = Align; 1209 } 1210 1211 /// setMinFunctionAlignment - Set the target's minimum function alignment (in 1212 /// log2(bytes)) 1213 void setMinFunctionAlignment(unsigned Align) { 1214 MinFunctionAlignment = Align; 1215 } 1216 1217 /// setPrefFunctionAlignment - Set the target's preferred function alignment. 1218 /// This should be set if there is a performance benefit to 1219 /// higher-than-minimum alignment (in log2(bytes)) 1220 void setPrefFunctionAlignment(unsigned Align) { 1221 PrefFunctionAlignment = Align; 1222 } 1223 1224 /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default 1225 /// alignment is zero, it means the target does not care about loop alignment. 1226 /// The alignment is specified in log2(bytes). 1227 void setPrefLoopAlignment(unsigned Align) { 1228 PrefLoopAlignment = Align; 1229 } 1230 1231 /// setMinStackArgumentAlignment - Set the minimum stack alignment of an 1232 /// argument (in log2(bytes)). 1233 void setMinStackArgumentAlignment(unsigned Align) { 1234 MinStackArgumentAlignment = Align; 1235 } 1236 1237 /// setShouldFoldAtomicFences - Set if the target's implementation of the 1238 /// atomic operation intrinsics includes locking. Default is false. 1239 void setShouldFoldAtomicFences(bool fold) { 1240 ShouldFoldAtomicFences = fold; 1241 } 1242 1243 /// setInsertFencesForAtomic - Set if the DAG builder should 1244 /// automatically insert fences and reduce the order of atomic memory 1245 /// operations to Monotonic. 1246 void setInsertFencesForAtomic(bool fence) { 1247 InsertFencesForAtomic = fence; 1248 } 1249 1250public: 1251 //===--------------------------------------------------------------------===// 1252 // Lowering methods - These methods must be implemented by targets so that 1253 // the SelectionDAGLowering code knows how to lower these. 1254 // 1255 1256 /// LowerFormalArguments - This hook must be implemented to lower the 1257 /// incoming (formal) arguments, described by the Ins array, into the 1258 /// specified DAG. The implementation should fill in the InVals array 1259 /// with legal-type argument values, and return the resulting token 1260 /// chain value. 
1261 /// 1262 virtual SDValue 1263 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, 1264 bool /*isVarArg*/, 1265 const SmallVectorImpl<ISD::InputArg> &/*Ins*/, 1266 DebugLoc /*dl*/, SelectionDAG &/*DAG*/, 1267 SmallVectorImpl<SDValue> &/*InVals*/) const { 1268 llvm_unreachable("Not Implemented"); 1269 } 1270 1271 struct ArgListEntry { 1272 SDValue Node; 1273 Type* Ty; 1274 bool isSExt : 1; 1275 bool isZExt : 1; 1276 bool isInReg : 1; 1277 bool isSRet : 1; 1278 bool isNest : 1; 1279 bool isByVal : 1; 1280 uint16_t Alignment; 1281 1282 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false), 1283 isSRet(false), isNest(false), isByVal(false), Alignment(0) { } 1284 }; 1285 typedef std::vector<ArgListEntry> ArgListTy; 1286 1287 /// CallLoweringInfo - This structure contains all information that is 1288 /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the 1289 /// SelectionDAG builder needs to lower a call, and targets will see this 1290 /// struct in their LowerCall implementation. 1291 struct CallLoweringInfo { 1292 SDValue Chain; 1293 Type *RetTy; 1294 bool RetSExt : 1; 1295 bool RetZExt : 1; 1296 bool IsVarArg : 1; 1297 bool IsInReg : 1; 1298 bool DoesNotReturn : 1; 1299 bool IsReturnValueUsed : 1; 1300 1301 // IsTailCall should be modified by implementations of 1302 // TargetLowering::LowerCall that perform tail call conversions. 1303 bool IsTailCall; 1304 1305 unsigned NumFixedArgs; 1306 CallingConv::ID CallConv; 1307 SDValue Callee; 1308 ArgListTy &Args; 1309 SelectionDAG &DAG; 1310 DebugLoc DL; 1311 ImmutableCallSite *CS; 1312 SmallVector<ISD::OutputArg, 32> Outs; 1313 SmallVector<SDValue, 32> OutVals; 1314 SmallVector<ISD::InputArg, 32> Ins; 1315 1316 1317 /// CallLoweringInfo - Constructs a call lowering context based on the 1318 /// ImmutableCallSite \p cs. 1319 CallLoweringInfo(SDValue chain, Type *retTy, 1320 FunctionType *FTy, bool isTailCall, SDValue callee, 1321 ArgListTy &args, SelectionDAG &dag, DebugLoc dl, 1322 ImmutableCallSite &cs) 1323 : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasSExtAttr(0)), 1324 RetZExt(cs.paramHasZExtAttr(0)), IsVarArg(FTy->isVarArg()), 1325 IsInReg(cs.paramHasInRegAttr(0)), 1326 DoesNotReturn(cs.doesNotReturn()), 1327 IsReturnValueUsed(!cs.getInstruction()->use_empty()), 1328 IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()), 1329 CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag), 1330 DL(dl), CS(&cs) {} 1331 1332 /// CallLoweringInfo - Constructs a call lowering context based on the 1333 /// provided call information. 1334 CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt, 1335 bool isVarArg, bool isInReg, unsigned numFixedArgs, 1336 CallingConv::ID callConv, bool isTailCall, 1337 bool doesNotReturn, bool isReturnValueUsed, SDValue callee, 1338 ArgListTy &args, SelectionDAG &dag, DebugLoc dl) 1339 : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt), 1340 IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn), 1341 IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall), 1342 NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee), 1343 Args(args), DAG(dag), DL(dl), CS(NULL) {} 1344 }; 1345 1346 /// LowerCallTo - This function lowers an abstract call to a function into an 1347 /// actual call. This returns a pair of operands. The first element is the 1348 /// return value for the function (if RetTy is not VoidTy). The second 1349 /// element is the outgoing token chain. 
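  /// An illustrative sketch of driving this interface directly (Chain, RetTy,
  /// Callee, dl and the argument list are placeholders; helpers that emit
  /// runtime libcalls build a CallLoweringInfo in essentially this way):
  ///
  ///   TargetLowering::ArgListTy Args;            // filled in by the caller
  ///   TargetLowering::CallLoweringInfo CLI(Chain, RetTy,
  ///       /*retSExt=*/false, /*retZExt=*/false,
  ///       /*isVarArg=*/false, /*isInReg=*/false,
  ///       Args.size(), CallingConv::C, /*isTailCall=*/false,
  ///       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
  ///       Callee, Args, DAG, dl);
  ///   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
  ///   // Result.first is the call's return value, Result.second the chain.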
It calls LowerCall to do the actual
1350 /// lowering.
1351 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
1352
1353 /// LowerCall - This hook must be implemented to lower calls into
1354 /// the specified DAG. The outgoing arguments to the call are described
1355 /// by the Outs array, and the values to be returned by the call are
1356 /// described by the Ins array. The implementation should fill in the
1357 /// InVals array with legal-type return values from the call, and return
1358 /// the resulting token chain value.
1359 virtual SDValue
1360 LowerCall(CallLoweringInfo &/*CLI*/,
1361 SmallVectorImpl<SDValue> &/*InVals*/) const {
1362 llvm_unreachable("Not Implemented");
1363 }
1364
1365 /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
1366 virtual void HandleByVal(CCState *, unsigned &) const {}
1367
1368 /// CanLowerReturn - This hook should be implemented to check whether the
1369 /// return values described by the Outs array can fit into the return
1370 /// registers. If false is returned, an sret-demotion is performed.
1371 ///
1372 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
1373 MachineFunction &/*MF*/, bool /*isVarArg*/,
1374 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
1375 LLVMContext &/*Context*/) const
1376 {
1377 // Return true by default to get preexisting behavior.
1378 return true;
1379 }
1380
1381 /// LowerReturn - This hook must be implemented to lower outgoing
1382 /// return values, described by the Outs array, into the specified
1383 /// DAG. The implementation should return the resulting token chain
1384 /// value.
1385 ///
1386 virtual SDValue
1387 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1388 bool /*isVarArg*/,
1389 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
1390 const SmallVectorImpl<SDValue> &/*OutVals*/,
1391 DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
1392 llvm_unreachable("Not Implemented");
1393 }
1394
1395 /// isUsedByReturnOnly - Return true if the result of the specified node is
1396 /// used by a return node only. It also computes and returns the input chain
1397 /// for the tail call.
1398 /// This is used to determine whether it is possible
1399 /// to codegen a libcall as a tail call at legalization time.
1400 virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
1401 return false;
1402 }
1403
1404 /// mayBeEmittedAsTailCall - Return true if the target may be able to emit the
1405 /// call instruction as a tail call. This is used by optimization passes to
1406 /// determine if it's profitable to duplicate return instructions to enable
1407 /// tailcall optimization.
1408 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
1409 return false;
1410 }
1411
1412 /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
1413 /// sign extend a zeroext/signext integer argument or return value.
1414 /// FIXME: Most C calling conventions require the return type to be promoted,
1415 /// but this is not true all the time, e.g. i1 on x86-64. It is also not
1416 /// necessary for non-C calling conventions. The frontend should handle this
1417 /// and include all of the necessary information.
1418 virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
1419 ISD::NodeType /*ExtendKind*/) const {
1420 EVT MinVT = getRegisterType(Context, MVT::i32);
1421 return VT.bitsLT(MinVT) ?
MinVT : VT; 1422 } 1423 1424 /// LowerOperationWrapper - This callback is invoked by the type legalizer 1425 /// to legalize nodes with an illegal operand type but legal result types. 1426 /// It replaces the LowerOperation callback in the type Legalizer. 1427 /// The reason we can not do away with LowerOperation entirely is that 1428 /// LegalizeDAG isn't yet ready to use this callback. 1429 /// TODO: Consider merging with ReplaceNodeResults. 1430 1431 /// The target places new result values for the node in Results (their number 1432 /// and types must exactly match those of the original return values of 1433 /// the node), or leaves Results empty, which indicates that the node is not 1434 /// to be custom lowered after all. 1435 /// The default implementation calls LowerOperation. 1436 virtual void LowerOperationWrapper(SDNode *N, 1437 SmallVectorImpl<SDValue> &Results, 1438 SelectionDAG &DAG) const; 1439 1440 /// LowerOperation - This callback is invoked for operations that are 1441 /// unsupported by the target, which are registered to use 'custom' lowering, 1442 /// and whose defined values are all legal. 1443 /// If the target has no operations that require custom lowering, it need not 1444 /// implement this. The default implementation of this aborts. 1445 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 1446 1447 /// ReplaceNodeResults - This callback is invoked when a node result type is 1448 /// illegal for the target, and the operation was registered to use 'custom' 1449 /// lowering for that result type. The target places new result values for 1450 /// the node in Results (their number and types must exactly match those of 1451 /// the original return values of the node), or leaves Results empty, which 1452 /// indicates that the node is not to be custom lowered after all. 1453 /// 1454 /// If the target has no operations that require custom lowering, it need not 1455 /// implement this. The default implementation aborts. 1456 virtual void ReplaceNodeResults(SDNode * /*N*/, 1457 SmallVectorImpl<SDValue> &/*Results*/, 1458 SelectionDAG &/*DAG*/) const { 1459 llvm_unreachable("ReplaceNodeResults not implemented for this target!"); 1460 } 1461 1462 /// getTargetNodeName() - This method returns the name of a target specific 1463 /// DAG node. 1464 virtual const char *getTargetNodeName(unsigned Opcode) const; 1465 1466 /// createFastISel - This method returns a target specific FastISel object, 1467 /// or null if the target does not support "fast" ISel. 1468 virtual FastISel *createFastISel(FunctionLoweringInfo &, 1469 const TargetLibraryInfo *) const { 1470 return 0; 1471 } 1472 1473 //===--------------------------------------------------------------------===// 1474 // Inline Asm Support hooks 1475 // 1476 1477 /// ExpandInlineAsm - This hook allows the target to expand an inline asm 1478 /// call to be explicit llvm code if it wants to. This is useful for 1479 /// turning simple inline asms into LLVM intrinsics, which gives the 1480 /// compiler more information about the behavior of the code. 1481 virtual bool ExpandInlineAsm(CallInst *) const { 1482 return false; 1483 } 1484 1485 enum ConstraintType { 1486 C_Register, // Constraint represents specific register(s). 1487 C_RegisterClass, // Constraint represents any of register(s) in class. 1488 C_Memory, // Memory constraint. 1489 C_Other, // Something else. 1490 C_Unknown // Unsupported constraint. 1491 }; 1492 1493 enum ConstraintWeight { 1494 // Generic weights. 1495 CW_Invalid = -1, // No match. 
1496 CW_Okay = 0, // Acceptable.
1497 CW_Good = 1, // Good weight.
1498 CW_Better = 2, // Better weight.
1499 CW_Best = 3, // Best weight.
1500
1501 // Well-known weights.
1502 CW_SpecificReg = CW_Okay, // Specific register operands.
1503 CW_Register = CW_Good, // Register operands.
1504 CW_Memory = CW_Better, // Memory operands.
1505 CW_Constant = CW_Best, // Constant operand.
1506 CW_Default = CW_Okay // Default or don't know type.
1507 };
1508
1509 /// AsmOperandInfo - This contains information for each constraint that we are
1510 /// lowering.
1511 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
1512 /// ConstraintCode - This contains the actual string for the code, like "m".
1513 /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
1514 /// most closely matches the operand.
1515 std::string ConstraintCode;
1516
1517 /// ConstraintType - Information about the constraint code, e.g. Register,
1518 /// RegisterClass, Memory, Other, Unknown.
1519 TargetLowering::ConstraintType ConstraintType;
1520
1521 /// CallOperandVal - If this is the result output operand or a
1522 /// clobber, this is null, otherwise it is the incoming operand to the
1523 /// CallInst. This gets modified as the asm is processed.
1524 Value *CallOperandVal;
1525
1526 /// ConstraintVT - The ValueType for the operand value.
1527 EVT ConstraintVT;
1528
1529 /// isMatchingInputConstraint - Return true if this is an input operand that
1530 /// is a matching constraint like "4".
1531 bool isMatchingInputConstraint() const;
1532
1533 /// getMatchedOperand - If this is an input matching constraint, this method
1534 /// returns the output operand it matches.
1535 unsigned getMatchedOperand() const;
1536
1537 /// Copy constructor for copying from an AsmOperandInfo.
1538 AsmOperandInfo(const AsmOperandInfo &info)
1539 : InlineAsm::ConstraintInfo(info),
1540 ConstraintCode(info.ConstraintCode),
1541 ConstraintType(info.ConstraintType),
1542 CallOperandVal(info.CallOperandVal),
1543 ConstraintVT(info.ConstraintVT) {
1544 }
1545
1546 /// Copy constructor for copying from a ConstraintInfo.
1547 AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
1548 : InlineAsm::ConstraintInfo(info),
1549 ConstraintType(TargetLowering::C_Unknown),
1550 CallOperandVal(0), ConstraintVT(MVT::Other) {
1551 }
1552 };
1553
1554 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
1555
1556 /// ParseConstraints - Split up the constraint string from the inline
1557 /// assembly value into the specific constraints and their prefixes,
1558 /// and also tie in the associated operand values.
1559 /// If this returns an empty vector, and if the constraint string itself
1560 /// isn't empty, there was an error parsing.
1561 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
1562
1563 /// Examine constraint type and operand type and determine a weight value.
1564 /// The operand object must already have been set up with the operand type.
1565 virtual ConstraintWeight getMultipleConstraintMatchWeight(
1566 AsmOperandInfo &info, int maIndex) const;
1567
1568 /// Examine constraint string and operand type and determine a weight value.
1569 /// The operand object must already have been set up with the operand type.
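/// A typical override looks roughly like the sketch below; it is illustrative
/// only ("MyTargetLowering" and the 'Y' constraint letter are hypothetical,
/// not part of this interface). The target scores its own constraint letters
/// and defers to the base class for the generic codes.
/// \code
///   TargetLowering::ConstraintWeight
///   MyTargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &info,
///                                                    const char *constraint) const {
///     ConstraintWeight weight = CW_Invalid;
///     Value *CallOperandVal = info.CallOperandVal;
///     // If we don't have a value, we can't match; allow it at the lowest weight.
///     if (CallOperandVal == 0)
///       return CW_Default;
///     switch (*constraint) {
///     default:
///       // Generic codes ('r', 'm', 'i', ...) are handled by the base class.
///       weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
///       break;
///     case 'Y': // hypothetical target-specific register constraint
///       if (CallOperandVal->getType()->isIntegerTy())
///         weight = CW_Register;
///       break;
///     }
///     return weight;
///   }
/// \endcode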
1570 virtual ConstraintWeight getSingleConstraintMatchWeight( 1571 AsmOperandInfo &info, const char *constraint) const; 1572 1573 /// ComputeConstraintToUse - Determines the constraint code and constraint 1574 /// type to use for the specific AsmOperandInfo, setting 1575 /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand 1576 /// being passed in is available, it can be passed in as Op, otherwise an 1577 /// empty SDValue can be passed. 1578 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, 1579 SDValue Op, 1580 SelectionDAG *DAG = 0) const; 1581 1582 /// getConstraintType - Given a constraint, return the type of constraint it 1583 /// is for this target. 1584 virtual ConstraintType getConstraintType(const std::string &Constraint) const; 1585 1586 /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g. 1587 /// {edx}), return the register number and the register class for the 1588 /// register. 1589 /// 1590 /// Given a register class constraint, like 'r', if this corresponds directly 1591 /// to an LLVM register class, return a register of 0 and the register class 1592 /// pointer. 1593 /// 1594 /// This should only be used for C_Register constraints. On error, 1595 /// this returns a register number of 0 and a null register class pointer.. 1596 virtual std::pair<unsigned, const TargetRegisterClass*> 1597 getRegForInlineAsmConstraint(const std::string &Constraint, 1598 EVT VT) const; 1599 1600 /// LowerXConstraint - try to replace an X constraint, which matches anything, 1601 /// with another that has more specific requirements based on the type of the 1602 /// corresponding operand. This returns null if there is no replacement to 1603 /// make. 1604 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 1605 1606 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 1607 /// vector. If it is invalid, don't add anything to Ops. 1608 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, 1609 std::vector<SDValue> &Ops, 1610 SelectionDAG &DAG) const; 1611 1612 //===--------------------------------------------------------------------===// 1613 // Instruction Emitting Hooks 1614 // 1615 1616 // EmitInstrWithCustomInserter - This method should be implemented by targets 1617 // that mark instructions with the 'usesCustomInserter' flag. These 1618 // instructions are special in various ways, which require special support to 1619 // insert. The specified MachineInstr is created but not inserted into any 1620 // basic blocks, and this method is called to expand it into a sequence of 1621 // instructions, potentially also creating new basic blocks and control flow. 1622 virtual MachineBasicBlock * 1623 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const; 1624 1625 /// AdjustInstrPostInstrSelection - This method should be implemented by 1626 /// targets that mark instructions with the 'hasPostISelHook' flag. These 1627 /// instructions must be adjusted after instruction selection by target hooks. 1628 /// e.g. To fill in optional defs for ARM 's' setting instructions. 1629 virtual void 1630 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const; 1631 1632 //===--------------------------------------------------------------------===// 1633 // Addressing mode description hooks (used by LSR etc). 1634 // 1635 1636 /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the 1637 /// same BB as Load/Store instructions reading the address. 
This allows as
1638 /// much computation as possible to be done in the address mode for that
1639 /// operand. This hook lets targets also pass back when this should be done
1640 /// on intrinsics which load/store.
1641 virtual bool GetAddrModeArguments(IntrinsicInst *I,
1642 SmallVectorImpl<Value*> &Ops,
1643 Type *&AccessTy) const {
1644 return false;
1645 }
1646
1647 /// isLegalAddressingMode - Return true if the addressing mode represented by
1648 /// AM is legal for this target, for a load/store of the specified type.
1649 /// The type may be VoidTy, in which case only return true if the addressing
1650 /// mode is legal for a load/store of any legal type.
1651 /// TODO: Handle pre/postinc as well.
1652 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
1653
1654 /// isLegalICmpImmediate - Return true if the specified immediate is a legal
1655 /// icmp immediate, that is the target has icmp instructions which can compare
1656 /// a register against the immediate without having to materialize the
1657 /// immediate into a register.
1658 virtual bool isLegalICmpImmediate(int64_t) const {
1659 return true;
1660 }
1661
1662 /// isLegalAddImmediate - Return true if the specified immediate is a legal
1663 /// add immediate, that is the target has add instructions which can add
1664 /// a register with the immediate without having to materialize the
1665 /// immediate into a register.
1666 virtual bool isLegalAddImmediate(int64_t) const {
1667 return true;
1668 }
1669
1670 /// isTruncateFree - Return true if it's free to truncate a value of
1671 /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
1672 /// register EAX to i16 by referencing its sub-register AX.
1673 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1674 return false;
1675 }
1676
1677 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1678 return false;
1679 }
1680
1681 /// isZExtFree - Return true if any actual instruction that defines a
1682 /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
1683 /// register. This does not necessarily include registers defined in
1684 /// unknown ways, such as incoming arguments, or copies from unknown
1685 /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
1686 /// does not necessarily apply to truncate instructions. e.g. on x86-64,
1687 /// all instructions that define 32-bit values implicitly zero-extend the
1688 /// result out to 64 bits.
1689 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1690 return false;
1691 }
1692
1693 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1694 return false;
1695 }
1696
1697 /// isFNegFree - Return true if an fneg operation is free to the point where
1698 /// it is never worthwhile to replace it with a bitwise operation.
1699 virtual bool isFNegFree(EVT) const {
1700 return false;
1701 }
1702
1703 /// isFAbsFree - Return true if an fabs operation is free to the point where
1704 /// it is never worthwhile to replace it with a bitwise operation.
1705 virtual bool isFAbsFree(EVT) const {
1706 return false;
1707 }
1708
1709 /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
1710 /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
1711 /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
1712 /// is expanded to mul + add.
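/// For illustration only (a sketch, not part of this interface): a target with
/// fused multiply-add support for f32 and f64 might override this roughly as
/// follows, where "MyTargetLowering" and its "Subtarget" member are
/// hypothetical names.
/// \code
///   bool MyTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
///     if (!Subtarget->hasFMA())   // hypothetical subtarget feature query
///       return false;
///     VT = VT.getScalarType();    // treat vectors by their element type
///     return VT == MVT::f32 || VT == MVT::f64;
///   }
/// \endcode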
1713 virtual bool isFMAFasterThanMulAndAdd(EVT) const {
1714 return false;
1715 }
1716
1717 /// isNarrowingProfitable - Return true if it's profitable to narrow
1718 /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
1719 /// from i32 to i8 but not from i32 to i16.
1720 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1721 return false;
1722 }
1723
1724 //===--------------------------------------------------------------------===//
1725 // Div utility functions
1726 //
1727 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
1728 SelectionDAG &DAG) const;
1729 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1730 std::vector<SDNode*>* Created) const;
1731 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1732 std::vector<SDNode*>* Created) const;
1733
1734
1735 //===--------------------------------------------------------------------===//
1736 // Runtime Library hooks
1737 //
1738
1739 /// setLibcallName - Rename the default libcall routine name for the specified
1740 /// libcall.
1741 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1742 LibcallRoutineNames[Call] = Name;
1743 }
1744
1745 /// getLibcallName - Get the libcall routine name for the specified libcall.
1746 ///
1747 const char *getLibcallName(RTLIB::Libcall Call) const {
1748 return LibcallRoutineNames[Call];
1749 }
1750
1751 /// setCmpLibcallCC - Override the default CondCode to be used to test the
1752 /// result of the comparison libcall against zero.
1753 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1754 CmpLibcallCCs[Call] = CC;
1755 }
1756
1757 /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
1758 /// the comparison libcall against zero.
1759 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1760 return CmpLibcallCCs[Call];
1761 }
1762
1763 /// setLibcallCallingConv - Set the CallingConv that should be used for the
1764 /// specified libcall.
1765 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1766 LibcallCallingConvs[Call] = CC;
1767 }
1768
1769 /// getLibcallCallingConv - Get the CallingConv that should be used for the
1770 /// specified libcall.
1771 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1772 return LibcallCallingConvs[Call];
1773 }
1774
1775private:
1776 const TargetMachine &TM;
1777 const DataLayout *TD;
1778 const TargetLoweringObjectFile &TLOF;
1779
1780 /// PointerTy - The type to use for pointers, usually i32 or i64.
1781 ///
1782 MVT PointerTy;
1783
1784 /// IsLittleEndian - True if this is a little endian target.
1785 ///
1786 bool IsLittleEndian;
1787
1788 /// SelectIsExpensive - Tells the code generator not to expand operations
1789 /// into sequences that use the select operations if possible.
1790 bool SelectIsExpensive;
1791
1792 /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1793 /// constants into a sequence of muls, adds, and shifts. This is a hack until
1794 /// a real cost model is in place. If we ever optimize for size, this will be
1795 /// set to true unconditionally.
1796 bool IntDivIsCheap;
1797
1798 /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
1799 /// remainder instructions. For example, BypassSlowDivWidths[32] = 8 tells the
1800 /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
1801 /// integer div/rem when the operands are positive and less than 256.
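/// Illustrative sketch only ("Subtarget" and its feature query are hypothetical
/// names): a target whose 32-bit divider is slow would typically request the
/// bypass from its constructor, e.g.
/// \code
///   if (Subtarget->hasSlowDivide())  // hypothetical feature query
///     BypassSlowDivWidths[32] = 8;   // normally populated via the public
///                                    // setter, not written directly
/// \endcode
/// The code generator then emits a run-time check and uses the narrower
/// unsigned div/rem when both operands fit in the smaller width.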
1802 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1803
1804 /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1805 /// srl/add/sra for a signed divide by power of two, and let the target handle
1806 /// it.
1807 bool Pow2DivIsCheap;
1808
1809 /// JumpIsExpensive - Tells the code generator that it shouldn't generate
1810 /// extra flow control instructions and should attempt to combine flow
1811 /// control instructions via predication.
1812 bool JumpIsExpensive;
1813
1814 /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1815 /// llvm.setjmp. Defaults to false.
1816 bool UseUnderscoreSetJmp;
1817
1818 /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1819 /// llvm.longjmp. Defaults to false.
1820 bool UseUnderscoreLongJmp;
1821
1822 /// SupportJumpTables - Whether the target can generate code for jump tables.
1823 /// If not, then each jump table must be lowered into if-then-else's.
1824 bool SupportJumpTables;
1825
1826 /// MinimumJumpTableEntries - Minimum number of entries needed to use a jump table.
1827 int MinimumJumpTableEntries;
1828
1829 /// BooleanContents - Information about the contents of the high-bits in
1830 /// boolean values held in a type wider than i1. See getBooleanContents.
1831 BooleanContent BooleanContents;
1832 /// BooleanVectorContents - Information about the contents of the high-bits
1833 /// in boolean vector values when the element type is wider than i1. See
1834 /// getBooleanContents.
1835 BooleanContent BooleanVectorContents;
1836
1837 /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1838 /// total cycles or lowest register usage.
1839 Sched::Preference SchedPreferenceInfo;
1840
1841 /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1842 unsigned JumpBufSize;
1843
1844 /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
1845 /// buffers
1846 unsigned JumpBufAlignment;
1847
1848 /// MinStackArgumentAlignment - The minimum alignment that any argument
1849 /// on the stack needs to have.
1850 ///
1851 unsigned MinStackArgumentAlignment;
1852
1853 /// MinFunctionAlignment - The minimum function alignment (used when
1854 /// optimizing for size, and to prevent explicitly provided alignment
1855 /// from leading to incorrect code).
1856 ///
1857 unsigned MinFunctionAlignment;
1858
1859 /// PrefFunctionAlignment - The preferred function alignment (used when
1860 /// alignment unspecified and optimizing for speed).
1861 ///
1862 unsigned PrefFunctionAlignment;
1863
1864 /// PrefLoopAlignment - The preferred loop alignment.
1865 ///
1866 unsigned PrefLoopAlignment;
1867
1868 /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
1869 /// be folded into the enclosed atomic intrinsic instruction by the
1870 /// combiner.
1871 bool ShouldFoldAtomicFences;
1872
1873 /// InsertFencesForAtomic - Whether the DAG builder should automatically
1874 /// insert fences and reduce ordering for atomics. (This will be set for
1875 /// most architectures with weak memory ordering.)
1876 bool InsertFencesForAtomic;
1877
1878 /// StackPointerRegisterToSaveRestore - If set to a physical register, this
1879 /// specifies the register that llvm.stacksave/llvm.stackrestore should save
1880 /// and restore.
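/// Targets normally initialize this from their constructor through the public
/// setStackPointerRegisterToSaveRestore setter; for example, with an
/// illustrative register name:
/// \code
///   setStackPointerRegisterToSaveRestore(MyTarget::SP); // hypothetical target
/// \endcode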
1881 unsigned StackPointerRegisterToSaveRestore; 1882 1883 /// ExceptionPointerRegister - If set to a physical register, this specifies 1884 /// the register that receives the exception address on entry to a landing 1885 /// pad. 1886 unsigned ExceptionPointerRegister; 1887 1888 /// ExceptionSelectorRegister - If set to a physical register, this specifies 1889 /// the register that receives the exception typeid on entry to a landing 1890 /// pad. 1891 unsigned ExceptionSelectorRegister; 1892 1893 /// RegClassForVT - This indicates the default register class to use for 1894 /// each ValueType the target supports natively. 1895 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE]; 1896 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE]; 1897 EVT RegisterTypeForVT[MVT::LAST_VALUETYPE]; 1898 1899 /// RepRegClassForVT - This indicates the "representative" register class to 1900 /// use for each ValueType the target supports natively. This information is 1901 /// used by the scheduler to track register pressure. By default, the 1902 /// representative register class is the largest legal super-reg register 1903 /// class of the register class of the specified type. e.g. On x86, i8, i16, 1904 /// and i32's representative class would be GR32. 1905 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE]; 1906 1907 /// RepRegClassCostForVT - This indicates the "cost" of the "representative" 1908 /// register class for each ValueType. The cost is used by the scheduler to 1909 /// approximate register pressure. 1910 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE]; 1911 1912 /// TransformToType - For any value types we are promoting or expanding, this 1913 /// contains the value type that we are changing to. For Expanded types, this 1914 /// contains one step of the expand (e.g. i64 -> i32), even if there are 1915 /// multiple steps required (e.g. i64 -> i16). For types natively supported 1916 /// by the system, this holds the same type (e.g. i32 -> i32). 1917 EVT TransformToType[MVT::LAST_VALUETYPE]; 1918 1919 /// OpActions - For each operation and each value type, keep a LegalizeAction 1920 /// that indicates how instruction selection should deal with the operation. 1921 /// Most operations are Legal (aka, supported natively by the target), but 1922 /// operations that are not should be described. Note that operations on 1923 /// non-legal value types are not described here. 1924 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END]; 1925 1926 /// LoadExtActions - For each load extension type and each value type, 1927 /// keep a LegalizeAction that indicates how instruction selection should deal 1928 /// with a load of a specific value type and extension type. 1929 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE]; 1930 1931 /// TruncStoreActions - For each value type pair keep a LegalizeAction that 1932 /// indicates whether a truncating store of a specific value type and 1933 /// truncating type is legal. 1934 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]; 1935 1936 /// IndexedModeActions - For each indexed mode and each value type, 1937 /// keep a pair of LegalizeAction that indicates how instruction 1938 /// selection should deal with the load / store. The first dimension is the 1939 /// value_type for the reference. The second dimension represents the various 1940 /// modes for load store. 
1941 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE]; 1942 1943 /// CondCodeActions - For each condition code (ISD::CondCode) keep a 1944 /// LegalizeAction that indicates how instruction selection should 1945 /// deal with the condition code. 1946 /// Because each CC action takes up 2 bits, we need to have the array size 1947 /// be large enough to fit all of the value types. This can be done by 1948 /// dividing the MVT::LAST_VALUETYPE by 32 and adding one. 1949 uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1]; 1950 1951 ValueTypeActionImpl ValueTypeActions; 1952 1953 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind; 1954 1955 LegalizeKind 1956 getTypeConversion(LLVMContext &Context, EVT VT) const { 1957 // If this is a simple type, use the ComputeRegisterProp mechanism. 1958 if (VT.isSimple()) { 1959 assert((unsigned)VT.getSimpleVT().SimpleTy < 1960 array_lengthof(TransformToType)); 1961 EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy]; 1962 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT()); 1963 1964 assert( 1965 (!(NVT.isSimple() && LA != TypeLegal) || 1966 ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger) 1967 && "Promote may not follow Expand or Promote"); 1968 1969 return LegalizeKind(LA, NVT); 1970 } 1971 1972 // Handle Extended Scalar Types. 1973 if (!VT.isVector()) { 1974 assert(VT.isInteger() && "Float types must be simple"); 1975 unsigned BitSize = VT.getSizeInBits(); 1976 // First promote to a power-of-two size, then expand if necessary. 1977 if (BitSize < 8 || !isPowerOf2_32(BitSize)) { 1978 EVT NVT = VT.getRoundIntegerType(Context); 1979 assert(NVT != VT && "Unable to round integer VT"); 1980 LegalizeKind NextStep = getTypeConversion(Context, NVT); 1981 // Avoid multi-step promotion. 1982 if (NextStep.first == TypePromoteInteger) return NextStep; 1983 // Return rounded integer type. 1984 return LegalizeKind(TypePromoteInteger, NVT); 1985 } 1986 1987 return LegalizeKind(TypeExpandInteger, 1988 EVT::getIntegerVT(Context, VT.getSizeInBits()/2)); 1989 } 1990 1991 // Handle vector types. 1992 unsigned NumElts = VT.getVectorNumElements(); 1993 EVT EltVT = VT.getVectorElementType(); 1994 1995 // Vectors with only one element are always scalarized. 1996 if (NumElts == 1) 1997 return LegalizeKind(TypeScalarizeVector, EltVT); 1998 1999 // Try to widen vector elements until a legal type is found. 2000 if (EltVT.isInteger()) { 2001 // Vectors with a number of elements that is not a power of two are always 2002 // widened, for example <3 x float> -> <4 x float>. 2003 if (!VT.isPow2VectorType()) { 2004 NumElts = (unsigned)NextPowerOf2(NumElts); 2005 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts); 2006 return LegalizeKind(TypeWidenVector, NVT); 2007 } 2008 2009 // Examine the element type. 2010 LegalizeKind LK = getTypeConversion(Context, EltVT); 2011 2012 // If type is to be expanded, split the vector. 2013 // <4 x i140> -> <2 x i140> 2014 if (LK.first == TypeExpandInteger) 2015 return LegalizeKind(TypeSplitVector, 2016 EVT::getVectorVT(Context, EltVT, NumElts / 2)); 2017 2018 // Promote the integer element types until a legal vector type is found 2019 // or until the element integer type is too big. If a legal type was not 2020 // found, fallback to the usual mechanism of widening/splitting the 2021 // vector. 2022 while (1) { 2023 // Increase the bitwidth of the element to the next pow-of-two 2024 // (which is greater than 8 bits). 
2025 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits() 2026 ).getRoundIntegerType(Context); 2027 2028 // Stop trying when getting a non-simple element type. 2029 // Note that vector elements may be greater than legal vector element 2030 // types. Example: X86 XMM registers hold 64bit element on 32bit systems. 2031 if (!EltVT.isSimple()) break; 2032 2033 // Build a new vector type and check if it is legal. 2034 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 2035 // Found a legal promoted vector type. 2036 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal) 2037 return LegalizeKind(TypePromoteInteger, 2038 EVT::getVectorVT(Context, EltVT, NumElts)); 2039 } 2040 } 2041 2042 // Try to widen the vector until a legal type is found. 2043 // If there is no wider legal type, split the vector. 2044 while (1) { 2045 // Round up to the next power of 2. 2046 NumElts = (unsigned)NextPowerOf2(NumElts); 2047 2048 // If there is no simple vector type with this many elements then there 2049 // cannot be a larger legal vector type. Note that this assumes that 2050 // there are no skipped intermediate vector types in the simple types. 2051 if (!EltVT.isSimple()) break; 2052 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts); 2053 if (LargerVector == MVT()) break; 2054 2055 // If this type is legal then widen the vector. 2056 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal) 2057 return LegalizeKind(TypeWidenVector, LargerVector); 2058 } 2059 2060 // Widen odd vectors to next power of two. 2061 if (!VT.isPow2VectorType()) { 2062 EVT NVT = VT.getPow2VectorType(Context); 2063 return LegalizeKind(TypeWidenVector, NVT); 2064 } 2065 2066 // Vectors with illegal element types are expanded. 2067 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2); 2068 return LegalizeKind(TypeSplitVector, NVT); 2069 } 2070 2071 std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses; 2072 2073 /// TargetDAGCombineArray - Targets can specify ISD nodes that they would 2074 /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(), 2075 /// which sets a bit in this array. 2076 unsigned char 2077 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; 2078 2079 /// PromoteToType - For operations that must be promoted to a specific type, 2080 /// this holds the destination type. This map should be sparse, so don't hold 2081 /// it as an array. 2082 /// 2083 /// Targets add entries to this map with AddPromotedToType(..), clients access 2084 /// this with getTypeToPromoteTo(..). 2085 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> 2086 PromoteToType; 2087 2088 /// LibcallRoutineNames - Stores the name each libcall. 2089 /// 2090 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL]; 2091 2092 /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result 2093 /// of each of the comparison libcall against zero. 2094 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL]; 2095 2096 /// LibcallCallingConvs - Stores the CallingConv that should be used for each 2097 /// libcall. 2098 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL]; 2099 2100protected: 2101 /// When lowering \@llvm.memset this field specifies the maximum number of 2102 /// store operations that may be substituted for the call to memset. Targets 2103 /// must set this value based on the cost threshold for that target. 
Targets
2104 /// should assume that the memset will be done using as many of the largest
2105 /// store operations first, followed by smaller ones, if necessary, per
2106 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2107 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2108 /// store. This only applies to setting a constant array of a constant size.
2109 /// @brief Specify maximum number of store instructions per memset call.
2110 unsigned maxStoresPerMemset;
2111
2112 /// Maximum number of store operations that may be substituted for the call
2113 /// to memset, used for functions with OptSize attribute.
2114 unsigned maxStoresPerMemsetOptSize;
2115
2116 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2117 /// store operations that may be substituted for a call to memcpy. Targets
2118 /// must set this value based on the cost threshold for that target. Targets
2119 /// should assume that the memcpy will be done using as many of the largest
2120 /// store operations first, followed by smaller ones, if necessary, per
2121 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2122 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
2123 /// and one 1-byte store. This only applies to copying a constant array of
2124 /// constant size.
2125 /// @brief Specify maximum number of store instructions per memcpy call.
2126 unsigned maxStoresPerMemcpy;
2127
2128 /// Maximum number of store operations that may be substituted for a call
2129 /// to memcpy, used for functions with OptSize attribute.
2130 unsigned maxStoresPerMemcpyOptSize;
2131
2132 /// When lowering \@llvm.memmove this field specifies the maximum number of
2133 /// store instructions that may be substituted for a call to memmove. Targets
2134 /// must set this value based on the cost threshold for that target. Targets
2135 /// should assume that the memmove will be done using as many of the largest
2136 /// store operations first, followed by smaller ones, if necessary, per
2137 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2138 /// with 8-bit alignment would result in nine 1-byte stores. This only
2139 /// applies to copying a constant array of constant size.
2140 /// @brief Specify maximum number of store instructions per memmove call.
2141 unsigned maxStoresPerMemmove;
2142
2143 /// Maximum number of store instructions that may be substituted for a call
2144 /// to memmove, used for functions with OptSize attribute.
2145 unsigned maxStoresPerMemmoveOptSize;
2146
2147 /// This field specifies whether the target can benefit from code placement
2148 /// optimization.
2149 bool benefitFromCodePlacementOpt;
2150
2151 /// predictableSelectIsExpensive - Tells the code generator that select is
2152 /// more expensive than a branch if the branch is usually predicted right.
2153 bool predictableSelectIsExpensive;
2154
2155private:
2156 /// isLegalRC - Return true if the value types that can be represented by the
2157 /// specified register class are all legal.
2158 bool isLegalRC(const TargetRegisterClass *RC) const;
2159};
2160
2161/// GetReturnInfo - Given an LLVM IR type and return type attributes,
2162/// compute the return value EVTs and flags, and optionally also
2163/// the offsets, if the return value is being lowered to memory.
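/// A minimal usage sketch, assuming the caller has a Function *F, a
/// MachineFunction &MF, and the TargetLowering &TLI in scope (names and
/// surrounding context are illustrative, not prescribed by this interface):
/// \code
///   SmallVector<ISD::OutputArg, 4> Outs;
///   GetReturnInfo(F->getReturnType(), F->getAttributes().getRetAttributes(),
///                 Outs, TLI);
///   bool FitsInRegs = TLI.CanLowerReturn(F->getCallingConv(), MF,
///                                        F->isVarArg(), Outs, F->getContext());
/// \endcode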
2164void GetReturnInfo(Type* ReturnType, Attributes attr, 2165 SmallVectorImpl<ISD::OutputArg> &Outs, 2166 const TargetLowering &TLI); 2167 2168} // end llvm namespace 2169 2170#endif 2171